--- a/.hgtags Fri Apr 27 11:33:22 2018 +0100
+++ b/.hgtags Fri Apr 27 12:29:49 2018 +0100
@@ -482,3 +482,4 @@
0c3e252cea44f06aef570ef464950ab97c669970 jdk-11+9
6fa770f9f8ab296e1ce255ec17ccf6d4e1051886 jdk-10+46
69d7398038c54774d9395b6810e0cca335edc02c jdk-11+10
+e1e60f75cd39312a7f59d2a4f91d624e5aecc95e jdk-11+11
--- a/make/autoconf/flags-cflags.m4 Fri Apr 27 11:33:22 2018 +0100
+++ b/make/autoconf/flags-cflags.m4 Fri Apr 27 12:29:49 2018 +0100
@@ -128,16 +128,22 @@
AC_ARG_ENABLE([warnings-as-errors], [AS_HELP_STRING([--disable-warnings-as-errors],
[do not consider native warnings to be an error @<:@enabled@:>@])])
+ # Set default value.
+ if test "x$TOOLCHAIN_TYPE" = xxlc; then
+ WARNINGS_AS_ERRORS=false
+ else
+ WARNINGS_AS_ERRORS=true
+ fi
+
AC_MSG_CHECKING([if native warnings are errors])
if test "x$enable_warnings_as_errors" = "xyes"; then
AC_MSG_RESULT([yes (explicitly set)])
WARNINGS_AS_ERRORS=true
elif test "x$enable_warnings_as_errors" = "xno"; then
- AC_MSG_RESULT([no])
+ AC_MSG_RESULT([no (explicitly set)])
WARNINGS_AS_ERRORS=false
elif test "x$enable_warnings_as_errors" = "x"; then
- AC_MSG_RESULT([yes (default)])
- WARNINGS_AS_ERRORS=true
+ AC_MSG_RESULT([${WARNINGS_AS_ERRORS} (default)])
else
AC_MSG_ERROR([--enable-warnings-as-errors accepts no argument])
fi
--- a/make/hotspot/lib/JvmFeatures.gmk Fri Apr 27 11:33:22 2018 +0100
+++ b/make/hotspot/lib/JvmFeatures.gmk Fri Apr 27 12:29:49 2018 +0100
@@ -32,7 +32,7 @@
ifeq ($(call check-jvm-feature, compiler1), true)
JVM_CFLAGS_FEATURES += -DCOMPILER1
else
- JVM_EXCLUDE_PATTERNS += c1_
+ JVM_EXCLUDE_PATTERNS += c1_ c1/
endif
ifeq ($(call check-jvm-feature, compiler2), true)
--- a/make/jdk/src/classes/build/tools/cldrconverter/Bundle.java Fri Apr 27 11:33:22 2018 +0100
+++ b/make/jdk/src/classes/build/tools/cldrconverter/Bundle.java Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -318,16 +318,17 @@
}
for (Iterator<String> it = myMap.keySet().iterator(); it.hasNext();) {
String key = it.next();
- if (key.startsWith(CLDRConverter.TIMEZONE_ID_PREFIX)
+ if (key.startsWith(CLDRConverter.TIMEZONE_ID_PREFIX)
|| key.startsWith(CLDRConverter.METAZONE_ID_PREFIX)) {
@SuppressWarnings("unchecked")
Map<String, String> nameMap = (Map<String, String>) myMap.get(key);
+
// Convert key/value pairs to an array.
String[] names = new String[ZONE_NAME_KEYS.length];
int ix = 0;
for (String nameKey : ZONE_NAME_KEYS) {
String name = nameMap.get(nameKey);
- if (name == null) {
+ if (name == null && parentsMap != null) {
@SuppressWarnings("unchecked")
Map<String, String> parentNames = (Map<String, String>) parentsMap.get(key);
if (parentNames != null) {
@@ -357,29 +358,6 @@
}
}
}
- // If there are still any nulls, try filling in them from en data.
- if (hasNulls(names) && !id.equals("en")) {
- @SuppressWarnings("unchecked")
- String[] enNames = (String[]) Bundle.getBundle("en").getTargetMap().get(key);
- if (enNames == null) {
- if (metaKey != null) {
- @SuppressWarnings("unchecked")
- String[] metaNames = (String[]) Bundle.getBundle("en").getTargetMap().get(metaKey);
- enNames = metaNames;
- }
- }
- if (enNames != null) {
- for (int i = 0; i < names.length; i++) {
- if (names[i] == null) {
- names[i] = enNames[i];
- }
- }
- }
- // If there are still nulls, give up names.
- if (hasNulls(names)) {
- names = null;
- }
- }
}
// replace the Map with the array
if (names != null) {
@@ -662,12 +640,12 @@
if (CLDRConverter.handlerMetaZones.get(tz).equals(meta)) {
tzid = tz;
break;
- }
}
}
+ }
} else {
tzid = key.substring(CLDRConverter.TIMEZONE_ID_PREFIX.length());
- }
+ }
if (tzid != null) {
for (Object[] jreZone : jreTimeZoneNames) {
@@ -676,13 +654,13 @@
if (map.get(ZONE_NAME_KEYS[i]) == null) {
String[] jreNames = (String[])jreZone[1];
map.put(ZONE_NAME_KEYS[i], jreNames[i]);
+ }
+ }
+ break;
}
}
- break;
}
}
- }
- }
private void convert(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb) {
switch (cldrLetter) {
--- a/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java Fri Apr 27 11:33:22 2018 +0100
+++ b/make/jdk/src/classes/build/tools/cldrconverter/CLDRConverter.java Fri Apr 27 12:29:49 2018 +0100
@@ -31,6 +31,7 @@
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.*;
+import java.text.MessageFormat;
import java.time.*;
import java.util.*;
import java.util.ResourceBundle.Control;
@@ -82,9 +83,11 @@
static final String CALENDAR_FIRSTDAY_PREFIX = "firstDay.";
static final String CALENDAR_MINDAYS_PREFIX = "minDays.";
static final String TIMEZONE_ID_PREFIX = "timezone.id.";
+ static final String EXEMPLAR_CITY_PREFIX = "timezone.excity.";
static final String ZONE_NAME_PREFIX = "timezone.displayname.";
static final String METAZONE_ID_PREFIX = "metazone.id.";
static final String PARENT_LOCALE_PREFIX = "parentLocale.";
+ static final String[] EMPTY_ZONE = {"", "", "", "", "", ""};
private static SupplementDataParseHandler handlerSuppl;
private static SupplementalMetadataParseHandler handlerSupplMeta;
@@ -662,23 +665,18 @@
Arrays.deepEquals(data,
(String[])map.get(METAZONE_ID_PREFIX + me.getValue())))
.findAny();
- if (cldrMeta.isPresent()) {
- names.put(tzid, cldrMeta.get().getValue());
- } else {
+ cldrMeta.ifPresentOrElse(meta -> names.put(tzid, meta.getValue()), () -> {
// check the JRE meta key, add if there is not.
Optional<Map.Entry<String[], String>> jreMeta =
jreMetaMap.entrySet().stream()
.filter(jm -> Arrays.deepEquals(data, jm.getKey()))
.findAny();
- if (jreMeta.isPresent()) {
- names.put(tzid, jreMeta.get().getValue());
- } else {
- String metaName = "JRE_" + tzid.replaceAll("[/-]", "_");
- names.put(METAZONE_ID_PREFIX + metaName, data);
- names.put(tzid, metaName);
- jreMetaMap.put(data, metaName);
- }
- }
+ jreMeta.ifPresentOrElse(meta -> names.put(tzid, meta.getValue()), () -> {
+ String metaName = "JRE_" + tzid.replaceAll("[/-]", "_");
+ names.put(METAZONE_ID_PREFIX + metaName, data);
+ names.put(tzid, metaName);
+ });
+ });
}
});
}
@@ -705,6 +703,26 @@
}
});
+ // exemplar cities.
+ Map<String, Object> exCities = map.entrySet().stream()
+ .filter(e -> e.getKey().startsWith(CLDRConverter.EXEMPLAR_CITY_PREFIX))
+ .collect(Collectors
+ .toMap(Map.Entry::getKey, Map.Entry::getValue));
+ names.putAll(exCities);
+
+ if (!id.equals("en") &&
+ !names.isEmpty()) {
+ // CLDR does not have UTC entry, so add it here.
+ names.put("UTC", EMPTY_ZONE);
+
+ // no metazone zones
+ Arrays.asList(handlerMetaZones.get(MetaZonesParseHandler.NO_METAZONE_KEY)
+ .split("\\s")).stream()
+ .forEach(tz -> {
+ names.put(tz, EMPTY_ZONE);
+ });
+ }
+
return names;
}
@@ -769,6 +787,10 @@
"field.hour",
"timezone.hourFormat",
"timezone.gmtFormat",
+ "timezone.gmtZeroFormat",
+ "timezone.regionFormat",
+ "timezone.regionFormat.daylight",
+ "timezone.regionFormat.standard",
"field.minute",
"field.second",
"field.zone",
--- a/make/jdk/src/classes/build/tools/cldrconverter/LDMLParseHandler.java Fri Apr 27 11:33:22 2018 +0100
+++ b/make/jdk/src/classes/build/tools/cldrconverter/LDMLParseHandler.java Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -103,19 +103,30 @@
case "key":
// for LocaleNames
// copy string
- pushStringEntry(qName, attributes,
- CLDRConverter.LOCALE_KEY_PREFIX +
- convertOldKeyName(attributes.getValue("type")));
+ {
+ String key = convertOldKeyName(attributes.getValue("type"));
+ if (key.length() == 2) {
+ pushStringEntry(qName, attributes,
+ CLDRConverter.LOCALE_KEY_PREFIX + key);
+ } else {
+ pushIgnoredContainer(qName);
+ }
+ }
break;
case "type":
// for LocaleNames/CalendarNames
// copy string
- pushStringEntry(qName, attributes,
- CLDRConverter.LOCALE_TYPE_PREFIX +
- convertOldKeyName(attributes.getValue("key")) + "." +
- attributes.getValue("type"));
-
+ {
+ String key = convertOldKeyName(attributes.getValue("key"));
+ if (key.length() == 2) {
+ pushStringEntry(qName, attributes,
+ CLDRConverter.LOCALE_TYPE_PREFIX + key + "." +
+ attributes.getValue("type"));
+ } else {
+ pushIgnoredContainer(qName);
+ }
+ }
break;
//
@@ -445,6 +456,16 @@
case "gmtFormat":
pushStringEntry(qName, attributes, "timezone.gmtFormat");
break;
+ case "gmtZeroFormat":
+ pushStringEntry(qName, attributes, "timezone.gmtZeroFormat");
+ break;
+ case "regionFormat":
+ {
+ String type = attributes.getValue("type");
+ pushStringEntry(qName, attributes, "timezone.regionFormat" +
+ (type == null ? "" : "." + type));
+ }
+ break;
case "zone":
{
String tzid = attributes.getValue("type"); // Olson tz id
@@ -474,8 +495,8 @@
case "daylight": // daylight saving (summer) time name
pushStringEntry(qName, attributes, CLDRConverter.ZONE_NAME_PREFIX + qName + "." + zoneNameStyle);
break;
- case "exemplarCity": // not used in JDK
- pushIgnoredContainer(qName);
+ case "exemplarCity":
+ pushStringEntry(qName, attributes, CLDRConverter.EXEMPLAR_CITY_PREFIX);
break;
//
@@ -877,11 +898,16 @@
case "generic":
case "standard":
case "daylight":
+ case "exemplarCity":
if (zonePrefix != null && (currentContainer instanceof Entry)) {
@SuppressWarnings("unchecked")
Map<String, String> valmap = (Map<String, String>) get(zonePrefix + getContainerKey());
Entry<?> entry = (Entry<?>) currentContainer;
- valmap.put(entry.getKey(), (String) entry.getValue());
+ if (qName.equals("exemplarCity")) {
+ put(CLDRConverter.EXEMPLAR_CITY_PREFIX + getContainerKey(), (String) entry.getValue());
+ } else {
+ valmap.put(entry.getKey(), (String) entry.getValue());
+ }
}
break;
--- a/make/jdk/src/classes/build/tools/cldrconverter/MetaZonesParseHandler.java Fri Apr 27 11:33:22 2018 +0100
+++ b/make/jdk/src/classes/build/tools/cldrconverter/MetaZonesParseHandler.java Fri Apr 27 12:29:49 2018 +0100
@@ -35,6 +35,8 @@
import org.xml.sax.SAXException;
class MetaZonesParseHandler extends AbstractLDMLHandler<String> {
+ final static String NO_METAZONE_KEY = "no.metazone.defined";
+
private String tzid, metazone;
// for java.time.format.ZoneNames.java
@@ -101,10 +103,17 @@
assert qName.equals(currentContainer.getqName()) : "current=" + currentContainer.getqName() + ", param=" + qName;
switch (qName) {
case "timezone":
- if (tzid == null || metazone == null) {
+ if (tzid == null) {
throw new InternalError();
+ } else if (metazone == null) {
+ String no_meta = get(NO_METAZONE_KEY);
+ put(NO_METAZONE_KEY, no_meta == null ? tzid : no_meta + " " + tzid);
+ CLDRConverter.info("No metazone defined for %s%n", tzid);
+ } else {
+ put(tzid, metazone);
}
- put(tzid, metazone);
+ tzid = null;
+ metazone = null;
break;
}
currentContainer = currentContainer.getParent();
--- a/make/jdk/src/classes/build/tools/cldrconverter/ResourceBundleGenerator.java Fri Apr 27 11:33:22 2018 +0100
+++ b/make/jdk/src/classes/build/tools/cldrconverter/ResourceBundleGenerator.java Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -211,11 +211,13 @@
if (value == null) {
CLDRConverter.warning("null value for " + key);
} else if (value instanceof String) {
- if (type == BundleType.TIMEZONE ||
- ((String)value).startsWith(META_VALUE_PREFIX)) {
- out.printf(" { \"%s\", %s },\n", key, CLDRConverter.saveConvert((String) value, useJava));
+ String valStr = (String)value;
+ if (type == BundleType.TIMEZONE &&
+ !key.startsWith(CLDRConverter.EXEMPLAR_CITY_PREFIX) ||
+ valStr.startsWith(META_VALUE_PREFIX)) {
+ out.printf(" { \"%s\", %s },\n", key, CLDRConverter.saveConvert(valStr, useJava));
} else {
- out.printf(" { \"%s\", \"%s\" },\n", key, CLDRConverter.saveConvert((String) value, useJava));
+ out.printf(" { \"%s\", \"%s\" },\n", key, CLDRConverter.saveConvert(valStr, useJava));
}
} else if (value instanceof String[]) {
String[] values = (String[]) value;
@@ -308,15 +310,20 @@
// end of static initializer block.
- // Short TZ names for delayed initialization
+ // Canonical TZ names for delayed initialization
if (CLDRConverter.isBaseModule) {
- out.printf(" private static class TZShortIDMapHolder {\n");
- out.printf(" static final Map<String, String> tzShortIDMap = new HashMap<>();\n");
+ out.printf(" private static class TZCanonicalIDMapHolder {\n");
+ out.printf(" static final Map<String, String> tzCanonicalIDMap = new HashMap<>(600);\n");
out.printf(" static {\n");
CLDRConverter.handlerTimeZone.getData().entrySet().stream()
.forEach(e -> {
- out.printf(" tzShortIDMap.put(\"%s\", \"%s\");\n", e.getKey(),
- ((String)e.getValue()));
+ String[] ids = ((String)e.getValue()).split("\\s");
+ out.printf(" tzCanonicalIDMap.put(\"%s\", \"%s\");\n", e.getKey(),
+ ids[0]);
+ for (int i = 1; i < ids.length; i++) {
+ out.printf(" tzCanonicalIDMap.put(\"%s\", \"%s\");\n", ids[i],
+ ids[0]);
+ }
});
out.printf(" }\n }\n\n");
}
@@ -333,8 +340,8 @@
if (CLDRConverter.isBaseModule) {
out.printf(" @Override\n" +
- " public Map<String, String> tzShortIDs() {\n" +
- " return TZShortIDMapHolder.tzShortIDMap;\n" +
+ " public Map<String, String> tzCanonicalIDs() {\n" +
+ " return TZCanonicalIDMapHolder.tzCanonicalIDMap;\n" +
" }\n\n");
out.printf(" public Map<Locale, String[]> parentLocales() {\n" +
" return parentLocalesMap;\n" +
--- a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -32,9 +32,6 @@
#include "nativeInst_aarch64.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#endif
#define __ ce->masm()->
@@ -350,42 +347,4 @@
__ b(_continuation);
}
-
-/////////////////////////////////////////////////////////////////////////////
-#if INCLUDE_ALL_GCS
-
-void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
- // At this point we know that marking is in progress.
- // If do_load() is true then we have to emit the
- // load of the previous value; otherwise it has already
- // been loaded into _pre_val.
-
- __ bind(_entry);
- assert(pre_val()->is_register(), "Precondition.");
-
- Register pre_val_reg = pre_val()->as_register();
-
- if (do_load()) {
- ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
- }
- __ cbz(pre_val_reg, _continuation);
- ce->store_parameter(pre_val()->as_register(), 0);
- __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
- __ b(_continuation);
-}
-
-void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
- __ bind(_entry);
- assert(addr()->is_register(), "Precondition.");
- assert(new_val()->is_register(), "Precondition.");
- Register new_val_reg = new_val()->as_register();
- __ cbz(new_val_reg, _continuation);
- ce->store_parameter(addr()->as_pointer_register(), 0);
- __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
- __ b(_continuation);
-}
-
-#endif // INCLUDE_ALL_GCS
-/////////////////////////////////////////////////////////////////////////////
-
#undef __
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -1558,7 +1558,16 @@
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
assert(VM_Version::supports_cx8(), "wrong machine");
- Register addr = as_reg(op->addr());
+ Register addr;
+ if (op->addr()->is_register()) {
+ addr = as_reg(op->addr());
+ } else {
+ assert(op->addr()->is_address(), "what else?");
+ LIR_Address* addr_ptr = op->addr()->as_address_ptr();
+ assert(addr_ptr->disp() == 0, "need 0 disp");
+ assert(addr_ptr->index() == LIR_OprDesc::illegalOpr(), "need 0 index");
+ addr = as_reg(addr_ptr->base());
+ }
Register newval = as_reg(op->new_value());
Register cmpval = as_reg(op->cmp_value());
Label succeed, fail, around;
--- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -144,8 +144,22 @@
// accumulate fixed displacements
if (index->is_constant()) {
- large_disp += (intx)(index->as_constant_ptr()->as_jint()) << shift;
- index = LIR_OprFact::illegalOpr;
+ LIR_Const *constant = index->as_constant_ptr();
+ if (constant->type() == T_INT) {
+ large_disp += index->as_jint() << shift;
+ } else {
+ assert(constant->type() == T_LONG, "should be");
+ jlong c = index->as_jlong() << shift;
+ if ((jlong)((jint)c) == c) {
+ large_disp += c;
+ index = LIR_OprFact::illegalOpr;
+ } else {
+ LIR_Opr tmp = new_register(T_LONG);
+ __ move(index, tmp);
+ index = tmp;
+ // apply shift and displacement below
+ }
+ }
}
if (index->is_register()) {
@@ -183,9 +197,8 @@
}
}
-
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
- BasicType type, bool needs_card_mark) {
+ BasicType type) {
int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
int elem_size = type2aelembytes(type);
int shift = exact_log2(elem_size);
@@ -206,16 +219,7 @@
LIR_Address::scale(type),
offset_in_bytes, type);
}
- if (needs_card_mark) {
- // This store will need a precise card mark, so go ahead and
- // compute the full adddres instead of computing once for the
- // store and again for the card mark.
- LIR_Opr tmp = new_pointer_register();
- __ leal(LIR_OprFact::address(addr), tmp);
- return new LIR_Address(tmp, type);
- } else {
- return addr;
- }
+ return addr;
}
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
@@ -305,87 +309,17 @@
__ store(item, new LIR_Address(FrameMap::sp_opr, in_bytes(offset_from_sp), type));
}
+void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
+ LIR_Opr tmp1 = new_register(objectType);
+ LIR_Opr tmp2 = new_register(objectType);
+ LIR_Opr tmp3 = new_register(objectType);
+ __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
+}
+
//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------
-
-void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
- assert(x->is_pinned(),"");
- bool needs_range_check = x->compute_needs_range_check();
- bool use_length = x->length() != NULL;
- bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
- bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
- !get_jobject_constant(x->value())->is_null_object() ||
- x->should_profile());
-
- LIRItem array(x->array(), this);
- LIRItem index(x->index(), this);
- LIRItem value(x->value(), this);
- LIRItem length(this);
-
- array.load_item();
- index.load_nonconstant();
-
- if (use_length && needs_range_check) {
- length.set_instruction(x->length());
- length.load_item();
-
- }
- if (needs_store_check || x->check_boolean()) {
- value.load_item();
- } else {
- value.load_for_store(x->elt_type());
- }
-
- set_no_result(x);
-
- // the CodeEmitInfo must be duplicated for each different
- // LIR-instruction because spilling can occur anywhere between two
- // instructions and so the debug information must be different
- CodeEmitInfo* range_check_info = state_for(x);
- CodeEmitInfo* null_check_info = NULL;
- if (x->needs_null_check()) {
- null_check_info = new CodeEmitInfo(range_check_info);
- }
-
- // emit array address setup early so it schedules better
- // FIXME? No harm in this on aarch64, and it might help
- LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
-
- if (GenerateRangeChecks && needs_range_check) {
- if (use_length) {
- __ cmp(lir_cond_belowEqual, length.result(), index.result());
- __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
- } else {
- array_range_check(array.result(), index.result(), null_check_info, range_check_info);
- // range_check also does the null check
- null_check_info = NULL;
- }
- }
-
- if (GenerateArrayStoreCheck && needs_store_check) {
- LIR_Opr tmp1 = new_register(objectType);
- LIR_Opr tmp2 = new_register(objectType);
- LIR_Opr tmp3 = new_register(objectType);
-
- CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
- __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
- }
-
- if (obj_store) {
- // Needs GC write barriers.
- pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- __ move(value.result(), array_addr, null_check_info);
- // Seems to be a precise
- post_barrier(LIR_OprFact::address(array_addr), value.result());
- } else {
- LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
- __ move(result, array_addr, null_check_info);
- }
-}
-
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
assert(x->is_pinned(),"");
LIRItem obj(x->obj(), this);
@@ -771,76 +705,42 @@
}
}
-void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
- assert(x->number_of_arguments() == 4, "wrong type");
- LIRItem obj (x->argument_at(0), this); // object
- LIRItem offset(x->argument_at(1), this); // offset of field
- LIRItem cmp (x->argument_at(2), this); // value to compare with field
- LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
-
- assert(obj.type()->tag() == objectTag, "invalid type");
-
- // In 64bit the type can be long, sparc doesn't have this assert
- // assert(offset.type()->tag() == intTag, "invalid type");
-
- assert(cmp.type()->tag() == type->tag(), "invalid type");
- assert(val.type()->tag() == type->tag(), "invalid type");
-
- // get address of field
- obj.load_item();
- offset.load_nonconstant();
- val.load_item();
- cmp.load_item();
+LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
+ LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
+ new_value.load_item();
+ cmp_value.load_item();
+ LIR_Opr result = new_register(T_INT);
+ if (type == T_OBJECT || type == T_ARRAY) {
+ __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
+ } else if (type == T_INT) {
+ __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
+ } else if (type == T_LONG) {
+ __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
+ } else {
+ ShouldNotReachHere();
+ Unimplemented();
+ }
+ __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
+ return result;
+}
- LIR_Address* a;
- if(offset.result()->is_constant()) {
- jlong c = offset.result()->as_jlong();
- if ((jlong)((jint)c) == c) {
- a = new LIR_Address(obj.result(),
- (jint)c,
- as_BasicType(type));
- } else {
- LIR_Opr tmp = new_register(T_LONG);
- __ move(offset.result(), tmp);
- a = new LIR_Address(obj.result(),
- tmp,
- as_BasicType(type));
- }
- } else {
- a = new LIR_Address(obj.result(),
- offset.result(),
- 0,
- as_BasicType(type));
- }
- LIR_Opr addr = new_pointer_register();
- __ leal(LIR_OprFact::address(a), addr);
+LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
+ bool is_oop = type == T_OBJECT || type == T_ARRAY;
+ LIR_Opr result = new_register(type);
+ value.load_item();
+ assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
+ LIR_Opr tmp = new_register(T_INT);
+ __ xchg(addr, value.result(), result, tmp);
+ return result;
+}
- if (type == objectType) { // Write-barrier needed for Object fields.
- // Do the pre-write barrier, if any.
- pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- }
-
- LIR_Opr result = rlock_result(x);
-
- LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
- if (type == objectType)
- __ cas_obj(addr, cmp.result(), val.result(), new_register(T_INT), new_register(T_INT),
- result);
- else if (type == intType)
- __ cas_int(addr, cmp.result(), val.result(), ill, ill);
- else if (type == longType)
- __ cas_long(addr, cmp.result(), val.result(), ill, ill);
- else {
- ShouldNotReachHere();
- }
-
- __ logical_xor(FrameMap::r8_opr, LIR_OprFact::intConst(1), result);
-
- if (type == objectType) { // Write-barrier needed for Object fields.
- // Seems to be precise
- post_barrier(addr, val.result());
- }
+LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
+ LIR_Opr result = new_register(type);
+ value.load_item();
+ assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
+ LIR_Opr tmp = new_register(T_INT);
+ __ xadd(addr, value.result(), result, tmp);
+ return result;
}
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
@@ -1433,84 +1333,3 @@
__ volatile_load_mem_reg(address, result, info);
}
-
-void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
- BasicType type, bool is_volatile) {
- LIR_Address* addr = new LIR_Address(src, offset, type);
- __ load(addr, dst);
-}
-
-
-void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
- BasicType type, bool is_volatile) {
- LIR_Address* addr = new LIR_Address(src, offset, type);
- bool is_obj = (type == T_ARRAY || type == T_OBJECT);
- if (is_obj) {
- // Do the pre-write barrier, if any.
- pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- __ move(data, addr);
- assert(src->is_register(), "must be register");
- // Seems to be a precise address
- post_barrier(LIR_OprFact::address(addr), data);
- } else {
- __ move(data, addr);
- }
-}
-
-void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
- BasicType type = x->basic_type();
- LIRItem src(x->object(), this);
- LIRItem off(x->offset(), this);
- LIRItem value(x->value(), this);
-
- src.load_item();
- off.load_nonconstant();
-
- // We can cope with a constant increment in an xadd
- if (! (x->is_add()
- && value.is_constant()
- && can_inline_as_constant(x->value()))) {
- value.load_item();
- }
-
- LIR_Opr dst = rlock_result(x, type);
- LIR_Opr data = value.result();
- bool is_obj = (type == T_ARRAY || type == T_OBJECT);
- LIR_Opr offset = off.result();
-
- if (data == dst) {
- LIR_Opr tmp = new_register(data->type());
- __ move(data, tmp);
- data = tmp;
- }
-
- LIR_Address* addr;
- if (offset->is_constant()) {
- jlong l = offset->as_jlong();
- assert((jlong)((jint)l) == l, "offset too large for constant");
- jint c = (jint)l;
- addr = new LIR_Address(src.result(), c, type);
- } else {
- addr = new LIR_Address(src.result(), offset, type);
- }
-
- LIR_Opr tmp = new_register(T_INT);
- LIR_Opr ptr = LIR_OprFact::illegalOpr;
-
- if (x->is_add()) {
- __ xadd(LIR_OprFact::address(addr), data, dst, tmp);
- } else {
- if (is_obj) {
- // Do the pre-write barrier, if any.
- ptr = new_pointer_register();
- __ add(src.result(), off.result(), ptr);
- pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- }
- __ xchg(LIR_OprFact::address(addr), data, dst, tmp);
- if (is_obj) {
- post_barrier(ptr, data);
- }
- }
-}
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -358,6 +358,16 @@
void C1_MacroAssembler::verified_entry() {
}
+void C1_MacroAssembler::load_parameter(int offset_in_words, Register reg) {
+ // rbp, + 0: link
+ // + 1: return address
+ // + 2: argument with offset 0
+ // + 3: argument with offset 1
+ // + 4: ...
+
+ ldr(reg, Address(rfp, (offset_in_words + 2) * BytesPerWord));
+}
+
#ifndef PRODUCT
void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -109,4 +109,6 @@
// This platform only uses signal-based null checks. The Label is not needed.
void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }
+ void load_parameter(int offset_in_words, Register reg);
+
#endif // CPU_AARCH64_VM_C1_MACROASSEMBLER_AARCH64_HPP
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -43,11 +43,6 @@
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_aarch64.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1ThreadLocalData.hpp"
-#endif
// Implementation of StubAssembler
@@ -173,31 +168,32 @@
~StubFrame();
};;
+void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
+ set_info(name, must_gc_arguments);
+ enter();
+}
+
+void StubAssembler::epilogue() {
+ leave();
+ ret(lr);
+}
#define __ _sasm->
StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
_sasm = sasm;
- __ set_info(name, must_gc_arguments);
- __ enter();
+ __ prologue(name, must_gc_arguments);
}
// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
- // rbp, + 0: link
- // + 1: return address
- // + 2: argument with offset 0
- // + 3: argument with offset 1
- // + 4: ...
-
- __ ldr(reg, Address(rfp, (offset_in_words + 2) * BytesPerWord));
+ __ load_parameter(offset_in_words, reg);
}
StubFrame::~StubFrame() {
- __ leave();
- __ ret(lr);
+ __ epilogue();
}
#undef __
@@ -1100,136 +1096,6 @@
}
break;
-#if INCLUDE_ALL_GCS
-
- case g1_pre_barrier_slow_id:
- {
- StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
- // arg0 : previous value of memory
-
- BarrierSet* bs = BarrierSet::barrier_set();
- if (bs->kind() != BarrierSet::G1BarrierSet) {
- __ mov(r0, (int)id);
- __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
- __ should_not_reach_here();
- break;
- }
-
- const Register pre_val = r0;
- const Register thread = rthread;
- const Register tmp = rscratch1;
-
- Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
- Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
- Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
-
- Label done;
- Label runtime;
-
- // Is marking still active?
- if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
- __ ldrw(tmp, in_progress);
- } else {
- assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
- __ ldrb(tmp, in_progress);
- }
- __ cbzw(tmp, done);
-
- // Can we store original value in the thread's buffer?
- __ ldr(tmp, queue_index);
- __ cbz(tmp, runtime);
-
- __ sub(tmp, tmp, wordSize);
- __ str(tmp, queue_index);
- __ ldr(rscratch2, buffer);
- __ add(tmp, tmp, rscratch2);
- f.load_argument(0, rscratch2);
- __ str(rscratch2, Address(tmp, 0));
- __ b(done);
-
- __ bind(runtime);
- __ push_call_clobbered_registers();
- f.load_argument(0, pre_val);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
- __ pop_call_clobbered_registers();
- __ bind(done);
- }
- break;
- case g1_post_barrier_slow_id:
- {
- StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
-
- BarrierSet* bs = BarrierSet::barrier_set();
- if (bs->kind() != BarrierSet::G1BarrierSet) {
- __ mov(r0, (int)id);
- __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), r0);
- __ should_not_reach_here();
- break;
- }
-
- // arg0: store_address
- Address store_addr(rfp, 2*BytesPerWord);
-
- Label done;
- Label runtime;
-
- // At this point we know new_value is non-NULL and the new_value crosses regions.
- // Must check to see if card is already dirty
-
- const Register thread = rthread;
-
- Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
- Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
-
- const Register card_offset = rscratch2;
- // LR is free here, so we can use it to hold the byte_map_base.
- const Register byte_map_base = lr;
-
- assert_different_registers(card_offset, byte_map_base, rscratch1);
-
- f.load_argument(0, card_offset);
- __ lsr(card_offset, card_offset, CardTable::card_shift);
- __ load_byte_map_base(byte_map_base);
- __ ldrb(rscratch1, Address(byte_map_base, card_offset));
- __ cmpw(rscratch1, (int)G1CardTable::g1_young_card_val());
- __ br(Assembler::EQ, done);
-
- assert((int)CardTable::dirty_card_val() == 0, "must be 0");
-
- __ membar(Assembler::StoreLoad);
- __ ldrb(rscratch1, Address(byte_map_base, card_offset));
- __ cbzw(rscratch1, done);
-
- // storing region crossing non-NULL, card is clean.
- // dirty card and log.
- __ strb(zr, Address(byte_map_base, card_offset));
-
- // Convert card offset into an address in card_addr
- Register card_addr = card_offset;
- __ add(card_addr, byte_map_base, card_addr);
-
- __ ldr(rscratch1, queue_index);
- __ cbz(rscratch1, runtime);
- __ sub(rscratch1, rscratch1, wordSize);
- __ str(rscratch1, queue_index);
-
- // Reuse LR to hold buffer_addr
- const Register buffer_addr = lr;
-
- __ ldr(buffer_addr, buffer);
- __ str(card_addr, Address(buffer_addr, rscratch1));
- __ b(done);
-
- __ bind(runtime);
- __ push_call_clobbered_registers();
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
- __ pop_call_clobbered_registers();
- __ bind(done);
-
- }
- break;
-#endif
-
case predicate_failed_trap_id:
{
StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
--- a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -24,6 +24,9 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/g1/c1/g1BarrierSetC1.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1CardTable.hpp"
@@ -307,4 +310,167 @@
}
+#ifdef COMPILER1
+
#undef __
+#define __ ce->masm()->
+
+void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
+ G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+ // At this point we know that marking is in progress.
+ // If do_load() is true then we have to emit the
+ // load of the previous value; otherwise it has already
+ // been loaded into _pre_val.
+
+ __ bind(*stub->entry());
+
+ assert(stub->pre_val()->is_register(), "Precondition.");
+
+ Register pre_val_reg = stub->pre_val()->as_register();
+
+ if (stub->do_load()) {
+ ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
+ }
+ __ cbz(pre_val_reg, *stub->continuation());
+ ce->store_parameter(stub->pre_val()->as_register(), 0);
+ __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
+ __ b(*stub->continuation());
+}
+
+void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
+ G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+ __ bind(*stub->entry());
+ assert(stub->addr()->is_register(), "Precondition.");
+ assert(stub->new_val()->is_register(), "Precondition.");
+ Register new_val_reg = stub->new_val()->as_register();
+ __ cbz(new_val_reg, *stub->continuation());
+ ce->store_parameter(stub->addr()->as_pointer_register(), 0);
+ __ far_call(RuntimeAddress(bs->post_barrier_c1_runtime_code_blob()->code_begin()));
+ __ b(*stub->continuation());
+}
+
+#undef __
+
+#define __ sasm->
+
+void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
+ __ prologue("g1_pre_barrier", false);
+
+ // arg0 : previous value of memory
+
+ BarrierSet* bs = BarrierSet::barrier_set();
+
+ const Register pre_val = r0;
+ const Register thread = rthread;
+ const Register tmp = rscratch1;
+
+ Address in_progress(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
+ Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
+ Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
+
+ Label done;
+ Label runtime;
+
+ // Is marking still active?
+ if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+ __ ldrw(tmp, in_progress);
+ } else {
+ assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+ __ ldrb(tmp, in_progress);
+ }
+ __ cbzw(tmp, done);
+
+ // Can we store original value in the thread's buffer?
+ __ ldr(tmp, queue_index);
+ __ cbz(tmp, runtime);
+
+ __ sub(tmp, tmp, wordSize);
+ __ str(tmp, queue_index);
+ __ ldr(rscratch2, buffer);
+ __ add(tmp, tmp, rscratch2);
+ __ load_parameter(0, rscratch2);
+ __ str(rscratch2, Address(tmp, 0));
+ __ b(done);
+
+ __ bind(runtime);
+ __ push_call_clobbered_registers();
+ __ load_parameter(0, pre_val);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
+ __ pop_call_clobbered_registers();
+ __ bind(done);
+
+ __ epilogue();
+}
+
+void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
+ __ prologue("g1_post_barrier", false);
+
+ // arg0: store_address
+ Address store_addr(rfp, 2*BytesPerWord);
+
+ BarrierSet* bs = BarrierSet::barrier_set();
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
+
+ Label done;
+ Label runtime;
+
+ // At this point we know new_value is non-NULL and the new_value crosses regions.
+ // Must check to see if card is already dirty
+
+ const Register thread = rthread;
+
+ Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
+ Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
+
+ const Register card_offset = rscratch2;
+ // LR is free here, so we can use it to hold the byte_map_base.
+ const Register byte_map_base = lr;
+
+ assert_different_registers(card_offset, byte_map_base, rscratch1);
+
+ __ load_parameter(0, card_offset);
+ __ lsr(card_offset, card_offset, CardTable::card_shift);
+ __ load_byte_map_base(byte_map_base);
+ __ ldrb(rscratch1, Address(byte_map_base, card_offset));
+ __ cmpw(rscratch1, (int)G1CardTable::g1_young_card_val());
+ __ br(Assembler::EQ, done);
+
+ assert((int)CardTable::dirty_card_val() == 0, "must be 0");
+
+ __ membar(Assembler::StoreLoad);
+ __ ldrb(rscratch1, Address(byte_map_base, card_offset));
+ __ cbzw(rscratch1, done);
+
+ // storing region crossing non-NULL, card is clean.
+ // dirty card and log.
+ __ strb(zr, Address(byte_map_base, card_offset));
+
+ // Convert card offset into an address in card_addr
+ Register card_addr = card_offset;
+ __ add(card_addr, byte_map_base, card_addr);
+
+ __ ldr(rscratch1, queue_index);
+ __ cbz(rscratch1, runtime);
+ __ sub(rscratch1, rscratch1, wordSize);
+ __ str(rscratch1, queue_index);
+
+ // Reuse LR to hold buffer_addr
+ const Register buffer_addr = lr;
+
+ __ ldr(buffer_addr, buffer);
+ __ str(card_addr, Address(buffer_addr, rscratch1));
+ __ b(done);
+
+ __ bind(runtime);
+ __ push_call_clobbered_registers();
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
+ __ pop_call_clobbered_registers();
+ __ bind(done);
+ __ epilogue();
+}
+
+#undef __
+
+#endif // COMPILER1
--- a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -27,6 +27,12 @@
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
+#include "utilities/macros.hpp"
+
+class LIR_Assembler;
+class StubAssembler;
+class G1PreBarrierStub;
+class G1PostBarrierStub;
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
@@ -54,6 +60,14 @@
Address dst, Register val, Register tmp1, Register tmp2);
public:
+#ifdef COMPILER1
+ void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
+ void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);
+
+ void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
+ void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
+#endif
+
void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Address src, Register tmp1, Register tmp_thread);
};
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
+#include "runtime/jniHandles.hpp"
#define __ masm->
@@ -64,3 +65,10 @@
default: Unimplemented();
}
}
+
+void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register robj, Register tmp, Label& slowpath) {
+ // If mask changes we need to ensure that the inverse is still encodable as an immediate
+ STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
+ __ andr(robj, robj, ~JNIHandles::weak_tag_mask);
+ __ ldr(robj, Address(robj, 0)); // *obj
+}
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -40,6 +40,8 @@
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2);
+ virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register robj, Register tmp, Label& slowpath);
+
virtual void barrier_stubs_init() {}
};
--- a/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -56,7 +56,7 @@
__ strb(zr, Address(obj, rscratch1));
__ bind(L_already_dirty);
} else {
- if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
+ if (ct->scanned_concurrently()) {
__ membar(Assembler::StoreStore);
}
__ strb(zr, Address(obj, rscratch1));
@@ -79,7 +79,7 @@
const Register count = end; // 'end' register contains bytes count now
__ load_byte_map_base(scratch);
__ add(start, start, scratch);
- if (UseConcMarkSweepGC) {
+ if (ct->scanned_concurrently()) {
__ membar(__ StoreStore);
}
__ bind(L_loop);
--- a/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -25,6 +25,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
@@ -82,11 +84,9 @@
// robj ^ rcounter ^ rcounter == robj
// robj is address dependent on rcounter.
- // If mask changes we need to ensure that the inverse is still encodable as an immediate
- STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
- __ andr(robj, robj, ~JNIHandles::weak_tag_mask);
+ BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->try_resolve_jobject_in_native(masm, robj, rscratch1, slow);
- __ ldr(robj, Address(robj, 0)); // *obj
__ lsr(roffset, c_rarg2, 2); // offset
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
@@ -177,4 +177,3 @@
address JNI_FastGetField::generate_fast_get_double_field() {
return generate_fast_get_int_field0(T_DOUBLE);
}
-
--- a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -30,6 +30,7 @@
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/frame.inline.hpp"
#define __ _masm->
--- a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -33,9 +33,6 @@
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_arm.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#endif // INCLUDE_ALL_GCS
#define __ ce->masm()->
@@ -466,45 +463,4 @@
__ b(_continuation);
}
-/////////////////////////////////////////////////////////////////////////////
-#if INCLUDE_ALL_GCS
-
-void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
- // At this point we know that marking is in progress.
- // If do_load() is true then we have to emit the
- // load of the previous value; otherwise it has already
- // been loaded into _pre_val.
-
- __ bind(_entry);
- assert(pre_val()->is_register(), "Precondition.");
-
- Register pre_val_reg = pre_val()->as_register();
-
- if (do_load()) {
- ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
- }
-
- __ cbz(pre_val_reg, _continuation);
- ce->verify_reserved_argument_area_size(1);
- __ str(pre_val_reg, Address(SP));
- __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id), relocInfo::runtime_call_type);
-
- __ b(_continuation);
-}
-
-void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
- __ bind(_entry);
- assert(addr()->is_register(), "Precondition.");
- assert(new_val()->is_register(), "Precondition.");
- Register new_val_reg = new_val()->as_register();
- __ cbz(new_val_reg, _continuation);
- ce->verify_reserved_argument_area_size(1);
- __ str(addr()->as_pointer_register(), Address(SP));
- __ call(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id), relocInfo::runtime_call_type);
- __ b(_continuation);
-}
-
-#endif // INCLUDE_ALL_GCS
-/////////////////////////////////////////////////////////////////////////////
-
#undef __
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -34,6 +34,7 @@
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "ci/ciUtilities.hpp"
+#include "gc/shared/c1/barrierSetC1.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -542,88 +543,17 @@
}
}
+void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
+ LIR_Opr tmp1 = FrameMap::R0_oop_opr;
+ LIR_Opr tmp2 = FrameMap::R1_oop_opr;
+ LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
+ __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
+}
+
//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------
-
-void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
- assert(x->is_pinned(),"");
- bool needs_range_check = x->compute_needs_range_check();
- bool use_length = x->length() != NULL;
- bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
- bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
- !get_jobject_constant(x->value())->is_null_object() ||
- x->should_profile());
-
- LIRItem array(x->array(), this);
- LIRItem index(x->index(), this);
- LIRItem value(x->value(), this);
- LIRItem length(this);
-
- array.load_item();
- index.load_nonconstant();
-
- if (use_length && needs_range_check) {
- length.set_instruction(x->length());
- length.load_item();
- }
- if (needs_store_check || x->check_boolean()) {
- value.load_item();
- } else {
- value.load_for_store(x->elt_type());
- }
-
- set_no_result(x);
-
- // the CodeEmitInfo must be duplicated for each different
- // LIR-instruction because spilling can occur anywhere between two
- // instructions and so the debug information must be different
- CodeEmitInfo* range_check_info = state_for(x);
- CodeEmitInfo* null_check_info = NULL;
- if (x->needs_null_check()) {
- null_check_info = new CodeEmitInfo(range_check_info);
- }
-
- // emit array address setup early so it schedules better
- LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
-
- if (GenerateRangeChecks && needs_range_check) {
- if (use_length) {
- __ cmp(lir_cond_belowEqual, length.result(), index.result());
- __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
- } else {
- array_range_check(array.result(), index.result(), null_check_info, range_check_info);
- // range_check also does the null check
- null_check_info = NULL;
- }
- }
-
- if (GenerateArrayStoreCheck && needs_store_check) {
- LIR_Opr tmp1 = FrameMap::R0_oop_opr;
- LIR_Opr tmp2 = FrameMap::R1_oop_opr;
- CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
- __ store_check(value.result(), array.result(), tmp1, tmp2,
- LIR_OprFact::illegalOpr, store_check_info,
- x->profiled_method(), x->profiled_bci());
- }
-
-#if INCLUDE_ALL_GCS
- if (obj_store) {
- // Needs GC write barriers.
- pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- }
-#endif // INCLUDE_ALL_GCS
-
- LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
- __ move(result, array_addr, null_check_info);
- if (obj_store) {
- post_barrier(LIR_OprFact::address(array_addr), value.result());
- }
-}
-
-
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
assert(x->is_pinned(),"");
LIRItem obj(x->obj(), this);
@@ -1060,56 +990,52 @@
#endif // __SOFTFP__
}
-
-void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
- assert(x->number_of_arguments() == 4, "wrong type");
- LIRItem obj (x->argument_at(0), this); // object
- LIRItem offset(x->argument_at(1), this); // offset of field
- LIRItem cmp (x->argument_at(2), this); // value to compare with field
- LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
-
- LIR_Opr addr = new_pointer_register();
+LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
+ LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
LIR_Opr tmp1 = LIR_OprFact::illegalOpr;
LIR_Opr tmp2 = LIR_OprFact::illegalOpr;
-
- // get address of field
- obj.load_item();
- offset.load_item();
- cmp.load_item();
- val.load_item();
-
- __ add(obj.result(), offset.result(), addr);
- LIR_Opr result = rlock_result(x);
-
- if (type == objectType) {
-#if INCLUDE_ALL_GCS
- // Do the pre-write barrier, if any.
- pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
-#endif // INCLUDE_ALL_GCS
+ new_value.load_item();
+ cmp_value.load_item();
+ LIR_Opr result = new_register(T_INT);
+ if (type == T_OBJECT || type == T_ARRAY) {
#ifdef AARCH64
if (UseCompressedOops) {
tmp1 = new_pointer_register();
tmp2 = new_pointer_register();
}
-#endif // AARCH64
- __ cas_obj(addr, cmp.result(), val.result(), tmp1, tmp2, result);
- post_barrier(addr, val.result());
- }
- else if (type == intType) {
- __ cas_int(addr, cmp.result(), val.result(), tmp1, tmp1, result);
- }
- else if (type == longType) {
+#endif
+ __ cas_obj(addr, cmp_value.result(), new_value.result(), new_register(T_INT), new_register(T_INT), result);
+ } else if (type == T_INT) {
+ __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), tmp1, tmp1, result);
+ } else if (type == T_LONG) {
#ifndef AARCH64
tmp1 = new_register(T_LONG);
#endif // !AARCH64
- __ cas_long(addr, cmp.result(), val.result(), tmp1, tmp2, result);
- }
- else {
+ __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), tmp1, tmp2, result);
+ } else {
ShouldNotReachHere();
}
+ return result;
}
+LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
+ bool is_oop = type == T_OBJECT || type == T_ARRAY;
+ LIR_Opr result = new_register(type);
+ value.load_item();
+ assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
+ LIR_Opr tmp = (UseCompressedOops && is_oop) ? new_pointer_register() : LIR_OprFact::illegalOpr;
+ __ xchg(addr_ptr, data, dst, tmp);
+ return result;
+}
+
+LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
+ LIR_Opr result = new_register(type);
+ value.load_item();
+ assert(type == T_INT LP64_ONLY( || type == T_LONG), "unexpected type");
+ LIR_Opr tmp = new_register(type);
+ __ xadd(addr, value.result(), result, tmp);
+ return result;
+}
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
address runtime_func;
@@ -1669,110 +1595,3 @@
// TODO-AARCH64 implement with ldar instruction
__ load(address, result, info, lir_patch_none);
}
-
-void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
- BasicType type, bool is_volatile) {
-#ifdef AARCH64
- __ load(new LIR_Address(src, offset, type), dst);
-#else
- assert(offset->is_single_cpu(), "must be");
- if (is_volatile && dst->is_double_cpu()) {
- LIR_Opr tmp = new_pointer_register();
- __ add(src, offset, tmp);
- __ volatile_load_mem_reg(new LIR_Address(tmp, (intx)0, type), dst, NULL);
- } else if (type == T_FLOAT || type == T_DOUBLE) {
- // fld doesn't have indexed addressing mode
- LIR_Opr tmp = new_register(T_INT);
- __ add(src, offset, tmp);
- __ load(new LIR_Address(tmp, (intx)0, type), dst);
- } else {
- __ load(new LIR_Address(src, offset, type), dst);
- }
-#endif // AARCH64
-}
-
-void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
- BasicType type, bool is_volatile) {
-#ifdef AARCH64
- LIR_Address* addr = new LIR_Address(src, offset, type);
- if (type == T_ARRAY || type == T_OBJECT) {
- pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- __ move(data, addr);
- assert(src->is_register(), "must be register");
- post_barrier(LIR_OprFact::address(addr), data);
- } else {
- __ move(data, addr);
- }
-#else
- assert(offset->is_single_cpu(), "must be");
- if (is_volatile && data->is_double_cpu()) {
- LIR_Opr tmp = new_register(T_INT);
- __ add(src, offset, tmp);
- __ volatile_store_mem_reg(data, new LIR_Address(tmp, (intx)0, type), NULL);
- } else if (type == T_FLOAT || type == T_DOUBLE) {
- // fst doesn't have indexed addressing mode
- LIR_Opr tmp = new_register(T_INT);
- __ add(src, offset, tmp);
- __ move(data, new LIR_Address(tmp, (intx)0, type));
- } else {
- LIR_Address* addr = new LIR_Address(src, offset, type);
- bool is_obj = (type == T_ARRAY || type == T_OBJECT);
-#if INCLUDE_ALL_GCS
- if (is_obj) {
- // Do the pre-write barrier, if any.
- pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- }
-#endif // INCLUDE_ALL_GCS
- __ move(data, addr);
- if (is_obj) {
- assert(src->is_register(), "must be register");
- post_barrier(LIR_OprFact::address(addr), data);
- }
- }
-#endif // AARCH64
-}
-
-void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
- BasicType type = x->basic_type();
- LIRItem src(x->object(), this);
- LIRItem off(x->offset(), this);
- LIRItem value(x->value(), this);
-
- src.load_item();
- if (x->is_add()) {
- value.load_nonconstant();
- } else {
- value.load_item();
- }
- off.load_nonconstant();
-
- LIR_Opr dst = rlock_result(x, type);
- LIR_Opr data = value.result();
- bool is_obj = (type == T_ARRAY || type == T_OBJECT);
-
- assert (type == T_INT || type == T_LONG || (!x->is_add() && is_obj), "unexpected type");
- LIR_Opr addr_ptr = new_pointer_register();
-
- __ add(src.result(), off.result(), addr_ptr);
-
- LIR_Address* addr = new LIR_Address(addr_ptr, (intx)0, type);
-
- if (x->is_add()) {
- LIR_Opr tmp = new_register(type);
- __ xadd(addr_ptr, data, dst, tmp);
- } else {
- LIR_Opr tmp = (UseCompressedOops && is_obj) ? new_pointer_register() : LIR_OprFact::illegalOpr;
- if (is_obj) {
- // Do the pre-write barrier, if any.
- pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- }
- __ xchg(addr_ptr, data, dst, tmp);
- if (is_obj) {
- // Seems to be a precise address
- post_barrier(LIR_OprFact::address(addr), data);
- }
- }
-}
--- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -42,11 +42,6 @@
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_arm.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1ThreadLocalData.hpp"
-#endif
// Note: Rtemp usage is this file should not impact C2 and should be
// correct as long as it is not implicitly used in lower layers (the
@@ -356,6 +351,13 @@
restore_live_registers(sasm, true, true, false, restore_fpu_registers);
}
+void StubAssembler::save_live_registers() {
+ save_live_registers(this);
+}
+
+void StubAssembler::restore_live_registers_without_return() {
+ restore_live_registers_without_return(this);
+}
void Runtime1::initialize_pd() {
}
@@ -533,201 +535,6 @@
}
break;
-#if INCLUDE_ALL_GCS
- case g1_pre_barrier_slow_id:
- {
- // Input:
- // - pre_val pushed on the stack
-
- __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
-
- BarrierSet* bs = BarrierSet::barrier_set();
- if (bs->kind() != BarrierSet::G1BarrierSet) {
- __ mov(R0, (int)id);
- __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R0);
- __ should_not_reach_here();
- break;
- }
-
- // save at least the registers that need saving if the runtime is called
-#ifdef AARCH64
- __ raw_push(R0, R1);
- __ raw_push(R2, R3);
- const int nb_saved_regs = 4;
-#else // AARCH64
- const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
- const int nb_saved_regs = 6;
- assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
- __ push(saved_regs);
-#endif // AARCH64
-
- const Register r_pre_val_0 = R0; // must be R0, to be ready for the runtime call
- const Register r_index_1 = R1;
- const Register r_buffer_2 = R2;
-
- Address queue_active(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
- Address queue_index(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
- Address buffer(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
-
- Label done;
- Label runtime;
-
- // Is marking still active?
- assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
- __ ldrb(R1, queue_active);
- __ cbz(R1, done);
-
- __ ldr(r_index_1, queue_index);
- __ ldr(r_pre_val_0, Address(SP, nb_saved_regs*wordSize));
- __ ldr(r_buffer_2, buffer);
-
- __ subs(r_index_1, r_index_1, wordSize);
- __ b(runtime, lt);
-
- __ str(r_index_1, queue_index);
- __ str(r_pre_val_0, Address(r_buffer_2, r_index_1));
-
- __ bind(done);
-
-#ifdef AARCH64
- __ raw_pop(R2, R3);
- __ raw_pop(R0, R1);
-#else // AARCH64
- __ pop(saved_regs);
-#endif // AARCH64
-
- __ ret();
-
- __ bind(runtime);
-
- save_live_registers(sasm);
-
- assert(r_pre_val_0 == c_rarg0, "pre_val should be in R0");
- __ mov(c_rarg1, Rthread);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, c_rarg1);
-
- restore_live_registers_without_return(sasm);
-
- __ b(done);
- }
- break;
- case g1_post_barrier_slow_id:
- {
- // Input:
- // - store_addr, pushed on the stack
-
- __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
-
- BarrierSet* bs = BarrierSet::barrier_set();
- if (bs->kind() != BarrierSet::G1BarrierSet) {
- __ mov(R0, (int)id);
- __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R0);
- __ should_not_reach_here();
- break;
- }
-
- Label done;
- Label recheck;
- Label runtime;
-
- Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
- Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
-
- AddressLiteral cardtable(ci_card_table_address_as<address>(), relocInfo::none);
-
- // save at least the registers that need saving if the runtime is called
-#ifdef AARCH64
- __ raw_push(R0, R1);
- __ raw_push(R2, R3);
- const int nb_saved_regs = 4;
-#else // AARCH64
- const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
- const int nb_saved_regs = 6;
- assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
- __ push(saved_regs);
-#endif // AARCH64
-
- const Register r_card_addr_0 = R0; // must be R0 for the slow case
- const Register r_obj_0 = R0;
- const Register r_card_base_1 = R1;
- const Register r_tmp2 = R2;
- const Register r_index_2 = R2;
- const Register r_buffer_3 = R3;
- const Register tmp1 = Rtemp;
-
- __ ldr(r_obj_0, Address(SP, nb_saved_regs*wordSize));
- // Note: there is a comment in x86 code about not using
- // ExternalAddress / lea, due to relocation not working
- // properly for that address. Should be OK for arm, where we
- // explicitly specify that 'cardtable' has a relocInfo::none
- // type.
- __ lea(r_card_base_1, cardtable);
- __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTable::card_shift));
-
- // first quick check without barrier
- __ ldrb(r_tmp2, Address(r_card_addr_0));
-
- __ cmp(r_tmp2, (int)G1CardTable::g1_young_card_val());
- __ b(recheck, ne);
-
- __ bind(done);
-
-#ifdef AARCH64
- __ raw_pop(R2, R3);
- __ raw_pop(R0, R1);
-#else // AARCH64
- __ pop(saved_regs);
-#endif // AARCH64
-
- __ ret();
-
- __ bind(recheck);
-
- __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp1);
-
- // reload card state after the barrier that ensures the stored oop was visible
- __ ldrb(r_tmp2, Address(r_card_addr_0));
-
- assert(CardTable::dirty_card_val() == 0, "adjust this code");
- __ cbz(r_tmp2, done);
-
- // storing region crossing non-NULL, card is clean.
- // dirty card and log.
-
- assert(0 == (int)CardTable::dirty_card_val(), "adjust this code");
- if ((ci_card_table_address_as<intptr_t>() & 0xff) == 0) {
- // Card table is aligned so the lowest byte of the table address base is zero.
- __ strb(r_card_base_1, Address(r_card_addr_0));
- } else {
- __ strb(__ zero_register(r_tmp2), Address(r_card_addr_0));
- }
-
- __ ldr(r_index_2, queue_index);
- __ ldr(r_buffer_3, buffer);
-
- __ subs(r_index_2, r_index_2, wordSize);
- __ b(runtime, lt); // go to runtime if now negative
-
- __ str(r_index_2, queue_index);
-
- __ str(r_card_addr_0, Address(r_buffer_3, r_index_2));
-
- __ b(done);
-
- __ bind(runtime);
-
- save_live_registers(sasm);
-
- assert(r_card_addr_0 == c_rarg0, "card_addr should be in R0");
- __ mov(c_rarg1, Rthread);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), c_rarg0, c_rarg1);
-
- restore_live_registers_without_return(sasm);
-
- __ b(done);
- }
- break;
-#endif // INCLUDE_ALL_GCS
case new_instance_id:
case fast_new_instance_id:
case fast_new_instance_init_check_id:
--- a/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -32,6 +32,11 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#include "utilities/macros.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/g1/c1/g1BarrierSetC1.hpp"
+#endif
#define __ masm->
@@ -120,3 +125,243 @@
#endif // !R9_IS_SCRATCHED
#endif // !AARCH64
}
+
+#ifdef COMPILER1
+
+#undef __
+#define __ ce->masm()->
+
+void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
+ G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+ // At this point we know that marking is in progress.
+ // If do_load() is true then we have to emit the
+ // load of the previous value; otherwise it has already
+ // been loaded into _pre_val.
+
+ __ bind(*stub->entry());
+ assert(stub->pre_val()->is_register(), "Precondition.");
+
+ Register pre_val_reg = stub->pre_val()->as_register();
+
+ if (stub->do_load()) {
+ ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
+ }
+
+ __ cbz(pre_val_reg, *stub->continuation());
+ ce->verify_reserved_argument_area_size(1);
+ __ str(pre_val_reg, Address(SP));
+ __ call(bs->pre_barrier_c1_runtime_code_blob()->code_begin(), relocInfo::runtime_call_type);
+
+ __ b(*stub->continuation());
+}
+
+void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
+ G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+ __ bind(*stub->entry());
+ assert(stub->addr()->is_register(), "Precondition.");
+ assert(stub->new_val()->is_register(), "Precondition.");
+ Register new_val_reg = stub->new_val()->as_register();
+ __ cbz(new_val_reg, *stub->continuation());
+ ce->verify_reserved_argument_area_size(1);
+ __ str(stub->addr()->as_pointer_register(), Address(SP));
+ __ call(bs->post_barrier_c1_runtime_code_blob()->code_begin(), relocInfo::runtime_call_type);
+ __ b(*stub->continuation());
+}
+
+#undef __
+#define __ sasm->
+
+void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
+ // Input:
+ // - pre_val pushed on the stack
+
+ __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
+
+ BarrierSet* bs = BarrierSet::barrier_set();
+ if (bs->kind() != BarrierSet::G1BarrierSet) {
+ __ mov(R0, (int)id);
+ __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R0);
+ __ should_not_reach_here();
+ break;
+ }
+
+ // save at least the registers that need saving if the runtime is called
+#ifdef AARCH64
+ __ raw_push(R0, R1);
+ __ raw_push(R2, R3);
+ const int nb_saved_regs = 4;
+#else // AARCH64
+ const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
+ const int nb_saved_regs = 6;
+ assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
+ __ push(saved_regs);
+#endif // AARCH64
+
+ const Register r_pre_val_0 = R0; // must be R0, to be ready for the runtime call
+ const Register r_index_1 = R1;
+ const Register r_buffer_2 = R2;
+
+ Address queue_active(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
+ Address queue_index(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
+ Address buffer(Rthread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
+
+ Label done;
+ Label runtime;
+
+ // Is marking still active?
+ assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+ __ ldrb(R1, queue_active);
+ __ cbz(R1, done);
+
+ __ ldr(r_index_1, queue_index);
+ __ ldr(r_pre_val_0, Address(SP, nb_saved_regs*wordSize));
+ __ ldr(r_buffer_2, buffer);
+
+ __ subs(r_index_1, r_index_1, wordSize);
+ __ b(runtime, lt);
+
+ __ str(r_index_1, queue_index);
+ __ str(r_pre_val_0, Address(r_buffer_2, r_index_1));
+
+ __ bind(done);
+
+#ifdef AARCH64
+ __ raw_pop(R2, R3);
+ __ raw_pop(R0, R1);
+#else // AARCH64
+ __ pop(saved_regs);
+#endif // AARCH64
+
+ __ ret();
+
+ __ bind(runtime);
+
+ __ save_live_registers();
+
+ assert(r_pre_val_0 == c_rarg0, "pre_val should be in R0");
+ __ mov(c_rarg1, Rthread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, c_rarg1);
+
+ __ restore_live_registers_without_return();
+
+ __ b(done);
+}
+
+void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
+ // Input:
+ // - store_addr, pushed on the stack
+
+ __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
+
+ BarrierSet* bs = BarrierSet::barrier_set();
+ if (bs->kind() != BarrierSet::G1BarrierSet) {
+ __ mov(R0, (int)id);
+ __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), R0);
+ __ should_not_reach_here();
+ break;
+ }
+
+ Label done;
+ Label recheck;
+ Label runtime;
+
+ Address queue_index(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
+ Address buffer(Rthread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
+
+ AddressLiteral cardtable(ci_card_table_address_as<address>(), relocInfo::none);
+
+ // save at least the registers that need saving if the runtime is called
+#ifdef AARCH64
+ __ raw_push(R0, R1);
+ __ raw_push(R2, R3);
+ const int nb_saved_regs = 4;
+#else // AARCH64
+ const RegisterSet saved_regs = RegisterSet(R0,R3) | RegisterSet(R12) | RegisterSet(LR);
+ const int nb_saved_regs = 6;
+ assert(nb_saved_regs == saved_regs.size(), "fix nb_saved_regs");
+ __ push(saved_regs);
+#endif // AARCH64
+
+ const Register r_card_addr_0 = R0; // must be R0 for the slow case
+ const Register r_obj_0 = R0;
+ const Register r_card_base_1 = R1;
+ const Register r_tmp2 = R2;
+ const Register r_index_2 = R2;
+ const Register r_buffer_3 = R3;
+ const Register tmp1 = Rtemp;
+
+ __ ldr(r_obj_0, Address(SP, nb_saved_regs*wordSize));
+ // Note: there is a comment in x86 code about not using
+ // ExternalAddress / lea, due to relocation not working
+ // properly for that address. Should be OK for arm, where we
+ // explicitly specify that 'cardtable' has a relocInfo::none
+ // type.
+ __ lea(r_card_base_1, cardtable);
+ __ add(r_card_addr_0, r_card_base_1, AsmOperand(r_obj_0, lsr, CardTable::card_shift));
+
+ // first quick check without barrier
+ __ ldrb(r_tmp2, Address(r_card_addr_0));
+
+ __ cmp(r_tmp2, (int)G1CardTable::g1_young_card_val());
+ __ b(recheck, ne);
+
+ __ bind(done);
+
+#ifdef AARCH64
+ __ raw_pop(R2, R3);
+ __ raw_pop(R0, R1);
+#else // AARCH64
+ __ pop(saved_regs);
+#endif // AARCH64
+
+ __ ret();
+
+ __ bind(recheck);
+
+ __ membar(MacroAssembler::Membar_mask_bits(MacroAssembler::StoreLoad), tmp1);
+
+ // reload card state after the barrier that ensures the stored oop was visible
+ __ ldrb(r_tmp2, Address(r_card_addr_0));
+
+ assert(CardTable::dirty_card_val() == 0, "adjust this code");
+ __ cbz(r_tmp2, done);
+
+ // storing region crossing non-NULL, card is clean.
+ // dirty card and log.
+
+ assert(0 == (int)CardTable::dirty_card_val(), "adjust this code");
+ if ((ci_card_table_address_as<intptr_t>() & 0xff) == 0) {
+ // Card table is aligned so the lowest byte of the table address base is zero.
+ __ strb(r_card_base_1, Address(r_card_addr_0));
+ } else {
+ __ strb(__ zero_register(r_tmp2), Address(r_card_addr_0));
+ }
+
+ __ ldr(r_index_2, queue_index);
+ __ ldr(r_buffer_3, buffer);
+
+ __ subs(r_index_2, r_index_2, wordSize);
+ __ b(runtime, lt); // go to runtime if now negative
+
+ __ str(r_index_2, queue_index);
+
+ __ str(r_card_addr_0, Address(r_buffer_3, r_index_2));
+
+ __ b(done);
+
+ __ bind(runtime);
+
+ __ save_live_registers();
+
+ assert(r_card_addr_0 == c_rarg0, "card_addr should be in R0");
+ __ mov(c_rarg1, Rthread);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), c_rarg0, c_rarg1);
+
+ __ restore_live_registers_without_return();
+
+ __ b(done);
+}
+
+#undef __
+
+#endif // COMPILER1
--- a/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -27,6 +27,12 @@
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
+#include "utilities/macros.hpp"
+
+class LIR_Assembler;
+class StubAssembler;
+class G1PreBarrierStub;
+class G1PostBarrierStub;
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
@@ -34,6 +40,14 @@
Register addr, Register count, int callee_saved_regs);
void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, Register tmp);
+
+#ifdef COMPILER1
+ void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
+ void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);
+
+ void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
+ void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
+#endif
};
#endif // CPU_ARM_GC_G1_G1BARRIERSETASSEMBLER_ARM_HPP
--- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -33,9 +33,6 @@
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#endif // INCLUDE_ALL_GCS
#define __ ce->masm()->
@@ -470,58 +467,4 @@
__ b(_continuation);
}
-
-///////////////////////////////////////////////////////////////////////////////////
-#if INCLUDE_ALL_GCS
-
-void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
- // At this point we know that marking is in progress.
- // If do_load() is true then we have to emit the
- // load of the previous value; otherwise it has already
- // been loaded into _pre_val.
-
- __ bind(_entry);
-
- assert(pre_val()->is_register(), "Precondition.");
- Register pre_val_reg = pre_val()->as_register();
-
- if (do_load()) {
- ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
- }
-
- __ cmpdi(CCR0, pre_val_reg, 0);
- __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), _continuation);
-
- address stub = Runtime1::entry_for(Runtime1::Runtime1::g1_pre_barrier_slow_id);
- //__ load_const_optimized(R0, stub);
- __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
- __ std(pre_val_reg, -8, R1_SP); // Pass pre_val on stack.
- __ mtctr(R0);
- __ bctrl();
- __ b(_continuation);
-}
-
-void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
- __ bind(_entry);
-
- assert(addr()->is_register(), "Precondition.");
- assert(new_val()->is_register(), "Precondition.");
- Register addr_reg = addr()->as_pointer_register();
- Register new_val_reg = new_val()->as_register();
-
- __ cmpdi(CCR0, new_val_reg, 0);
- __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), _continuation);
-
- address stub = Runtime1::entry_for(Runtime1::Runtime1::g1_post_barrier_slow_id);
- //__ load_const_optimized(R0, stub);
- __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
- __ mtctr(R0);
- __ mr(R0, addr_reg); // Pass addr in R0.
- __ bctrl();
- __ b(_continuation);
-}
-
-#endif // INCLUDE_ALL_GCS
-///////////////////////////////////////////////////////////////////////////////////
-
#undef __
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -2978,7 +2978,9 @@
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
- const Register Rptr = src->as_pointer_register(),
+ const LIR_Address *addr = src->as_address_ptr();
+ assert(addr->disp() == 0 && addr->index()->is_illegal(), "use leal!");
+ const Register Rptr = addr->base()->as_pointer_register(),
Rtmp = tmp->as_register();
Register Rco = noreg;
if (UseCompressedOops && data->is_oop()) {
--- a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -149,7 +149,12 @@
// Accumulate fixed displacements.
if (index->is_constant()) {
- large_disp += (intx)(index->as_constant_ptr()->as_jint()) << shift;
+ LIR_Const *constant = index->as_constant_ptr();
+ if (constant->type() == T_LONG) {
+ large_disp += constant->as_jlong() << shift;
+ } else {
+ large_disp += (intx)(constant->as_jint()) << shift;
+ }
index = LIR_OprFact::illegalOpr;
}
@@ -190,7 +195,7 @@
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
- BasicType type, bool needs_card_mark) {
+ BasicType type) {
int elem_size = type2aelembytes(type);
int shift = exact_log2(elem_size);
@@ -230,13 +235,7 @@
__ add(index_opr, array_opr, base_opr);
}
}
- if (needs_card_mark) {
- LIR_Opr ptr = new_pointer_register();
- __ add(base_opr, LIR_OprFact::intptrConst(offset), ptr);
- return new LIR_Address(ptr, type);
- } else {
- return new LIR_Address(base_opr, offset, type);
- }
+ return new LIR_Address(base_opr, offset, type);
}
@@ -320,80 +319,12 @@
// visitor functions
//----------------------------------------------------------------------
-void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
- assert(x->is_pinned(),"");
- bool needs_range_check = x->compute_needs_range_check();
- bool use_length = x->length() != NULL;
- bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
- bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
- !get_jobject_constant(x->value())->is_null_object() ||
- x->should_profile());
-
- LIRItem array(x->array(), this);
- LIRItem index(x->index(), this);
- LIRItem value(x->value(), this);
- LIRItem length(this);
-
- array.load_item();
- index.load_nonconstant();
-
- if (use_length && needs_range_check) {
- length.set_instruction(x->length());
- length.load_item();
- }
- if (needs_store_check || x->check_boolean()) {
- value.load_item();
- } else {
- value.load_for_store(x->elt_type());
- }
-
- set_no_result(x);
-
- // The CodeEmitInfo must be duplicated for each different
- // LIR-instruction because spilling can occur anywhere between two
- // instructions and so the debug information must be different.
- CodeEmitInfo* range_check_info = state_for(x);
- CodeEmitInfo* null_check_info = NULL;
- if (x->needs_null_check()) {
- null_check_info = new CodeEmitInfo(range_check_info);
- }
-
- // Emit array address setup early so it schedules better.
- LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
-
- if (GenerateRangeChecks && needs_range_check) {
- if (use_length) {
- __ cmp(lir_cond_belowEqual, length.result(), index.result());
- __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
- } else {
- array_range_check(array.result(), index.result(), null_check_info, range_check_info);
- // Range_check also does the null check.
- null_check_info = NULL;
- }
- }
-
- if (GenerateArrayStoreCheck && needs_store_check) {
- // Following registers are used by slow_subtype_check:
- LIR_Opr tmp1 = FrameMap::R4_opr; // super_klass
- LIR_Opr tmp2 = FrameMap::R5_opr; // sub_klass
- LIR_Opr tmp3 = FrameMap::R6_opr; // temp
-
- CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
- __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3,
- store_check_info, x->profiled_method(), x->profiled_bci());
- }
-
- if (obj_store) {
- // Needs GC write barriers.
- pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- }
- LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
- __ move(result, array_addr, null_check_info);
- if (obj_store) {
- // Precise card mark.
- post_barrier(LIR_OprFact::address(array_addr), value.result());
- }
+void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
+ // Following registers are used by slow_subtype_check:
+ LIR_Opr tmp1 = FrameMap::R4_opr; // super_klass
+ LIR_Opr tmp2 = FrameMap::R5_opr; // sub_klass
+ LIR_Opr tmp3 = FrameMap::R6_opr; // temp
+ __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}
@@ -702,24 +633,68 @@
}
-void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
- assert(x->number_of_arguments() == 4, "wrong type");
- LIRItem obj (x->argument_at(0), this); // object
- LIRItem offset(x->argument_at(1), this); // offset of field
- LIRItem cmp (x->argument_at(2), this); // Value to compare with field.
- LIRItem val (x->argument_at(3), this); // Replace field with val if matches cmp.
-
+LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
+ LIR_Opr result = new_register(T_INT);
LIR_Opr t1 = LIR_OprFact::illegalOpr;
LIR_Opr t2 = LIR_OprFact::illegalOpr;
- LIR_Opr addr = new_pointer_register();
+ cmp_value.load_item();
+ new_value.load_item();
+
+ // Volatile load may be followed by Unsafe CAS.
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ membar();
+ } else {
+ __ membar_release();
+ }
+
+ if (type == T_OBJECT || type == T_ARRAY) {
+ if (UseCompressedOops) {
+ t1 = new_register(T_OBJECT);
+ t2 = new_register(T_OBJECT);
+ }
+ __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
+ } else if (type == T_INT) {
+ __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
+ } else if (type == T_LONG) {
+ __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
+ } else {
+ Unimplemented();
+ }
+ __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
+ result, type);
+ return result;
+}
+
- // Get address of field.
- obj.load_item();
- offset.load_item();
- cmp.load_item();
- val.load_item();
+LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
+ LIR_Opr result = new_register(type);
+ LIR_Opr tmp = FrameMap::R0_opr;
+
+ value.load_item();
+
+ // Volatile load may be followed by Unsafe CAS.
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ membar();
+ } else {
+ __ membar_release();
+ }
+
+ __ xchg(addr, value.result(), result, tmp);
- __ add(obj.result(), offset.result(), addr);
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ membar_acquire();
+ } else {
+ __ membar();
+ }
+ return result;
+}
+
+
+LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
+ LIR_Opr result = new_register(type);
+ LIR_Opr tmp = FrameMap::R0_opr;
+
+ value.load_item();
// Volatile load may be followed by Unsafe CAS.
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -728,33 +703,14 @@
__ membar_release();
}
- if (type == objectType) { // Write-barrier needed for Object fields.
- // Only cmp value can get overwritten, no do_load required.
- pre_barrier(LIR_OprFact::illegalOpr /* addr */, cmp.result() /* pre_val */,
- false /* do_load */, false /* patch */, NULL);
- }
+ __ xadd(addr, value.result(), result, tmp);
- if (type == objectType) {
- if (UseCompressedOops) {
- t1 = new_register(T_OBJECT);
- t2 = new_register(T_OBJECT);
- }
- __ cas_obj(addr, cmp.result(), val.result(), t1, t2);
- } else if (type == intType) {
- __ cas_int(addr, cmp.result(), val.result(), t1, t2);
- } else if (type == longType) {
- __ cas_long(addr, cmp.result(), val.result(), t1, t2);
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ membar_acquire();
} else {
- ShouldNotReachHere();
+ __ membar();
}
- // Benerate conditional move of boolean result.
- LIR_Opr result = rlock_result(x);
- __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
- result, as_BasicType(type));
- if (type == objectType) { // Write-barrier needed for Object fields.
- // Precise card mark since could either be object or array.
- post_barrier(addr, val.result());
- }
+ return result;
}
@@ -1255,110 +1211,6 @@
}
-void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
- BasicType type, bool is_volatile) {
- LIR_Opr base_op = src;
- LIR_Opr index_op = offset;
-
- bool is_obj = (type == T_ARRAY || type == T_OBJECT);
-#ifndef _LP64
- if (is_volatile && type == T_LONG) {
- __ volatile_store_unsafe_reg(data, src, offset, type, NULL, lir_patch_none);
- } else
-#endif
- {
- if (type == T_BOOLEAN) {
- type = T_BYTE;
- }
- LIR_Address* addr;
- if (type == T_ARRAY || type == T_OBJECT) {
- LIR_Opr tmp = new_pointer_register();
- __ add(base_op, index_op, tmp);
- addr = new LIR_Address(tmp, type);
- } else {
- addr = new LIR_Address(base_op, index_op, type);
- }
-
- if (is_obj) {
- pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- // _bs->c1_write_barrier_pre(this, LIR_OprFact::address(addr));
- }
- __ move(data, addr);
- if (is_obj) {
- // This address is precise.
- post_barrier(LIR_OprFact::address(addr), data);
- }
- }
-}
-
-
-void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
- BasicType type, bool is_volatile) {
-#ifndef _LP64
- if (is_volatile && type == T_LONG) {
- __ volatile_load_unsafe_reg(src, offset, dst, type, NULL, lir_patch_none);
- } else
-#endif
- {
- LIR_Address* addr = new LIR_Address(src, offset, type);
- __ load(addr, dst);
- }
-}
-
-
-void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
- BasicType type = x->basic_type();
- LIRItem src(x->object(), this);
- LIRItem off(x->offset(), this);
- LIRItem value(x->value(), this);
-
- src.load_item();
- value.load_item();
- off.load_nonconstant();
-
- LIR_Opr dst = rlock_result(x, type);
- LIR_Opr data = value.result();
- bool is_obj = (type == T_ARRAY || type == T_OBJECT);
-
- LIR_Opr tmp = FrameMap::R0_opr;
- LIR_Opr ptr = new_pointer_register();
- __ add(src.result(), off.result(), ptr);
-
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- __ membar();
- } else {
- __ membar_release();
- }
-
- if (x->is_add()) {
- __ xadd(ptr, data, dst, tmp);
- } else {
- const bool can_move_barrier = true; // TODO: port GraphKit::can_move_pre_barrier() from C2
- if (!can_move_barrier && is_obj) {
- // Do the pre-write barrier, if any.
- pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- }
- __ xchg(ptr, data, dst, tmp);
- if (is_obj) {
- // Seems to be a precise address.
- post_barrier(ptr, data);
- if (can_move_barrier) {
- pre_barrier(LIR_OprFact::illegalOpr, dst /* pre_val */,
- false /* do_load */, false /* patch */, NULL);
- }
- }
- }
-
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- __ membar_acquire();
- } else {
- __ membar();
- }
-}
-
-
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
assert(UseCRC32Intrinsics, "or should not be here");
LIR_Opr result = rlock_result(x);
--- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -42,11 +42,6 @@
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_ppc.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1ThreadLocalData.hpp"
-#endif
// Implementation of StubAssembler
@@ -708,164 +703,6 @@
}
break;
-#if INCLUDE_ALL_GCS
- case g1_pre_barrier_slow_id:
- {
- BarrierSet* bs = BarrierSet::barrier_set();
- if (bs->kind() != BarrierSet::G1BarrierSet) {
- goto unimplemented_entry;
- }
-
- __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
-
- // Using stack slots: pre_val (pre-pushed), spill tmp, spill tmp2.
- const int stack_slots = 3;
- Register pre_val = R0; // previous value of memory
- Register tmp = R14;
- Register tmp2 = R15;
-
- Label refill, restart, marking_not_active;
- int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
- int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
- int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
-
- // Spill
- __ std(tmp, -16, R1_SP);
- __ std(tmp2, -24, R1_SP);
-
- // Is marking still active?
- if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
- __ lwz(tmp, satb_q_active_byte_offset, R16_thread);
- } else {
- assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
- __ lbz(tmp, satb_q_active_byte_offset, R16_thread);
- }
- __ cmpdi(CCR0, tmp, 0);
- __ beq(CCR0, marking_not_active);
-
- __ bind(restart);
- // Load the index into the SATB buffer. SATBMarkQueue::_index is a
- // size_t so ld_ptr is appropriate.
- __ ld(tmp, satb_q_index_byte_offset, R16_thread);
-
- // index == 0?
- __ cmpdi(CCR0, tmp, 0);
- __ beq(CCR0, refill);
-
- __ ld(tmp2, satb_q_buf_byte_offset, R16_thread);
- __ ld(pre_val, -8, R1_SP); // Load from stack.
- __ addi(tmp, tmp, -oopSize);
-
- __ std(tmp, satb_q_index_byte_offset, R16_thread);
- __ stdx(pre_val, tmp2, tmp); // [_buf + index] := <address_of_card>
-
- __ bind(marking_not_active);
- // Restore temp registers and return-from-leaf.
- __ ld(tmp2, -24, R1_SP);
- __ ld(tmp, -16, R1_SP);
- __ blr();
-
- __ bind(refill);
- const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
- __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
- __ mflr(R0);
- __ std(R0, _abi(lr), R1_SP);
- __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread), R16_thread);
- __ pop_frame();
- __ ld(R0, _abi(lr), R1_SP);
- __ mtlr(R0);
- __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
- __ b(restart);
- }
- break;
-
- case g1_post_barrier_slow_id:
- {
- BarrierSet* bs = BarrierSet::barrier_set();
- if (bs->kind() != BarrierSet::G1BarrierSet) {
- goto unimplemented_entry;
- }
-
- __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
-
- // Using stack slots: spill addr, spill tmp2
- const int stack_slots = 2;
- Register tmp = R0;
- Register addr = R14;
- Register tmp2 = R15;
- jbyte* byte_map_base = ci_card_table_address();
-
- Label restart, refill, ret;
-
- // Spill
- __ std(addr, -8, R1_SP);
- __ std(tmp2, -16, R1_SP);
-
- __ srdi(addr, R0, CardTable::card_shift); // Addr is passed in R0.
- __ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
- __ add(addr, tmp2, addr);
- __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
-
- // Return if young card.
- __ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
- __ beq(CCR0, ret);
-
- // Return if sequential consistent value is already dirty.
- __ membar(Assembler::StoreLoad);
- __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
-
- __ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
- __ beq(CCR0, ret);
-
- // Not dirty.
-
- // First, dirty it.
- __ li(tmp, G1CardTable::dirty_card_val());
- __ stb(tmp, 0, addr);
-
- int dirty_card_q_index_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
- int dirty_card_q_buf_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
-
- __ bind(restart);
-
- // Get the index into the update buffer. DirtyCardQueue::_index is
- // a size_t so ld_ptr is appropriate here.
- __ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);
-
- // index == 0?
- __ cmpdi(CCR0, tmp2, 0);
- __ beq(CCR0, refill);
-
- __ ld(tmp, dirty_card_q_buf_byte_offset, R16_thread);
- __ addi(tmp2, tmp2, -oopSize);
-
- __ std(tmp2, dirty_card_q_index_byte_offset, R16_thread);
- __ add(tmp2, tmp, tmp2);
- __ std(addr, 0, tmp2); // [_buf + index] := <address_of_card>
-
- // Restore temp registers and return-from-leaf.
- __ bind(ret);
- __ ld(tmp2, -16, R1_SP);
- __ ld(addr, -8, R1_SP);
- __ blr();
-
- __ bind(refill);
- const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
- __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
- __ mflr(R0);
- __ std(R0, _abi(lr), R1_SP);
- __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread), R16_thread);
- __ pop_frame();
- __ ld(R0, _abi(lr), R1_SP);
- __ mtlr(R0);
- __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
- __ b(restart);
- }
- break;
-#endif // INCLUDE_ALL_GCS
-
case predicate_failed_trap_id:
{
__ set_info("predicate_failed_trap", dont_gc_arguments);
@@ -889,7 +726,6 @@
break;
default:
- unimplemented_entry:
{
__ set_info("unimplemented entry", dont_gc_arguments);
__ mflr(R0);
--- a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -26,12 +26,17 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
+#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/g1/c1/g1BarrierSetC1.hpp"
+#endif
#define __ masm->
@@ -339,4 +344,209 @@
__ bind(done);
}
+#ifdef COMPILER1
+
#undef __
+#define __ ce->masm()->
+
+void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
+ G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+ // At this point we know that marking is in progress.
+ // If do_load() is true then we have to emit the
+ // load of the previous value; otherwise it has already
+ // been loaded into _pre_val.
+
+ __ bind(*stub->entry());
+
+ assert(stub->pre_val()->is_register(), "Precondition.");
+ Register pre_val_reg = stub->pre_val()->as_register();
+
+ if (stub->do_load()) {
+ ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
+ }
+
+ __ cmpdi(CCR0, pre_val_reg, 0);
+ __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
+
+ address c_code = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
+ //__ load_const_optimized(R0, c_code);
+ __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(c_code));
+ __ std(pre_val_reg, -8, R1_SP); // Pass pre_val on stack.
+ __ mtctr(R0);
+ __ bctrl();
+ __ b(*stub->continuation());
+}
+
+void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
+ G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+ __ bind(*stub->entry());
+
+ assert(stub->addr()->is_register(), "Precondition.");
+ assert(stub->new_val()->is_register(), "Precondition.");
+ Register addr_reg = stub->addr()->as_pointer_register();
+ Register new_val_reg = stub->new_val()->as_register();
+
+ __ cmpdi(CCR0, new_val_reg, 0);
+ __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
+
+ address c_code = bs->post_barrier_c1_runtime_code_blob()->code_begin();
+ //__ load_const_optimized(R0, c_code);
+ __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(c_code));
+ __ mtctr(R0);
+ __ mr(R0, addr_reg); // Pass addr in R0.
+ __ bctrl();
+ __ b(*stub->continuation());
+}
+
+#undef __
+#define __ sasm->
+
+void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
+ BarrierSet* bs = BarrierSet::barrier_set();
+
+ __ set_info("g1_pre_barrier_slow_id", false);
+
+ // Using stack slots: pre_val (pre-pushed), spill tmp, spill tmp2.
+ const int stack_slots = 3;
+ Register pre_val = R0; // previous value of memory
+ Register tmp = R14;
+ Register tmp2 = R15;
+
+ Label refill, restart, marking_not_active;
+ int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
+ int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
+ int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
+
+ // Spill
+ __ std(tmp, -16, R1_SP);
+ __ std(tmp2, -24, R1_SP);
+
+ // Is marking still active?
+ if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+ __ lwz(tmp, satb_q_active_byte_offset, R16_thread);
+ } else {
+ assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+ __ lbz(tmp, satb_q_active_byte_offset, R16_thread);
+ }
+ __ cmpdi(CCR0, tmp, 0);
+ __ beq(CCR0, marking_not_active);
+
+ __ bind(restart);
+ // Load the index into the SATB buffer. SATBMarkQueue::_index is a
+ // size_t so ld_ptr is appropriate.
+ __ ld(tmp, satb_q_index_byte_offset, R16_thread);
+
+ // index == 0?
+ __ cmpdi(CCR0, tmp, 0);
+ __ beq(CCR0, refill);
+
+ __ ld(tmp2, satb_q_buf_byte_offset, R16_thread);
+ __ ld(pre_val, -8, R1_SP); // Load from stack.
+ __ addi(tmp, tmp, -oopSize);
+
+ __ std(tmp, satb_q_index_byte_offset, R16_thread);
+ __ stdx(pre_val, tmp2, tmp); // [_buf + index] := <address_of_card>
+
+ __ bind(marking_not_active);
+ // Restore temp registers and return-from-leaf.
+ __ ld(tmp2, -24, R1_SP);
+ __ ld(tmp, -16, R1_SP);
+ __ blr();
+
+ __ bind(refill);
+ const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
+ __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
+ __ mflr(R0);
+ __ std(R0, _abi(lr), R1_SP);
+ __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread), R16_thread);
+ __ pop_frame();
+ __ ld(R0, _abi(lr), R1_SP);
+ __ mtlr(R0);
+ __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
+ __ b(restart);
+}
+
+void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
+ G1BarrierSet* bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
+
+ __ set_info("g1_post_barrier_slow_id", false);
+
+ // Using stack slots: spill addr, spill tmp2
+ const int stack_slots = 2;
+ Register tmp = R0;
+ Register addr = R14;
+ Register tmp2 = R15;
+ jbyte* byte_map_base = bs->card_table()->byte_map_base();
+
+ Label restart, refill, ret;
+
+ // Spill
+ __ std(addr, -8, R1_SP);
+ __ std(tmp2, -16, R1_SP);
+
+ __ srdi(addr, R0, CardTable::card_shift); // Addr is passed in R0.
+ __ load_const_optimized(/*cardtable*/ tmp2, byte_map_base, tmp);
+ __ add(addr, tmp2, addr);
+ __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
+
+ // Return if young card.
+ __ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
+ __ beq(CCR0, ret);
+
+ // Return if sequential consistent value is already dirty.
+ __ membar(Assembler::StoreLoad);
+ __ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
+
+ __ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
+ __ beq(CCR0, ret);
+
+ // Not dirty.
+
+ // First, dirty it.
+ __ li(tmp, G1CardTable::dirty_card_val());
+ __ stb(tmp, 0, addr);
+
+ int dirty_card_q_index_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
+ int dirty_card_q_buf_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
+
+ __ bind(restart);
+
+ // Get the index into the update buffer. DirtyCardQueue::_index is
+ // a size_t so ld_ptr is appropriate here.
+ __ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);
+
+ // index == 0?
+ __ cmpdi(CCR0, tmp2, 0);
+ __ beq(CCR0, refill);
+
+ __ ld(tmp, dirty_card_q_buf_byte_offset, R16_thread);
+ __ addi(tmp2, tmp2, -oopSize);
+
+ __ std(tmp2, dirty_card_q_index_byte_offset, R16_thread);
+ __ add(tmp2, tmp, tmp2);
+ __ std(addr, 0, tmp2); // [_buf + index] := <address_of_card>
+
+ // Restore temp registers and return-from-leaf.
+ __ bind(ret);
+ __ ld(tmp2, -16, R1_SP);
+ __ ld(addr, -8, R1_SP);
+ __ blr();
+
+ __ bind(refill);
+ const int nbytes_save = (MacroAssembler::num_volatile_regs + stack_slots) * BytesPerWord;
+ __ save_volatile_gprs(R1_SP, -nbytes_save); // except R0
+ __ mflr(R0);
+ __ std(R0, _abi(lr), R1_SP);
+ __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread), R16_thread);
+ __ pop_frame();
+ __ ld(R0, _abi(lr), R1_SP);
+ __ mtlr(R0);
+ __ restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
+ __ b(restart);
+}
+
+#undef __
+
+#endif // COMPILER1
--- a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -28,6 +28,12 @@
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
+#include "utilities/macros.hpp"
+
+class LIR_Assembler;
+class StubAssembler;
+class G1PreBarrierStub;
+class G1PostBarrierStub;
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
@@ -45,6 +51,14 @@
Register tmp1, Register tmp2, Register tmp3, bool needs_frame);
public:
+#ifdef COMPILER1
+ void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
+ void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);
+
+ void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
+ void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
+#endif
+
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register base, RegisterOrConstant ind_or_offs, Register dst,
Register tmp1, Register tmp2, bool needs_frame, Label *is_null = NULL);
--- a/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -50,7 +50,7 @@
Label Lskip_loop, Lstore_loop;
- if (UseConcMarkSweepGC) { __ membar(Assembler::StoreStore); }
+ if (ct->scanned_concurrently()) { __ membar(Assembler::StoreStore); }
__ sldi_(count, count, LogBytesPerHeapOop);
__ beq(CCR0, Lskip_loop); // zero length
@@ -75,11 +75,13 @@
void CardTableBarrierSetAssembler::card_table_write(MacroAssembler* masm,
jbyte* byte_map_base,
Register tmp, Register obj) {
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
+ CardTable* ct = ctbs->card_table();
assert_different_registers(obj, tmp, R0);
__ load_const_optimized(tmp, (address)byte_map_base, R0);
__ srdi(obj, obj, CardTable::card_shift);
__ li(R0, CardTable::dirty_card_val());
- if (UseConcMarkSweepGC) { __ membar(Assembler::StoreStore); }
+ if (ct->scanned_concurrently()) { __ membar(Assembler::StoreStore); }
__ stbx(R0, tmp, obj);
}
--- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -34,9 +34,6 @@
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#endif // INCLUDE_ALL_GCS
#define __ ce->masm()->
#undef CHECK_BAILOUT
@@ -453,46 +450,4 @@
__ branch_optimized(Assembler::bcondAlways, _continuation);
}
-
-///////////////////////////////////////////////////////////////////////////////////
-#if INCLUDE_ALL_GCS
-
-void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
- // At this point we know that marking is in progress.
- // If do_load() is true then we have to emit the
- // load of the previous value; otherwise it has already
- // been loaded into _pre_val.
- __ bind(_entry);
- ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
- assert(pre_val()->is_register(), "Precondition.");
-
- Register pre_val_reg = pre_val()->as_register();
-
- if (do_load()) {
- ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
- }
-
- __ z_ltgr(Z_R1_scratch, pre_val_reg); // Pass oop in Z_R1_scratch to Runtime1::g1_pre_barrier_slow_id.
- __ branch_optimized(Assembler::bcondZero, _continuation);
- ce->emit_call_c(Runtime1::entry_for (Runtime1::g1_pre_barrier_slow_id));
- CHECK_BAILOUT();
- __ branch_optimized(Assembler::bcondAlways, _continuation);
-}
-
-void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
- __ bind(_entry);
- ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
- assert(addr()->is_register(), "Precondition.");
- assert(new_val()->is_register(), "Precondition.");
- Register new_val_reg = new_val()->as_register();
- __ z_ltgr(new_val_reg, new_val_reg);
- __ branch_optimized(Assembler::bcondZero, _continuation);
- __ z_lgr(Z_R1_scratch, addr()->as_pointer_register());
- ce->emit_call_c(Runtime1::entry_for (Runtime1::g1_post_barrier_slow_id));
- CHECK_BAILOUT();
- __ branch_optimized(Assembler::bcondAlways, _continuation);
-}
-
-#endif // INCLUDE_ALL_GCS
-
#undef __
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -572,82 +572,145 @@
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
assert(src->is_constant(), "should not call otherwise");
assert(dest->is_address(), "should not call otherwise");
- // See special case in LIRGenerator::do_StoreIndexed.
- // T_BYTE: Special case for card mark store.
- assert(type == T_BYTE || !dest->as_address_ptr()->index()->is_valid(), "not supported");
+
LIR_Const* c = src->as_constant_ptr();
Address addr = as_Address(dest->as_address_ptr());
int store_offset = -1;
- unsigned int lmem = 0;
- unsigned int lcon = 0;
- int64_t cbits = 0;
- switch (type) {
- case T_INT: // fall through
- case T_FLOAT:
- lmem = 4; lcon = 4; cbits = c->as_jint_bits();
- break;
-
- case T_ADDRESS:
- lmem = 8; lcon = 4; cbits = c->as_jint_bits();
- break;
-
- case T_OBJECT: // fall through
- case T_ARRAY:
- if (c->as_jobject() == NULL) {
- if (UseCompressedOops && !wide) {
- store_offset = __ store_const(addr, (int32_t)NULL_WORD, 4, 4);
+
+ if (dest->as_address_ptr()->index()->is_valid()) {
+ switch (type) {
+ case T_INT: // fall through
+ case T_FLOAT:
+ __ load_const_optimized(Z_R0_scratch, c->as_jint_bits());
+ store_offset = __ offset();
+ if (Immediate::is_uimm12(addr.disp())) {
+ __ z_st(Z_R0_scratch, addr);
+ } else {
+ __ z_sty(Z_R0_scratch, addr);
+ }
+ break;
+
+ case T_ADDRESS:
+ __ load_const_optimized(Z_R1_scratch, c->as_jint_bits());
+ store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
+ break;
+
+ case T_OBJECT: // fall through
+ case T_ARRAY:
+ if (c->as_jobject() == NULL) {
+ if (UseCompressedOops && !wide) {
+ __ clear_reg(Z_R1_scratch, false);
+ store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
+ } else {
+ __ clear_reg(Z_R1_scratch, true);
+ store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
+ }
+ } else {
+ jobject2reg(c->as_jobject(), Z_R1_scratch);
+ if (UseCompressedOops && !wide) {
+ __ encode_heap_oop(Z_R1_scratch);
+ store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
+ } else {
+ store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
+ }
+ }
+ assert(store_offset >= 0, "check");
+ break;
+
+ case T_LONG: // fall through
+ case T_DOUBLE:
+ __ load_const_optimized(Z_R1_scratch, (int64_t)(c->as_jlong_bits()));
+ store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
+ break;
+
+ case T_BOOLEAN: // fall through
+ case T_BYTE:
+ __ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint()));
+ store_offset = __ offset();
+ if (Immediate::is_uimm12(addr.disp())) {
+ __ z_stc(Z_R0_scratch, addr);
+ } else {
+ __ z_stcy(Z_R0_scratch, addr);
+ }
+ break;
+
+ case T_CHAR: // fall through
+ case T_SHORT:
+ __ load_const_optimized(Z_R0_scratch, (int16_t)(c->as_jint()));
+ store_offset = __ offset();
+ if (Immediate::is_uimm12(addr.disp())) {
+ __ z_sth(Z_R0_scratch, addr);
} else {
- store_offset = __ store_const(addr, (int64_t)NULL_WORD, 8, 8);
+ __ z_sthy(Z_R0_scratch, addr);
}
- } else {
- jobject2reg(c->as_jobject(), Z_R1_scratch);
- if (UseCompressedOops && !wide) {
- __ encode_heap_oop(Z_R1_scratch);
- store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
+ break;
+
+ default:
+ ShouldNotReachHere();
+ }
+
+ } else { // no index
+
+ unsigned int lmem = 0;
+ unsigned int lcon = 0;
+ int64_t cbits = 0;
+
+ switch (type) {
+ case T_INT: // fall through
+ case T_FLOAT:
+ lmem = 4; lcon = 4; cbits = c->as_jint_bits();
+ break;
+
+ case T_ADDRESS:
+ lmem = 8; lcon = 4; cbits = c->as_jint_bits();
+ break;
+
+ case T_OBJECT: // fall through
+ case T_ARRAY:
+ if (c->as_jobject() == NULL) {
+ if (UseCompressedOops && !wide) {
+ store_offset = __ store_const(addr, (int32_t)NULL_WORD, 4, 4);
+ } else {
+ store_offset = __ store_const(addr, (int64_t)NULL_WORD, 8, 8);
+ }
} else {
- store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
+ jobject2reg(c->as_jobject(), Z_R1_scratch);
+ if (UseCompressedOops && !wide) {
+ __ encode_heap_oop(Z_R1_scratch);
+ store_offset = __ reg2mem_opt(Z_R1_scratch, addr, false);
+ } else {
+ store_offset = __ reg2mem_opt(Z_R1_scratch, addr, true);
+ }
}
- }
+ assert(store_offset >= 0, "check");
+ break;
+
+ case T_LONG: // fall through
+ case T_DOUBLE:
+ lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits());
+ break;
+
+ case T_BOOLEAN: // fall through
+ case T_BYTE:
+ lmem = 1; lcon = 1; cbits = (int8_t)(c->as_jint());
+ break;
+
+ case T_CHAR: // fall through
+ case T_SHORT:
+ lmem = 2; lcon = 2; cbits = (int16_t)(c->as_jint());
+ break;
+
+ default:
+ ShouldNotReachHere();
+ }
+
+ if (store_offset == -1) {
+ store_offset = __ store_const(addr, cbits, lmem, lcon);
assert(store_offset >= 0, "check");
- break;
-
- case T_LONG: // fall through
- case T_DOUBLE:
- lmem = 8; lcon = 8; cbits = (int64_t)(c->as_jlong_bits());
- break;
-
- case T_BOOLEAN: // fall through
- case T_BYTE:
- lmem = 1; lcon = 1; cbits = (int8_t)(c->as_jint());
- break;
-
- case T_CHAR: // fall through
- case T_SHORT:
- lmem = 2; lcon = 2; cbits = (int16_t)(c->as_jint());
- break;
-
- default:
- ShouldNotReachHere();
- };
-
- // Index register is normally not supported, but for
- // LIRGenerator::CardTableBarrierSet_post_barrier we make an exception.
- if (type == T_BYTE && dest->as_address_ptr()->index()->is_valid()) {
- __ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint()));
- store_offset = __ offset();
- if (Immediate::is_uimm12(addr.disp())) {
- __ z_stc(Z_R0_scratch, addr);
- } else {
- __ z_stcy(Z_R0_scratch, addr);
}
}
- if (store_offset == -1) {
- store_offset = __ store_const(addr, cbits, lmem, lcon);
- assert(store_offset >= 0, "check");
- }
-
if (info != NULL) {
add_debug_info_for_null_check(store_offset, info);
}
--- a/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -140,7 +140,13 @@
int shift, int disp, BasicType type) {
assert(base->is_register(), "must be");
if (index->is_constant()) {
- intptr_t large_disp = ((intx)(index->as_constant_ptr()->as_jint()) << shift) + disp;
+ intx large_disp = disp;
+ LIR_Const *constant = index->as_constant_ptr();
+ if (constant->type() == T_LONG) {
+ large_disp += constant->as_jlong() << shift;
+ } else {
+ large_disp += (intx)(constant->as_jint()) << shift;
+ }
if (Displacement::is_validDisp(large_disp)) {
return new LIR_Address(base, large_disp, type);
}
@@ -159,7 +165,7 @@
}
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
- BasicType type, bool needs_card_mark) {
+ BasicType type) {
int elem_size = type2aelembytes(type);
int shift = exact_log2(elem_size);
int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
@@ -181,16 +187,7 @@
index_opr,
offset_in_bytes, type);
}
- if (needs_card_mark) {
- // This store will need a precise card mark, so go ahead and
- // compute the full adddres instead of computing once for the
- // store and again for the card mark.
- LIR_Opr tmp = new_pointer_register();
- __ leal(LIR_OprFact::address(addr), tmp);
- return new LIR_Address(tmp, type);
- } else {
- return addr;
- }
+ return addr;
}
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
@@ -252,86 +249,11 @@
// visitor functions
//----------------------------------------------------------------------
-void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
- assert(x->is_pinned(),"");
- bool needs_range_check = x->compute_needs_range_check();
- bool use_length = x->length() != NULL;
- bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
- bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
- !get_jobject_constant(x->value())->is_null_object() ||
- x->should_profile());
-
- LIRItem array(x->array(), this);
- LIRItem index(x->index(), this);
- LIRItem value(x->value(), this);
- LIRItem length(this);
-
- array.load_item();
- index.load_nonconstant(20);
-
- if (use_length && needs_range_check) {
- length.set_instruction(x->length());
- length.load_item();
- }
- if (needs_store_check || x->check_boolean()) {
- value.load_item();
- } else {
- value.load_for_store(x->elt_type());
- }
-
- set_no_result(x);
-
- // The CodeEmitInfo must be duplicated for each different
- // LIR-instruction because spilling can occur anywhere between two
- // instructions and so the debug information must be different.
- CodeEmitInfo* range_check_info = state_for (x);
- CodeEmitInfo* null_check_info = NULL;
- if (x->needs_null_check()) {
- null_check_info = new CodeEmitInfo(range_check_info);
- }
-
- // Emit array address setup early so it schedules better.
- LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
- if (value.result()->is_constant() && array_addr->index()->is_valid()) {
- // Constants cannot be stored with index register on ZARCH_64 (see LIR_Assembler::const2mem()).
- LIR_Opr tmp = new_pointer_register();
- __ leal(LIR_OprFact::address(array_addr), tmp);
- array_addr = new LIR_Address(tmp, x->elt_type());
- }
-
- if (GenerateRangeChecks && needs_range_check) {
- if (use_length) {
- __ cmp(lir_cond_belowEqual, length.result(), index.result());
- __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
- } else {
- array_range_check(array.result(), index.result(), null_check_info, range_check_info);
- // Range_check also does the null check.
- null_check_info = NULL;
- }
- }
-
- if (GenerateArrayStoreCheck && needs_store_check) {
- LIR_Opr tmp1 = new_register(objectType);
- LIR_Opr tmp2 = new_register(objectType);
- LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
-
- CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
- __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
- }
-
- if (obj_store) {
- // Needs GC write barriers.
- pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- }
-
- LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
- __ move(result, array_addr, null_check_info);
-
- if (obj_store) {
- // Precise card mark
- post_barrier(LIR_OprFact::address(array_addr), value.result());
- }
+void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
+ LIR_Opr tmp1 = new_register(objectType);
+ LIR_Opr tmp2 = new_register(objectType);
+ LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
+ __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
}
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
@@ -665,59 +587,42 @@
}
}
-void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
- assert(x->number_of_arguments() == 4, "wrong type");
- LIRItem obj (x->argument_at(0), this); // object
- LIRItem offset(x->argument_at(1), this); // offset of field
- LIRItem cmp (x->argument_at(2), this); // Value to compare with field.
- LIRItem val (x->argument_at(3), this); // Replace field with val if matches cmp.
-
- // Get address of field.
- obj.load_item();
- offset.load_nonconstant(20);
- cmp.load_item();
- val.load_item();
-
- LIR_Opr addr = new_pointer_register();
- LIR_Address* a;
- if (offset.result()->is_constant()) {
- assert(Immediate::is_simm20(offset.result()->as_jlong()), "should have been loaded into register");
- a = new LIR_Address(obj.result(),
- offset.result()->as_jlong(),
- as_BasicType(type));
- } else {
- a = new LIR_Address(obj.result(),
- offset.result(),
- 0,
- as_BasicType(type));
- }
- __ leal(LIR_OprFact::address(a), addr);
-
- if (type == objectType) { // Write-barrier needed for Object fields.
- pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- }
-
- LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
- if (type == objectType) {
- __ cas_obj(addr, cmp.result(), val.result(), new_register(T_OBJECT), new_register(T_OBJECT));
- } else if (type == intType) {
- __ cas_int(addr, cmp.result(), val.result(), ill, ill);
- } else if (type == longType) {
- __ cas_long(addr, cmp.result(), val.result(), ill, ill);
+LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
+ LIR_Opr t1 = LIR_OprFact::illegalOpr;
+ LIR_Opr t2 = LIR_OprFact::illegalOpr;
+ cmp_value.load_item();
+ new_value.load_item();
+ if (type == T_OBJECT) {
+ if (UseCompressedOops) {
+ t1 = new_register(T_OBJECT);
+ t2 = new_register(T_OBJECT);
+ }
+ __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
+ } else if (type == T_INT) {
+ __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
+ } else if (type == T_LONG) {
+ __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
} else {
ShouldNotReachHere();
}
// Generate conditional move of boolean result.
- LIR_Opr result = rlock_result(x);
+ LIR_Opr result = new_register(T_INT);
__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
- result, as_BasicType(type));
- if (type == objectType) { // Write-barrier needed for Object fields.
- // Precise card mark since could either be object or array
- post_barrier(addr, val.result());
- }
+ result, type);
+ return result;
}
+LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
+ Unimplemented(); // Currently not supported on this platform.
+ return LIR_OprFact::illegalOpr;
+}
+
+LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
+ LIR_Opr result = new_register(type);
+ value.load_item();
+ __ xadd(addr, value.result(), result, LIR_OprFact::illegalOpr);
+ return result;
+}
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
switch (x->id()) {
@@ -1104,57 +1009,6 @@
__ load(address, result, info);
}
-
-void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
- BasicType type, bool is_volatile) {
- LIR_Address* addr = new LIR_Address(src, offset, type);
- bool is_obj = (type == T_ARRAY || type == T_OBJECT);
- if (is_obj) {
- // Do the pre-write barrier, if any.
- pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- __ move(data, addr);
- assert(src->is_register(), "must be register");
- // Seems to be a precise address.
- post_barrier(LIR_OprFact::address(addr), data);
- } else {
- __ move(data, addr);
- }
-}
-
-
-void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
- BasicType type, bool is_volatile) {
- LIR_Address* addr = new LIR_Address(src, offset, type);
- __ load(addr, dst);
-}
-
-void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
- BasicType type = x->basic_type();
- assert (x->is_add() && type != T_ARRAY && type != T_OBJECT, "not supported");
- LIRItem src(x->object(), this);
- LIRItem off(x->offset(), this);
- LIRItem value(x->value(), this);
-
- src.load_item();
- value.load_item();
- off.load_nonconstant(20);
-
- LIR_Opr dst = rlock_result(x, type);
- LIR_Opr data = value.result();
- LIR_Opr offset = off.result();
-
- LIR_Address* addr;
- if (offset->is_constant()) {
- assert(Immediate::is_simm20(offset->as_jlong()), "should have been loaded into register");
- addr = new LIR_Address(src.result(), offset->as_jlong(), type);
- } else {
- addr = new LIR_Address(src.result(), offset, type);
- }
-
- __ xadd(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
-}
-
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
assert(UseCRC32Intrinsics, "or should not be here");
LIR_Opr result = rlock_result(x);
--- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -42,11 +42,6 @@
#include "utilities/macros.hpp"
#include "vmreg_s390.inline.hpp"
#include "registerSaver_s390.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1ThreadLocalData.hpp"
-#endif
// Implementation of StubAssembler
@@ -190,15 +185,6 @@
return RegisterSaver::save_live_registers(sasm, reg_set);
}
-static OopMap* save_volatile_registers(StubAssembler* sasm, Register return_pc = Z_R14) {
- __ block_comment("save_volatile_registers");
- RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
- int frame_size_in_slots =
- RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
- sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
- return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
-}
-
static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
__ block_comment("restore_live_registers");
RegisterSaver::RegisterSet reg_set =
@@ -214,12 +200,6 @@
RegisterSaver::restore_live_registers(sasm, RegisterSaver::all_registers_except_r2);
}
-static void restore_volatile_registers(StubAssembler* sasm) {
- __ block_comment("restore_volatile_registers");
- RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
- RegisterSaver::restore_live_registers(sasm, reg_set);
-}
-
void Runtime1::initialize_pd() {
// Nothing to do.
}
@@ -764,160 +744,6 @@
break;
#endif // TODO
-#if INCLUDE_ALL_GCS
- case g1_pre_barrier_slow_id:
- { // Z_R1_scratch: previous value of memory
-
- BarrierSet* bs = BarrierSet::barrier_set();
- if (bs->kind() != BarrierSet::G1BarrierSet) {
- __ should_not_reach_here(FILE_AND_LINE);
- break;
- }
-
- __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
-
- Register pre_val = Z_R1_scratch;
- Register tmp = Z_R6; // Must be non-volatile because it is used to save pre_val.
- Register tmp2 = Z_R7;
-
- Label refill, restart, marking_not_active;
- int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
- int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
- int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
-
- // Save tmp registers (see assertion in G1PreBarrierStub::emit_code()).
- __ z_stg(tmp, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
- __ z_stg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
-
- // Is marking still active?
- if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
- __ load_and_test_int(tmp, Address(Z_thread, satb_q_active_byte_offset));
- } else {
- assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
- __ load_and_test_byte(tmp, Address(Z_thread, satb_q_active_byte_offset));
- }
- __ z_bre(marking_not_active); // Activity indicator is zero, so there is no marking going on currently.
-
- __ bind(restart);
- // Load the index into the SATB buffer. SATBMarkQueue::_index is a
- // size_t so ld_ptr is appropriate.
- __ z_ltg(tmp, satb_q_index_byte_offset, Z_R0, Z_thread);
-
- // index == 0?
- __ z_brz(refill);
-
- __ z_lg(tmp2, satb_q_buf_byte_offset, Z_thread);
- __ add2reg(tmp, -oopSize);
-
- __ z_stg(pre_val, 0, tmp, tmp2); // [_buf + index] := <address_of_card>
- __ z_stg(tmp, satb_q_index_byte_offset, Z_thread);
-
- __ bind(marking_not_active);
- // Restore tmp registers (see assertion in G1PreBarrierStub::emit_code()).
- __ z_lg(tmp, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
- __ z_lg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
- __ z_br(Z_R14);
-
- __ bind(refill);
- save_volatile_registers(sasm);
- __ z_lgr(tmp, pre_val); // save pre_val
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread),
- Z_thread);
- __ z_lgr(pre_val, tmp); // restore pre_val
- restore_volatile_registers(sasm);
- __ z_bru(restart);
- }
- break;
-
- case g1_post_barrier_slow_id:
- { // Z_R1_scratch: oop address, address of updated memory slot
- BarrierSet* bs = BarrierSet::barrier_set();
- if (bs->kind() != BarrierSet::G1BarrierSet) {
- __ should_not_reach_here(FILE_AND_LINE);
- break;
- }
-
- __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
-
- Register addr_oop = Z_R1_scratch;
- Register addr_card = Z_R1_scratch;
- Register r1 = Z_R6; // Must be saved/restored.
- Register r2 = Z_R7; // Must be saved/restored.
- Register cardtable = r1; // Must be non-volatile, because it is used to save addr_card.
- jbyte* byte_map_base = ci_card_table_address();
-
- // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
- __ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
-
- Label not_already_dirty, restart, refill, young_card;
-
- // Calculate address of card corresponding to the updated oop slot.
- AddressLiteral rs(byte_map_base);
- __ z_srlg(addr_card, addr_oop, CardTable::card_shift);
- addr_oop = noreg; // dead now
- __ load_const_optimized(cardtable, rs); // cardtable := <card table base>
- __ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable
-
- __ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
- __ z_bre(young_card);
-
- __ z_sync(); // Required to support concurrent cleaning.
-
- __ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
- __ z_brne(not_already_dirty);
-
- __ bind(young_card);
- // We didn't take the branch, so we're already dirty: restore
- // used registers and return.
- __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
- __ z_br(Z_R14);
-
- // Not dirty.
- __ bind(not_already_dirty);
-
- // First, dirty it: [addr_card] := 0
- __ z_mvi(0, addr_card, CardTable::dirty_card_val());
-
- Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
- Register buf = r2;
- cardtable = noreg; // now dead
-
- // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
- __ z_stg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
-
- ByteSize dirty_card_q_index_byte_offset = G1ThreadLocalData::dirty_card_queue_index_offset();
- ByteSize dirty_card_q_buf_byte_offset = G1ThreadLocalData::dirty_card_queue_buffer_offset();
-
- __ bind(restart);
-
- // Get the index into the update buffer. DirtyCardQueue::_index is
- // a size_t so z_ltg is appropriate here.
- __ z_ltg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
-
- // index == 0?
- __ z_brz(refill);
-
- __ z_lg(buf, Address(Z_thread, dirty_card_q_buf_byte_offset));
- __ add2reg(idx, -oopSize);
-
- __ z_stg(addr_card, 0, idx, buf); // [_buf + index] := <address_of_card>
- __ z_stg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
- // Restore killed registers and return.
- __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
- __ z_lg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
- __ z_br(Z_R14);
-
- __ bind(refill);
- save_volatile_registers(sasm);
- __ z_lgr(idx, addr_card); // Save addr_card, tmp3 must be non-volatile.
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread),
- Z_thread);
- __ z_lgr(addr_card, idx);
- restore_volatile_registers(sasm); // Restore addr_card.
- __ z_bru(restart);
- }
- break;
-#endif // INCLUDE_ALL_GCS
case predicate_failed_trap_id:
{
__ set_info("predicate_failed_trap", dont_gc_arguments);
--- a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -33,6 +33,11 @@
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/g1/c1/g1BarrierSetC1.hpp"
+#endif
#define __ masm->
@@ -406,4 +411,209 @@
__ bind(Ldone);
}
+#ifdef COMPILER1
+
#undef __
+#define __ ce->masm()->
+
+void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
+ G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+ // At this point we know that marking is in progress.
+ // If do_load() is true then we have to emit the
+ // load of the previous value; otherwise it has already
+ // been loaded into _pre_val.
+ __ bind(*stub->entry());
+ ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
+ assert(stub->pre_val()->is_register(), "Precondition.");
+
+ Register pre_val_reg = stub->pre_val()->as_register();
+
+ if (stub->do_load()) {
+ ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
+ }
+
+ __ z_ltgr(Z_R1_scratch, pre_val_reg); // Pass oop in Z_R1_scratch to Runtime1::g1_pre_barrier_slow_id.
+ __ branch_optimized(Assembler::bcondZero, *stub->continuation());
+ ce->emit_call_c(bs->pre_barrier_c1_runtime_code_blob()->code_begin());
+ __ branch_optimized(Assembler::bcondAlways, *stub->continuation());
+}
+
+void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
+ G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+ __ bind(*stub->entry());
+ ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
+ assert(stub->addr()->is_register(), "Precondition.");
+ assert(stub->new_val()->is_register(), "Precondition.");
+ Register new_val_reg = stub->new_val()->as_register();
+ __ z_ltgr(new_val_reg, new_val_reg);
+ __ branch_optimized(Assembler::bcondZero, *stub->continuation());
+ __ z_lgr(Z_R1_scratch, stub->addr()->as_pointer_register());
+ ce->emit_call_c(bs->post_barrier_c1_runtime_code_blob()->code_begin());
+ __ branch_optimized(Assembler::bcondAlways, *stub->continuation());
+}
+
+#undef __
+
+#define __ sasm->
+
+static OopMap* save_volatile_registers(StubAssembler* sasm, Register return_pc = Z_R14) {
+ __ block_comment("save_volatile_registers");
+ RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
+ int frame_size_in_slots = RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
+ sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
+ return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
+}
+
+static void restore_volatile_registers(StubAssembler* sasm) {
+ __ block_comment("restore_volatile_registers");
+ RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
+ RegisterSaver::restore_live_registers(sasm, reg_set);
+}
+
+void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
+ // Z_R1_scratch: previous value of memory
+
+ BarrierSet* bs = BarrierSet::barrier_set();
+ __ set_info("g1_pre_barrier_slow_id", false);
+
+ Register pre_val = Z_R1_scratch;
+ Register tmp = Z_R6; // Must be non-volatile because it is used to save pre_val.
+ Register tmp2 = Z_R7;
+
+ Label refill, restart, marking_not_active;
+ int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
+ int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
+ int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
+
+ // Save tmp registers (see assertion in G1PreBarrierStub::emit_code()).
+ __ z_stg(tmp, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+ __ z_stg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+
+ // Is marking still active?
+ if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+ __ load_and_test_int(tmp, Address(Z_thread, satb_q_active_byte_offset));
+ } else {
+ assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+ __ load_and_test_byte(tmp, Address(Z_thread, satb_q_active_byte_offset));
+ }
+ __ z_bre(marking_not_active); // Activity indicator is zero, so there is no marking going on currently.
+
+ __ bind(restart);
+ // Load the index into the SATB buffer. SATBMarkQueue::_index is a
+ // size_t so ld_ptr is appropriate.
+ __ z_ltg(tmp, satb_q_index_byte_offset, Z_R0, Z_thread);
+
+ // index == 0?
+ __ z_brz(refill);
+
+ __ z_lg(tmp2, satb_q_buf_byte_offset, Z_thread);
+ __ add2reg(tmp, -oopSize);
+
+ __ z_stg(pre_val, 0, tmp, tmp2); // [_buf + index] := <address_of_card>
+ __ z_stg(tmp, satb_q_index_byte_offset, Z_thread);
+
+ __ bind(marking_not_active);
+ // Restore tmp registers (see assertion in G1PreBarrierStub::emit_code()).
+ __ z_lg(tmp, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+ __ z_lg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+ __ z_br(Z_R14);
+
+ __ bind(refill);
+ save_volatile_registers(sasm);
+ __ z_lgr(tmp, pre_val); // save pre_val
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread),
+ Z_thread);
+ __ z_lgr(pre_val, tmp); // restore pre_val
+ restore_volatile_registers(sasm);
+ __ z_bru(restart);
+}
+
+void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
+ // Z_R1_scratch: oop address, address of updated memory slot
+
+ BarrierSet* bs = BarrierSet::barrier_set();
+ __ set_info("g1_post_barrier_slow_id", false);
+
+ Register addr_oop = Z_R1_scratch;
+ Register addr_card = Z_R1_scratch;
+ Register r1 = Z_R6; // Must be saved/restored.
+ Register r2 = Z_R7; // Must be saved/restored.
+ Register cardtable = r1; // Must be non-volatile, because it is used to save addr_card.
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
+ CardTable* ct = ctbs->card_table();
+ jbyte* byte_map_base = ct->byte_map_base();
+
+ // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
+ __ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+
+ Label not_already_dirty, restart, refill, young_card;
+
+ // Calculate address of card corresponding to the updated oop slot.
+ AddressLiteral rs(byte_map_base);
+ __ z_srlg(addr_card, addr_oop, CardTable::card_shift);
+ addr_oop = noreg; // dead now
+ __ load_const_optimized(cardtable, rs); // cardtable := <card table base>
+ __ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable
+
+ __ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
+ __ z_bre(young_card);
+
+ __ z_sync(); // Required to support concurrent cleaning.
+
+ __ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
+ __ z_brne(not_already_dirty);
+
+ __ bind(young_card);
+ // We didn't take the branch, so we're already dirty: restore
+ // used registers and return.
+ __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+ __ z_br(Z_R14);
+
+ // Not dirty.
+ __ bind(not_already_dirty);
+
+ // First, dirty it: [addr_card] := 0
+ __ z_mvi(0, addr_card, CardTable::dirty_card_val());
+
+ Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
+ Register buf = r2;
+ cardtable = noreg; // now dead
+
+ // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
+ __ z_stg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+
+ ByteSize dirty_card_q_index_byte_offset = G1ThreadLocalData::dirty_card_queue_index_offset();
+ ByteSize dirty_card_q_buf_byte_offset = G1ThreadLocalData::dirty_card_queue_buffer_offset();
+
+ __ bind(restart);
+
+ // Get the index into the update buffer. DirtyCardQueue::_index is
+ // a size_t so z_ltg is appropriate here.
+ __ z_ltg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
+
+ // index == 0?
+ __ z_brz(refill);
+
+ __ z_lg(buf, Address(Z_thread, dirty_card_q_buf_byte_offset));
+ __ add2reg(idx, -oopSize);
+
+ __ z_stg(addr_card, 0, idx, buf); // [_buf + index] := <address_of_card>
+ __ z_stg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
+ // Restore killed registers and return.
+ __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+ __ z_lg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
+ __ z_br(Z_R14);
+
+ __ bind(refill);
+ save_volatile_registers(sasm);
+ __ z_lgr(idx, addr_card); // Save addr_card, tmp3 must be non-volatile.
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread),
+ Z_thread);
+ __ z_lgr(addr_card, idx);
+ restore_volatile_registers(sasm); // Restore addr_card.
+ __ z_bru(restart);
+}
+
+#undef __
+
+#endif // COMPILER1
--- a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -28,6 +28,12 @@
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
+#include "utilities/macros.hpp"
+
+class LIR_Assembler;
+class StubAssembler;
+class G1PreBarrierStub;
+class G1PostBarrierStub;
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
@@ -50,6 +56,14 @@
const Address& dst, Register val, Register tmp1, Register tmp2, Register tmp3);
public:
+#ifdef COMPILER1
+ void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
+ void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);
+
+ void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
+ void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
+#endif
+
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
const Address& src, Register dst, Register tmp1, Register tmp2, Label *is_null = NULL);
--- a/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -32,9 +32,6 @@
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_sparc.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#endif // INCLUDE_ALL_GCS
#define __ ce->masm()->
@@ -454,63 +451,4 @@
__ delayed()->nop();
}
-
-///////////////////////////////////////////////////////////////////////////////////
-#if INCLUDE_ALL_GCS
-
-void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
- // At this point we know that marking is in progress.
- // If do_load() is true then we have to emit the
- // load of the previous value; otherwise it has already
- // been loaded into _pre_val.
-
- __ bind(_entry);
-
- assert(pre_val()->is_register(), "Precondition.");
- Register pre_val_reg = pre_val()->as_register();
-
- if (do_load()) {
- ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
- }
-
- if (__ is_in_wdisp16_range(_continuation)) {
- __ br_null(pre_val_reg, /*annul*/false, Assembler::pt, _continuation);
- } else {
- __ cmp(pre_val_reg, G0);
- __ brx(Assembler::equal, false, Assembler::pn, _continuation);
- }
- __ delayed()->nop();
-
- __ call(Runtime1::entry_for(Runtime1::Runtime1::g1_pre_barrier_slow_id));
- __ delayed()->mov(pre_val_reg, G4);
- __ br(Assembler::always, false, Assembler::pt, _continuation);
- __ delayed()->nop();
-
-}
-
-void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
- __ bind(_entry);
-
- assert(addr()->is_register(), "Precondition.");
- assert(new_val()->is_register(), "Precondition.");
- Register addr_reg = addr()->as_pointer_register();
- Register new_val_reg = new_val()->as_register();
-
- if (__ is_in_wdisp16_range(_continuation)) {
- __ br_null(new_val_reg, /*annul*/false, Assembler::pt, _continuation);
- } else {
- __ cmp(new_val_reg, G0);
- __ brx(Assembler::equal, false, Assembler::pn, _continuation);
- }
- __ delayed()->nop();
-
- __ call(Runtime1::entry_for(Runtime1::Runtime1::g1_post_barrier_slow_id));
- __ delayed()->mov(addr_reg, G4);
- __ br(Assembler::always, false, Assembler::pt, _continuation);
- __ delayed()->nop();
-}
-
-#endif // INCLUDE_ALL_GCS
-///////////////////////////////////////////////////////////////////////////////////
-
#undef __
--- a/src/hotspot/cpu/sparc/c1_LIRGenerator_sparc.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/sparc/c1_LIRGenerator_sparc.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -193,7 +193,7 @@
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
- BasicType type, bool needs_card_mark) {
+ BasicType type) {
int elem_size = type2aelembytes(type);
int shift = exact_log2(elem_size);
@@ -231,13 +231,8 @@
__ add(index_opr, array_opr, base_opr);
}
}
- if (needs_card_mark) {
- LIR_Opr ptr = new_pointer_register();
- __ add(base_opr, LIR_OprFact::intptrConst(offset), ptr);
- return new LIR_Address(ptr, type);
- } else {
- return new LIR_Address(base_opr, offset, type);
- }
+
+ return new LIR_Address(base_opr, offset, type);
}
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
@@ -311,86 +306,17 @@
}
}
+void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
+ LIR_Opr tmp1 = FrameMap::G1_opr;
+ LIR_Opr tmp2 = FrameMap::G3_opr;
+ LIR_Opr tmp3 = FrameMap::G5_opr;
+ __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
+}
+
//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------
-
-void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
- assert(x->is_pinned(),"");
- bool needs_range_check = x->compute_needs_range_check();
- bool use_length = x->length() != NULL;
- bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
- bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
- !get_jobject_constant(x->value())->is_null_object() ||
- x->should_profile());
-
- LIRItem array(x->array(), this);
- LIRItem index(x->index(), this);
- LIRItem value(x->value(), this);
- LIRItem length(this);
-
- array.load_item();
- index.load_nonconstant();
-
- if (use_length && needs_range_check) {
- length.set_instruction(x->length());
- length.load_item();
- }
- if (needs_store_check || x->check_boolean()) {
- value.load_item();
- } else {
- value.load_for_store(x->elt_type());
- }
-
- set_no_result(x);
-
- // the CodeEmitInfo must be duplicated for each different
- // LIR-instruction because spilling can occur anywhere between two
- // instructions and so the debug information must be different
- CodeEmitInfo* range_check_info = state_for(x);
- CodeEmitInfo* null_check_info = NULL;
- if (x->needs_null_check()) {
- null_check_info = new CodeEmitInfo(range_check_info);
- }
-
- // emit array address setup early so it schedules better
- LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
-
- if (GenerateRangeChecks && needs_range_check) {
- if (use_length) {
- __ cmp(lir_cond_belowEqual, length.result(), index.result());
- __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
- } else {
- array_range_check(array.result(), index.result(), null_check_info, range_check_info);
- // range_check also does the null check
- null_check_info = NULL;
- }
- }
-
- if (GenerateArrayStoreCheck && needs_store_check) {
- LIR_Opr tmp1 = FrameMap::G1_opr;
- LIR_Opr tmp2 = FrameMap::G3_opr;
- LIR_Opr tmp3 = FrameMap::G5_opr;
-
- CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
- __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
- }
-
- if (obj_store) {
- // Needs GC write barriers.
- pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- }
- LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
- __ move(result, array_addr, null_check_info);
- if (obj_store) {
- // Precise card mark
- post_barrier(LIR_OprFact::address(array_addr), value.result());
- }
-}
-
-
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
assert(x->is_pinned(),"");
LIRItem obj(x->obj(), this);
@@ -635,51 +561,47 @@
}
}
-
-void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
- assert(x->number_of_arguments() == 4, "wrong type");
- LIRItem obj (x->argument_at(0), this); // object
- LIRItem offset(x->argument_at(1), this); // offset of field
- LIRItem cmp (x->argument_at(2), this); // value to compare with field
- LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
-
- // Use temps to avoid kills
+LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
+ LIR_Opr result = new_register(T_INT);
LIR_Opr t1 = FrameMap::G1_opr;
LIR_Opr t2 = FrameMap::G3_opr;
- LIR_Opr addr = new_pointer_register();
+ cmp_value.load_item();
+ new_value.load_item();
+ if (type == T_OBJECT || type == T_ARRAY) {
+ __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
+ } else if (type == T_INT) {
+ __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
+ } else if (type == T_LONG) {
+ __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), t1, t2);
+ } else {
+ Unimplemented();
+ }
+ __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
+ result, type);
+ return result;
+}
- // get address of field
- obj.load_item();
- offset.load_item();
- cmp.load_item();
- val.load_item();
+LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
+ bool is_obj = type == T_OBJECT || type == T_ARRAY;
+ LIR_Opr result = new_register(type);
+ LIR_Opr tmp = LIR_OprFact::illegalOpr;
- __ add(obj.result(), offset.result(), addr);
+ value.load_item();
- if (type == objectType) { // Write-barrier needed for Object fields.
- pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
+ if (is_obj) {
+ tmp = FrameMap::G3_opr;
}
- if (type == objectType)
- __ cas_obj(addr, cmp.result(), val.result(), t1, t2);
- else if (type == intType)
- __ cas_int(addr, cmp.result(), val.result(), t1, t2);
- else if (type == longType)
- __ cas_long(addr, cmp.result(), val.result(), t1, t2);
- else {
- ShouldNotReachHere();
- }
- // generate conditional move of boolean result
- LIR_Opr result = rlock_result(x);
- __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
- result, as_BasicType(type));
- if (type == objectType) { // Write-barrier needed for Object fields.
- // Precise card mark since could either be object or array
- post_barrier(addr, val.result());
- }
+ // Because we want a 2-arg form of xchg
+ __ move(value.result(), result);
+ __ xchg(addr, result, result, tmp);
+ return result;
}
+LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
+ Unimplemented();
+ return LIR_OprFact::illegalOpr;
+}
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
switch (x->id()) {
@@ -1338,94 +1260,3 @@
CodeEmitInfo* info) {
__ load(address, result, info);
}
-
-
-void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
- BasicType type, bool is_volatile) {
- LIR_Opr base_op = src;
- LIR_Opr index_op = offset;
-
- bool is_obj = (type == T_ARRAY || type == T_OBJECT);
- {
- if (type == T_BOOLEAN) {
- type = T_BYTE;
- }
- LIR_Address* addr;
- if (type == T_ARRAY || type == T_OBJECT) {
- LIR_Opr tmp = new_pointer_register();
- __ add(base_op, index_op, tmp);
- addr = new LIR_Address(tmp, type);
- } else {
- addr = new LIR_Address(base_op, index_op, type);
- }
-
- if (is_obj) {
- pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- // _bs->c1_write_barrier_pre(this, LIR_OprFact::address(addr));
- }
- __ move(data, addr);
- if (is_obj) {
- // This address is precise
- post_barrier(LIR_OprFact::address(addr), data);
- }
- }
-}
-
-
-void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
- BasicType type, bool is_volatile) {
- {
- LIR_Address* addr = new LIR_Address(src, offset, type);
- __ load(addr, dst);
- }
-}
-
-void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
- BasicType type = x->basic_type();
- LIRItem src(x->object(), this);
- LIRItem off(x->offset(), this);
- LIRItem value(x->value(), this);
-
- src.load_item();
- value.load_item();
- off.load_nonconstant();
-
- LIR_Opr dst = rlock_result(x, type);
- LIR_Opr data = value.result();
- bool is_obj = (type == T_ARRAY || type == T_OBJECT);
- LIR_Opr offset = off.result();
-
- // Because we want a 2-arg form of xchg
- __ move(data, dst);
-
- assert (!x->is_add() && (type == T_INT || (is_obj && UseCompressedOops)), "unexpected type");
- LIR_Address* addr;
- if (offset->is_constant()) {
-
- jlong l = offset->as_jlong();
- assert((jlong)((jint)l) == l, "offset too large for constant");
- jint c = (jint)l;
- addr = new LIR_Address(src.result(), c, type);
- } else {
- addr = new LIR_Address(src.result(), offset, type);
- }
-
- LIR_Opr tmp = LIR_OprFact::illegalOpr;
- LIR_Opr ptr = LIR_OprFact::illegalOpr;
-
- if (is_obj) {
- // Do the pre-write barrier, if any.
- // barriers on sparc don't work with a base + index address
- tmp = FrameMap::G3_opr;
- ptr = new_pointer_register();
- __ add(src.result(), off.result(), ptr);
- pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- }
- __ xchg(LIR_OprFact::address(addr), dst, dst, tmp);
- if (is_obj) {
- // Seems to be a precise address
- post_barrier(ptr, data);
- }
-}
--- a/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -92,4 +92,7 @@
// This platform only uses signal-based null checks. The Label is not needed.
void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }
+ void save_live_registers_no_oop_map(bool save_fpu_registers);
+ void restore_live_registers(bool restore_fpu_registers);
+
#endif // CPU_SPARC_VM_C1_MACROASSEMBLER_SPARC_HPP
--- a/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -40,11 +40,6 @@
#include "utilities/macros.hpp"
#include "utilities/align.hpp"
#include "vmreg_sparc.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1ThreadLocalData.hpp"
-#endif
// Implementation of StubAssembler
@@ -145,10 +140,16 @@
return call_RT(oop_result1, metadata_result, entry, 3);
}
+void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
+ set_info(name, must_gc_arguments);
+}
+
+void StubAssembler::epilogue() {
+ delayed()->restore();
+}
// Implementation of Runtime1
-#define __ sasm->
static int cpu_reg_save_offsets[FrameMap::nof_cpu_regs];
static int fpu_reg_save_offsets[FrameMap::nof_fpu_regs];
@@ -156,7 +157,7 @@
static int frame_size_in_bytes = -1;
static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
- assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
+ assert(frame_size_in_bytes == sasm->total_frame_size_in_bytes(reg_save_size_in_words),
"mismatch in calculation");
sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
@@ -183,7 +184,9 @@
return oop_map;
}
-static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) {
+#define __ this->
+
+void C1_MacroAssembler::save_live_registers_no_oop_map(bool save_fpu_registers) {
assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
"mismatch in calculation");
__ save_frame_c1(frame_size_in_bytes);
@@ -211,11 +214,9 @@
__ stf(FloatRegisterImpl::S, r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
}
}
-
- return generate_oop_map(sasm, save_fpu_registers);
}
-static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
+void C1_MacroAssembler::restore_live_registers(bool restore_fpu_registers) {
for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
Register r = as_Register(i);
if (r == G1 || r == G3 || r == G4 || r == G5) {
@@ -231,6 +232,18 @@
}
}
+#undef __
+#define __ sasm->
+
+static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) {
+ sasm->save_live_registers_no_oop_map(save_fpu_registers);
+ return generate_oop_map(sasm, save_fpu_registers);
+}
+
+static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
+ sasm->restore_live_registers(restore_fpu_registers);
+}
+
void Runtime1::initialize_pd() {
// compute word offsets from SP at which live (non-windowed) registers are captured by stub routines
@@ -759,165 +772,6 @@
}
break;
-#if INCLUDE_ALL_GCS
- case g1_pre_barrier_slow_id:
- { // G4: previous value of memory
- BarrierSet* bs = BarrierSet::barrier_set();
- if (bs->kind() != BarrierSet::G1BarrierSet) {
- __ save_frame(0);
- __ set((int)id, O1);
- __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
- __ should_not_reach_here();
- break;
- }
-
- __ set_info("g1_pre_barrier_slow_id", dont_gc_arguments);
-
- Register pre_val = G4;
- Register tmp = G1_scratch;
- Register tmp2 = G3_scratch;
-
- Label refill, restart;
- int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
- int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
- int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
-
- // Is marking still active?
- if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
- __ ld(G2_thread, satb_q_active_byte_offset, tmp);
- } else {
- assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
- __ ldsb(G2_thread, satb_q_active_byte_offset, tmp);
- }
- __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, restart);
- __ retl();
- __ delayed()->nop();
-
- __ bind(restart);
- // Load the index into the SATB buffer. SATBMarkQueue::_index is a
- // size_t so ld_ptr is appropriate
- __ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);
-
- // index == 0?
- __ cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pn, refill);
-
- __ ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
- __ sub(tmp, oopSize, tmp);
-
- __ st_ptr(pre_val, tmp2, tmp); // [_buf + index] := <address_of_card>
- // Use return-from-leaf
- __ retl();
- __ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset);
-
- __ bind(refill);
-
- save_live_registers(sasm);
-
- __ call_VM_leaf(L7_thread_cache,
- CAST_FROM_FN_PTR(address,
- SATBMarkQueueSet::handle_zero_index_for_thread),
- G2_thread);
-
- restore_live_registers(sasm);
-
- __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
- __ delayed()->restore();
- }
- break;
-
- case g1_post_barrier_slow_id:
- {
- BarrierSet* bs = BarrierSet::barrier_set();
- if (bs->kind() != BarrierSet::G1BarrierSet) {
- __ save_frame(0);
- __ set((int)id, O1);
- __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), I0);
- __ should_not_reach_here();
- break;
- }
-
- __ set_info("g1_post_barrier_slow_id", dont_gc_arguments);
-
- Register addr = G4;
- Register cardtable = G5;
- Register tmp = G1_scratch;
- Register tmp2 = G3_scratch;
- jbyte* byte_map_base = ci_card_table_address();
-
- Label not_already_dirty, restart, refill, young_card;
-
- __ srlx(addr, CardTable::card_shift, addr);
-
- AddressLiteral rs(byte_map_base);
- __ set(rs, cardtable); // cardtable := <card table base>
- __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
-
- __ cmp_and_br_short(tmp, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
-
- __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
- __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
-
- assert(CardTable::dirty_card_val() == 0, "otherwise check this code");
- __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
-
- __ bind(young_card);
- // We didn't take the branch, so we're already dirty: return.
- // Use return-from-leaf
- __ retl();
- __ delayed()->nop();
-
- // Not dirty.
- __ bind(not_already_dirty);
-
- // Get cardtable + tmp into a reg by itself
- __ add(addr, cardtable, tmp2);
-
- // First, dirty it.
- __ stb(G0, tmp2, 0); // [cardPtr] := 0 (i.e., dirty).
-
- Register tmp3 = cardtable;
- Register tmp4 = tmp;
-
- // these registers are now dead
- addr = cardtable = tmp = noreg;
-
- int dirty_card_q_index_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
- int dirty_card_q_buf_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
-
- __ bind(restart);
-
- // Get the index into the update buffer. DirtyCardQueue::_index is
- // a size_t so ld_ptr is appropriate here.
- __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);
-
- // index == 0?
- __ cmp_and_brx_short(tmp3, G0, Assembler::equal, Assembler::pn, refill);
-
- __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
- __ sub(tmp3, oopSize, tmp3);
-
- __ st_ptr(tmp2, tmp4, tmp3); // [_buf + index] := <address_of_card>
- // Use return-from-leaf
- __ retl();
- __ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset);
-
- __ bind(refill);
-
- save_live_registers(sasm);
-
- __ call_VM_leaf(L7_thread_cache,
- CAST_FROM_FN_PTR(address,
- DirtyCardQueueSet::handle_zero_index_for_thread),
- G2_thread);
-
- restore_live_registers(sasm);
-
- __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
- __ delayed()->restore();
- }
- break;
-#endif // INCLUDE_ALL_GCS
-
case predicate_failed_trap_id:
{
__ set_info("predicate_failed_trap", dont_gc_arguments);
--- a/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -25,13 +25,18 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
+#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/g1/c1/g1BarrierSetC1.hpp"
+#endif
#define __ masm->
@@ -476,8 +481,6 @@
}
}
-#undef __
-
void G1BarrierSetAssembler::barrier_stubs_init() {
if (dirty_card_log_enqueue == 0) {
G1BarrierSet* bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
@@ -494,3 +497,211 @@
assert(satb_log_enqueue_frameless != 0, "postcondition.");
}
}
+
+#ifdef COMPILER1
+
+#undef __
+#define __ ce->masm()->
+
+void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
+ G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+ // At this point we know that marking is in progress.
+ // If do_load() is true then we have to emit the
+ // load of the previous value; otherwise it has already
+ // been loaded into _pre_val.
+
+ __ bind(*stub->entry());
+
+ assert(stub->pre_val()->is_register(), "Precondition.");
+ Register pre_val_reg = stub->pre_val()->as_register();
+
+ if (stub->do_load()) {
+ ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
+ }
+
+ if (__ is_in_wdisp16_range(*stub->continuation())) {
+ __ br_null(pre_val_reg, /*annul*/false, Assembler::pt, *stub->continuation());
+ } else {
+ __ cmp(pre_val_reg, G0);
+ __ brx(Assembler::equal, false, Assembler::pn, *stub->continuation());
+ }
+ __ delayed()->nop();
+
+ __ call(bs->pre_barrier_c1_runtime_code_blob()->code_begin());
+ __ delayed()->mov(pre_val_reg, G4);
+ __ br(Assembler::always, false, Assembler::pt, *stub->continuation());
+ __ delayed()->nop();
+}
+
+void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
+ G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+ __ bind(*stub->entry());
+
+ assert(stub->addr()->is_register(), "Precondition.");
+ assert(stub->new_val()->is_register(), "Precondition.");
+ Register addr_reg = stub->addr()->as_pointer_register();
+ Register new_val_reg = stub->new_val()->as_register();
+
+ if (__ is_in_wdisp16_range(*stub->continuation())) {
+ __ br_null(new_val_reg, /*annul*/false, Assembler::pt, *stub->continuation());
+ } else {
+ __ cmp(new_val_reg, G0);
+ __ brx(Assembler::equal, false, Assembler::pn, *stub->continuation());
+ }
+ __ delayed()->nop();
+
+ __ call(bs->post_barrier_c1_runtime_code_blob()->code_begin());
+ __ delayed()->mov(addr_reg, G4);
+ __ br(Assembler::always, false, Assembler::pt, *stub->continuation());
+ __ delayed()->nop();
+}
+
+#undef __
+#define __ sasm->
+
+void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
+ __ prologue("g1_pre_barrier", false);
+
+ // G4: previous value of memory
+
+ Register pre_val = G4;
+ Register tmp = G1_scratch;
+ Register tmp2 = G3_scratch;
+
+ Label refill, restart;
+ int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
+ int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
+ int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
+
+ // Is marking still active?
+ if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+ __ ld(G2_thread, satb_q_active_byte_offset, tmp);
+ } else {
+ assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+ __ ldsb(G2_thread, satb_q_active_byte_offset, tmp);
+ }
+ __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, restart);
+ __ retl();
+ __ delayed()->nop();
+
+ __ bind(restart);
+ // Load the index into the SATB buffer. SATBMarkQueue::_index is a
+ // size_t so ld_ptr is appropriate
+ __ ld_ptr(G2_thread, satb_q_index_byte_offset, tmp);
+
+ // index == 0?
+ __ cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pn, refill);
+
+ __ ld_ptr(G2_thread, satb_q_buf_byte_offset, tmp2);
+ __ sub(tmp, oopSize, tmp);
+
+ __ st_ptr(pre_val, tmp2, tmp); // [_buf + index] := <address_of_card>
+ // Use return-from-leaf
+ __ retl();
+ __ delayed()->st_ptr(tmp, G2_thread, satb_q_index_byte_offset);
+
+ __ bind(refill);
+
+ __ save_live_registers_no_oop_map(true);
+
+ __ call_VM_leaf(L7_thread_cache,
+ CAST_FROM_FN_PTR(address,
+ SATBMarkQueueSet::handle_zero_index_for_thread),
+ G2_thread);
+
+ __ restore_live_registers(true);
+
+ __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
+ __ epilogue();
+}
+
+void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
+ __ prologue("g1_post_barrier", false);
+
+ G1BarrierSet* bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
+
+ Register addr = G4;
+ Register cardtable = G5;
+ Register tmp = G1_scratch;
+ Register tmp2 = G3_scratch;
+ jbyte* byte_map_base = bs->card_table()->byte_map_base();
+
+ Label not_already_dirty, restart, refill, young_card;
+
+#ifdef _LP64
+ __ srlx(addr, CardTable::card_shift, addr);
+#else
+ __ srl(addr, CardTable::card_shift, addr);
+#endif
+
+ AddressLiteral rs((address)byte_map_base);
+ __ set(rs, cardtable); // cardtable := <card table base>
+ __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
+
+ __ cmp_and_br_short(tmp, G1CardTable::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
+
+ __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+ __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
+
+ assert(G1CardTable::dirty_card_val() == 0, "otherwise check this code");
+ __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
+
+ __ bind(young_card);
+ // We didn't take the branch, so we're already dirty: return.
+ // Use return-from-leaf
+ __ retl();
+ __ delayed()->nop();
+
+ // Not dirty.
+ __ bind(not_already_dirty);
+
+ // Get cardtable + tmp into a reg by itself
+ __ add(addr, cardtable, tmp2);
+
+ // First, dirty it.
+ __ stb(G0, tmp2, 0); // [cardPtr] := 0 (i.e., dirty).
+
+ Register tmp3 = cardtable;
+ Register tmp4 = tmp;
+
+ // these registers are now dead
+ addr = cardtable = tmp = noreg;
+
+ int dirty_card_q_index_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
+ int dirty_card_q_buf_byte_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
+
+ __ bind(restart);
+
+ // Get the index into the update buffer. DirtyCardQueue::_index is
+ // a size_t so ld_ptr is appropriate here.
+ __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, tmp3);
+
+ // index == 0?
+ __ cmp_and_brx_short(tmp3, G0, Assembler::equal, Assembler::pn, refill);
+
+ __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, tmp4);
+ __ sub(tmp3, oopSize, tmp3);
+
+ __ st_ptr(tmp2, tmp4, tmp3); // [_buf + index] := <address_of_card>
+ // Use return-from-leaf
+ __ retl();
+ __ delayed()->st_ptr(tmp3, G2_thread, dirty_card_q_index_byte_offset);
+
+ __ bind(refill);
+
+ __ save_live_registers_no_oop_map(true);
+
+ __ call_VM_leaf(L7_thread_cache,
+ CAST_FROM_FN_PTR(address,
+ DirtyCardQueueSet::handle_zero_index_for_thread),
+ G2_thread);
+
+ __ restore_live_registers(true);
+
+ __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
+ __ epilogue();
+}
+
+#undef __
+
+#endif // COMPILER1
--- a/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -27,6 +27,12 @@
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
+#include "utilities/macros.hpp"
+
+class LIR_Assembler;
+class StubAssembler;
+class G1PreBarrierStub;
+class G1PostBarrierStub;
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
@@ -40,6 +46,14 @@
Register val, Address dst, Register tmp);
public:
+#ifdef COMPILER1
+ void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
+ void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);
+
+ void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
+ void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
+#endif
+
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address src, Register dst, Register tmp);
virtual void barrier_stubs_init();
--- a/src/hotspot/cpu/sparc/gc/shared/barrierSetAssembler_sparc.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/sparc/gc/shared/barrierSetAssembler_sparc.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interp_masm.hpp"
+#include "runtime/jniHandles.hpp"
#define __ masm->
@@ -98,3 +99,8 @@
default: Unimplemented();
}
}
+
+void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register robj, Register tmp, Label& slowpath) {
+ __ andn (robj, JNIHandles::weak_tag_mask, robj);
+ __ ld_ptr(robj, 0, robj);
+}
--- a/src/hotspot/cpu/sparc/gc/shared/barrierSetAssembler_sparc.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/sparc/gc/shared/barrierSetAssembler_sparc.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -44,6 +44,9 @@
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address src, Register dst, Register tmp);
+ // Support for jniFastGetField to try resolving a jobject/jweak in native
+ virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register robj, Register tmp, Label& slowpath);
+
virtual void barrier_stubs_init() {}
};
--- a/src/hotspot/cpu/sparc/jniFastGetField_sparc.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/sparc/jniFastGetField_sparc.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -24,6 +24,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
@@ -68,17 +70,18 @@
__ andcc (G4, 1, G0);
__ br (Assembler::notZero, false, Assembler::pn, label1);
__ delayed()->srl (O2, 2, O4);
- __ andn (O1, JNIHandles::weak_tag_mask, O1);
- __ ld_ptr (O1, 0, O5);
+
+ BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->try_resolve_jobject_in_native(masm, O1, G3_scratch, label1);
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
speculative_load_pclist[count] = __ pc();
switch (type) {
- case T_BOOLEAN: __ ldub (O5, O4, G3); break;
- case T_BYTE: __ ldsb (O5, O4, G3); break;
- case T_CHAR: __ lduh (O5, O4, G3); break;
- case T_SHORT: __ ldsh (O5, O4, G3); break;
- case T_INT: __ ld (O5, O4, G3); break;
+ case T_BOOLEAN: __ ldub (O1, O4, G3); break;
+ case T_BYTE: __ ldsb (O1, O4, G3); break;
+ case T_CHAR: __ lduh (O1, O4, G3); break;
+ case T_SHORT: __ ldsh (O1, O4, G3); break;
+ case T_INT: __ ld (O1, O4, G3); break;
default: ShouldNotReachHere();
}
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -35,6 +35,7 @@
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/objectMonitor.hpp"
--- a/src/hotspot/cpu/sparc/methodHandles_sparc.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/sparc/methodHandles_sparc.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -31,6 +31,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/frame.inline.hpp"
#include "utilities/preserveException.hpp"
--- a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -33,9 +33,6 @@
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#endif // INCLUDE_ALL_GCS
#define __ ce->masm()->
@@ -521,45 +518,4 @@
__ jmp(_continuation);
}
-/////////////////////////////////////////////////////////////////////////////
-#if INCLUDE_ALL_GCS
-
-void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
- // At this point we know that marking is in progress.
- // If do_load() is true then we have to emit the
- // load of the previous value; otherwise it has already
- // been loaded into _pre_val.
-
- __ bind(_entry);
- assert(pre_val()->is_register(), "Precondition.");
-
- Register pre_val_reg = pre_val()->as_register();
-
- if (do_load()) {
- ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
- }
-
- __ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
- __ jcc(Assembler::equal, _continuation);
- ce->store_parameter(pre_val()->as_register(), 0);
- __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
- __ jmp(_continuation);
-
-}
-
-void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
- __ bind(_entry);
- assert(addr()->is_register(), "Precondition.");
- assert(new_val()->is_register(), "Precondition.");
- Register new_val_reg = new_val()->as_register();
- __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
- __ jcc(Assembler::equal, _continuation);
- ce->store_parameter(addr()->as_pointer_register(), 0);
- __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
- __ jmp(_continuation);
-}
-
-#endif // INCLUDE_ALL_GCS
-/////////////////////////////////////////////////////////////////////////////
-
#undef __
--- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -33,6 +33,7 @@
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
+#include "gc/shared/c1/barrierSetC1.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_x86.inline.hpp"
@@ -152,9 +153,27 @@
int shift, int disp, BasicType type) {
assert(base->is_register(), "must be");
if (index->is_constant()) {
+ LIR_Const *constant = index->as_constant_ptr();
+#ifdef _LP64
+ jlong c;
+ if (constant->type() == T_INT) {
+ c = (jlong(index->as_jint()) << shift) + disp;
+ } else {
+ assert(constant->type() == T_LONG, "should be");
+ c = (index->as_jlong() << shift) + disp;
+ }
+ if ((jlong)((jint)c) == c) {
+ return new LIR_Address(base, (jint)c, type);
+ } else {
+ LIR_Opr tmp = new_register(T_LONG);
+ __ move(index, tmp);
+ return new LIR_Address(base, tmp, type);
+ }
+#else
return new LIR_Address(base,
- ((intx)(index->as_constant_ptr()->as_jint()) << shift) + disp,
+ ((intx)(constant->as_jint()) << shift) + disp,
type);
+#endif
} else {
return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
}
@@ -162,7 +181,7 @@
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
- BasicType type, bool needs_card_mark) {
+ BasicType type) {
int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
LIR_Address* addr;
@@ -183,16 +202,7 @@
LIR_Address::scale(type),
offset_in_bytes, type);
}
- if (needs_card_mark) {
- // This store will need a precise card mark, so go ahead and
- // compute the full adddres instead of computing once for the
- // store and again for the card mark.
- LIR_Opr tmp = new_pointer_register();
- __ leal(LIR_OprFact::address(addr), tmp);
- return new LIR_Address(tmp, type);
- } else {
- return addr;
- }
+ return addr;
}
@@ -253,87 +263,17 @@
__ store(item, new LIR_Address(FrameMap::rsp_opr, in_bytes(offset_from_sp), type));
}
+void LIRGenerator::array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci) {
+ LIR_Opr tmp1 = new_register(objectType);
+ LIR_Opr tmp2 = new_register(objectType);
+ LIR_Opr tmp3 = new_register(objectType);
+ __ store_check(value, array, tmp1, tmp2, tmp3, store_check_info, profiled_method, profiled_bci);
+}
+
//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------
-
-void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
- assert(x->is_pinned(),"");
- bool needs_range_check = x->compute_needs_range_check();
- bool use_length = x->length() != NULL;
- bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
- bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
- !get_jobject_constant(x->value())->is_null_object() ||
- x->should_profile());
-
- LIRItem array(x->array(), this);
- LIRItem index(x->index(), this);
- LIRItem value(x->value(), this);
- LIRItem length(this);
-
- array.load_item();
- index.load_nonconstant();
-
- if (use_length && needs_range_check) {
- length.set_instruction(x->length());
- length.load_item();
-
- }
- if (needs_store_check || x->check_boolean()) {
- value.load_item();
- } else {
- value.load_for_store(x->elt_type());
- }
-
- set_no_result(x);
-
- // the CodeEmitInfo must be duplicated for each different
- // LIR-instruction because spilling can occur anywhere between two
- // instructions and so the debug information must be different
- CodeEmitInfo* range_check_info = state_for(x);
- CodeEmitInfo* null_check_info = NULL;
- if (x->needs_null_check()) {
- null_check_info = new CodeEmitInfo(range_check_info);
- }
-
- // emit array address setup early so it schedules better
- LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
-
- if (GenerateRangeChecks && needs_range_check) {
- if (use_length) {
- __ cmp(lir_cond_belowEqual, length.result(), index.result());
- __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
- } else {
- array_range_check(array.result(), index.result(), null_check_info, range_check_info);
- // range_check also does the null check
- null_check_info = NULL;
- }
- }
-
- if (GenerateArrayStoreCheck && needs_store_check) {
- LIR_Opr tmp1 = new_register(objectType);
- LIR_Opr tmp2 = new_register(objectType);
- LIR_Opr tmp3 = new_register(objectType);
-
- CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
- __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
- }
-
- if (obj_store) {
- // Needs GC write barriers.
- pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- __ move(value.result(), array_addr, null_check_info);
- // Seems to be a precise
- post_barrier(LIR_OprFact::address(array_addr), value.result());
- } else {
- LIR_Opr result = maybe_mask_boolean(x, array.result(), value.result(), null_check_info);
- __ move(result, array_addr, null_check_info);
- }
-}
-
-
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
assert(x->is_pinned(),"");
LIRItem obj(x->obj(), this);
@@ -715,93 +655,48 @@
}
}
-
-void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
- assert(x->number_of_arguments() == 4, "wrong type");
- LIRItem obj (x->argument_at(0), this); // object
- LIRItem offset(x->argument_at(1), this); // offset of field
- LIRItem cmp (x->argument_at(2), this); // value to compare with field
- LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
-
- assert(obj.type()->tag() == objectTag, "invalid type");
-
- // In 64bit the type can be long, sparc doesn't have this assert
- // assert(offset.type()->tag() == intTag, "invalid type");
-
- assert(cmp.type()->tag() == type->tag(), "invalid type");
- assert(val.type()->tag() == type->tag(), "invalid type");
-
- // get address of field
- obj.load_item();
- offset.load_nonconstant();
-
- LIR_Opr addr = new_pointer_register();
- LIR_Address* a;
- if(offset.result()->is_constant()) {
-#ifdef _LP64
- jlong c = offset.result()->as_jlong();
- if ((jlong)((jint)c) == c) {
- a = new LIR_Address(obj.result(),
- (jint)c,
- as_BasicType(type));
- } else {
- LIR_Opr tmp = new_register(T_LONG);
- __ move(offset.result(), tmp);
- a = new LIR_Address(obj.result(),
- tmp,
- as_BasicType(type));
- }
-#else
- a = new LIR_Address(obj.result(),
- offset.result()->as_jint(),
- as_BasicType(type));
-#endif
+LIR_Opr LIRGenerator::atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value) {
+ LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
+ if (type == T_OBJECT || type == T_ARRAY) {
+ cmp_value.load_item_force(FrameMap::rax_oop_opr);
+ new_value.load_item();
+ __ cas_obj(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
+ } else if (type == T_INT) {
+ cmp_value.load_item_force(FrameMap::rax_opr);
+ new_value.load_item();
+ __ cas_int(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
+ } else if (type == T_LONG) {
+ cmp_value.load_item_force(FrameMap::long0_opr);
+ new_value.load_item_force(FrameMap::long1_opr);
+ __ cas_long(addr->as_address_ptr()->base(), cmp_value.result(), new_value.result(), ill, ill);
} else {
- a = new LIR_Address(obj.result(),
- offset.result(),
- 0,
- as_BasicType(type));
+ Unimplemented();
}
- __ leal(LIR_OprFact::address(a), addr);
-
- if (type == objectType) { // Write-barrier needed for Object fields.
- // Do the pre-write barrier, if any.
- pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- }
+ LIR_Opr result = new_register(T_INT);
+ __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
+ result, type);
+ return result;
+}
- if (type == objectType) {
- cmp.load_item_force(FrameMap::rax_oop_opr);
- val.load_item();
- } else if (type == intType) {
- cmp.load_item_force(FrameMap::rax_opr);
- val.load_item();
- } else if (type == longType) {
- cmp.load_item_force(FrameMap::long0_opr);
- val.load_item_force(FrameMap::long1_opr);
- } else {
- ShouldNotReachHere();
- }
+LIR_Opr LIRGenerator::atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& value) {
+ bool is_oop = type == T_OBJECT || type == T_ARRAY;
+ LIR_Opr result = new_register(type);
+ value.load_item();
+ // Because we want a 2-arg form of xchg and xadd
+ __ move(value.result(), result);
+ assert(type == T_INT || is_oop LP64_ONLY( || type == T_LONG ), "unexpected type");
+ __ xchg(addr, result, result, LIR_OprFact::illegalOpr);
+ return result;
+}
- LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
- if (type == objectType)
- __ cas_obj(addr, cmp.result(), val.result(), ill, ill);
- else if (type == intType)
- __ cas_int(addr, cmp.result(), val.result(), ill, ill);
- else if (type == longType)
- __ cas_long(addr, cmp.result(), val.result(), ill, ill);
- else {
- ShouldNotReachHere();
- }
-
- // generate conditional move of boolean result
- LIR_Opr result = rlock_result(x);
- __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
- result, as_BasicType(type));
- if (type == objectType) { // Write-barrier needed for Object fields.
- // Seems to be precise
- post_barrier(addr, val.result());
- }
+LIR_Opr LIRGenerator::atomic_add(BasicType type, LIR_Opr addr, LIRItem& value) {
+ LIR_Opr result = new_register(type);
+ value.load_item();
+ // Because we want a 2-arg form of xchg and xadd
+ __ move(value.result(), result);
+ assert(type == T_INT LP64_ONLY( || type == T_LONG ), "unexpected type");
+ __ xadd(addr, result, result, LIR_OprFact::illegalOpr);
+ return result;
}
void LIRGenerator::do_FmaIntrinsic(Intrinsic* x) {
@@ -1570,8 +1465,6 @@
}
}
-
-
void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
CodeEmitInfo* info) {
if (address->type() == T_LONG) {
@@ -1593,100 +1486,3 @@
__ load(address, result, info);
}
}
-
-void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
- BasicType type, bool is_volatile) {
- if (is_volatile && type == T_LONG) {
- LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
- LIR_Opr tmp = new_register(T_DOUBLE);
- __ load(addr, tmp);
- LIR_Opr spill = new_register(T_LONG);
- set_vreg_flag(spill, must_start_in_memory);
- __ move(tmp, spill);
- __ move(spill, dst);
- } else {
- LIR_Address* addr = new LIR_Address(src, offset, type);
- __ load(addr, dst);
- }
-}
-
-
-void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
- BasicType type, bool is_volatile) {
- if (is_volatile && type == T_LONG) {
- LIR_Address* addr = new LIR_Address(src, offset, T_DOUBLE);
- LIR_Opr tmp = new_register(T_DOUBLE);
- LIR_Opr spill = new_register(T_DOUBLE);
- set_vreg_flag(spill, must_start_in_memory);
- __ move(data, spill);
- __ move(spill, tmp);
- __ move(tmp, addr);
- } else {
- LIR_Address* addr = new LIR_Address(src, offset, type);
- bool is_obj = (type == T_ARRAY || type == T_OBJECT);
- if (is_obj) {
- // Do the pre-write barrier, if any.
- pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- __ move(data, addr);
- assert(src->is_register(), "must be register");
- // Seems to be a precise address
- post_barrier(LIR_OprFact::address(addr), data);
- } else {
- __ move(data, addr);
- }
- }
-}
-
-void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
- BasicType type = x->basic_type();
- LIRItem src(x->object(), this);
- LIRItem off(x->offset(), this);
- LIRItem value(x->value(), this);
-
- src.load_item();
- value.load_item();
- off.load_nonconstant();
-
- LIR_Opr dst = rlock_result(x, type);
- LIR_Opr data = value.result();
- bool is_obj = (type == T_ARRAY || type == T_OBJECT);
- LIR_Opr offset = off.result();
-
- assert (type == T_INT || (!x->is_add() && is_obj) LP64_ONLY( || type == T_LONG ), "unexpected type");
- LIR_Address* addr;
- if (offset->is_constant()) {
-#ifdef _LP64
- jlong c = offset->as_jlong();
- if ((jlong)((jint)c) == c) {
- addr = new LIR_Address(src.result(), (jint)c, type);
- } else {
- LIR_Opr tmp = new_register(T_LONG);
- __ move(offset, tmp);
- addr = new LIR_Address(src.result(), tmp, type);
- }
-#else
- addr = new LIR_Address(src.result(), offset->as_jint(), type);
-#endif
- } else {
- addr = new LIR_Address(src.result(), offset, type);
- }
-
- // Because we want a 2-arg form of xchg and xadd
- __ move(data, dst);
-
- if (x->is_add()) {
- __ xadd(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
- } else {
- if (is_obj) {
- // Do the pre-write barrier, if any.
- pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load */, false /* patch */, NULL);
- }
- __ xchg(LIR_OprFact::address(addr), dst, dst, LIR_OprFact::illegalOpr);
- if (is_obj) {
- // Seems to be a precise address
- post_barrier(LIR_OprFact::address(addr), data);
- }
- }
-}
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -356,6 +356,15 @@
verify_FPU(0, "method_entry");
}
+void C1_MacroAssembler::load_parameter(int offset_in_words, Register reg) {
+ // rbp, + 0: link
+ // + 1: return address
+ // + 2: argument with offset 0
+ // + 3: argument with offset 1
+ // + 4: ...
+
+ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
+}
#ifndef PRODUCT
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -121,4 +121,9 @@
// This platform only uses signal-based null checks. The Label is not needed.
void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }
+ void load_parameter(int offset_in_words, Register reg);
+
+ void save_live_registers_no_oop_map(int num_rt_args, bool save_fpu_registers);
+ void restore_live_registers(bool restore_fpu_registers);
+
#endif // CPU_X86_VM_C1_MACROASSEMBLER_X86_HPP
--- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -41,12 +41,6 @@
#include "runtime/vframeArray.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#include "gc/g1/g1CardTable.hpp"
-#include "gc/g1/g1ThreadLocalData.hpp"
-#endif
-
// Implementation of StubAssembler
@@ -212,31 +206,32 @@
~StubFrame();
};
+void StubAssembler::prologue(const char* name, bool must_gc_arguments) {
+ set_info(name, must_gc_arguments);
+ enter();
+}
+
+void StubAssembler::epilogue() {
+ leave();
+ ret(0);
+}
#define __ _sasm->
StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) {
_sasm = sasm;
- __ set_info(name, must_gc_arguments);
- __ enter();
+ __ prologue(name, must_gc_arguments);
}
// load parameters that were stored with LIR_Assembler::store_parameter
// Note: offsets for store_parameter and load_argument must match
void StubFrame::load_argument(int offset_in_words, Register reg) {
- // rbp, + 0: link
- // + 1: return address
- // + 2: argument with offset 0
- // + 3: argument with offset 1
- // + 4: ...
-
- __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
+ __ load_parameter(offset_in_words, reg);
}
StubFrame::~StubFrame() {
- __ leave();
- __ ret(0);
+ __ epilogue();
}
#undef __
@@ -244,8 +239,6 @@
// Implementation of Runtime1
-#define __ sasm->
-
const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;
@@ -310,8 +303,6 @@
reg_save_frame_size // As noted: neglects any parameters to runtime // 504
};
-
-
// Save off registers which might be killed by calls into the runtime.
// Tries to smart of about FP registers. In particular we separate
// saving and describing the FPU registers for deoptimization since we
@@ -418,8 +409,9 @@
return map;
}
-static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
- bool save_fpu_registers = true) {
+#define __ this->
+
+void C1_MacroAssembler::save_live_registers_no_oop_map(int num_rt_args, bool save_fpu_registers) {
__ block_comment("save_live_registers");
__ pusha(); // integer registers
@@ -493,12 +485,12 @@
// FPU stack must be empty now
__ verify_FPU(0, "save_live_registers");
-
- return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
}
+#undef __
+#define __ sasm->
-static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true) {
+static void restore_fpu(C1_MacroAssembler* sasm, bool restore_fpu_registers) {
if (restore_fpu_registers) {
if (UseSSE >= 2) {
// restore XMM registers
@@ -549,14 +541,28 @@
__ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}
+#undef __
+#define __ this->
-static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
+void C1_MacroAssembler::restore_live_registers(bool restore_fpu_registers) {
__ block_comment("restore_live_registers");
- restore_fpu(sasm, restore_fpu_registers);
+ restore_fpu(this, restore_fpu_registers);
__ popa();
}
+#undef __
+#define __ sasm->
+
+static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
+ bool save_fpu_registers = true) {
+ sasm->save_live_registers_no_oop_map(num_rt_args, save_fpu_registers);
+ return generate_oop_map(sasm, num_rt_args, save_fpu_registers);
+}
+
+static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
+ sasm->restore_live_registers(restore_fpu_registers);
+}
static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) {
__ block_comment("restore_live_registers_except_rax");
@@ -1557,159 +1563,6 @@
}
break;
-#if INCLUDE_ALL_GCS
- case g1_pre_barrier_slow_id:
- {
- StubFrame f(sasm, "g1_pre_barrier", dont_gc_arguments);
- // arg0 : previous value of memory
-
- BarrierSet* bs = BarrierSet::barrier_set();
- if (bs->kind() != BarrierSet::G1BarrierSet) {
- __ movptr(rax, (int)id);
- __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
- __ should_not_reach_here();
- break;
- }
- __ push(rax);
- __ push(rdx);
-
- const Register pre_val = rax;
- const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
- const Register tmp = rdx;
-
- NOT_LP64(__ get_thread(thread);)
-
- Address queue_active(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
- Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
- Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
-
- Label done;
- Label runtime;
-
- // Is marking still active?
- if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
- __ cmpl(queue_active, 0);
- } else {
- assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
- __ cmpb(queue_active, 0);
- }
- __ jcc(Assembler::equal, done);
-
- // Can we store original value in the thread's buffer?
-
- __ movptr(tmp, queue_index);
- __ testptr(tmp, tmp);
- __ jcc(Assembler::zero, runtime);
- __ subptr(tmp, wordSize);
- __ movptr(queue_index, tmp);
- __ addptr(tmp, buffer);
-
- // prev_val (rax)
- f.load_argument(0, pre_val);
- __ movptr(Address(tmp, 0), pre_val);
- __ jmp(done);
-
- __ bind(runtime);
-
- save_live_registers(sasm, 3);
-
- // load the pre-value
- f.load_argument(0, rcx);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
-
- restore_live_registers(sasm);
-
- __ bind(done);
-
- __ pop(rdx);
- __ pop(rax);
- }
- break;
-
- case g1_post_barrier_slow_id:
- {
- StubFrame f(sasm, "g1_post_barrier", dont_gc_arguments);
-
- BarrierSet* bs = BarrierSet::barrier_set();
- if (bs->kind() != BarrierSet::G1BarrierSet) {
- __ movptr(rax, (int)id);
- __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
- __ should_not_reach_here();
- break;
- }
-
- // arg0: store_address
- Address store_addr(rbp, 2*BytesPerWord);
-
- Label done;
- Label enqueued;
- Label runtime;
-
- // At this point we know new_value is non-NULL and the new_value crosses regions.
- // Must check to see if card is already dirty
-
- const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
-
- Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
- Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
-
- __ push(rax);
- __ push(rcx);
-
- const Register cardtable = rax;
- const Register card_addr = rcx;
-
- f.load_argument(0, card_addr);
- __ shrptr(card_addr, CardTable::card_shift);
- // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
- // a valid address and therefore is not properly handled by the relocation code.
- __ movptr(cardtable, ci_card_table_address_as<intptr_t>());
- __ addptr(card_addr, cardtable);
-
- NOT_LP64(__ get_thread(thread);)
-
- __ cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val());
- __ jcc(Assembler::equal, done);
-
- __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
- __ cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
- __ jcc(Assembler::equal, done);
-
- // storing region crossing non-NULL, card is clean.
- // dirty card and log.
-
- __ movb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
-
- const Register tmp = rdx;
- __ push(rdx);
-
- __ movptr(tmp, queue_index);
- __ testptr(tmp, tmp);
- __ jcc(Assembler::zero, runtime);
- __ subptr(tmp, wordSize);
- __ movptr(queue_index, tmp);
- __ addptr(tmp, buffer);
- __ movptr(Address(tmp, 0), card_addr);
- __ jmp(enqueued);
-
- __ bind(runtime);
-
- save_live_registers(sasm, 3);
-
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
-
- restore_live_registers(sasm);
-
- __ bind(enqueued);
- __ pop(rdx);
-
- __ bind(done);
- __ pop(rcx);
- __ pop(rax);
- }
- break;
-#endif // INCLUDE_ALL_GCS
-
case predicate_failed_trap_id:
{
StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
--- a/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -32,6 +32,11 @@
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
+#ifdef COMPILER1
+#include "c1/c1_LIRAssembler.hpp"
+#include "c1/c1_MacroAssembler.hpp"
+#include "gc/g1/c1/g1BarrierSetC1.hpp"
+#endif
#define __ masm->
@@ -399,3 +404,193 @@
}
NOT_LP64(imasm->restore_bcp());
}
+
+#ifdef COMPILER1
+
+#undef __
+#define __ ce->masm()->
+
+void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
+ G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+ // At this point we know that marking is in progress.
+ // If do_load() is true then we have to emit the
+ // load of the previous value; otherwise it has already
+ // been loaded into _pre_val.
+
+ __ bind(*stub->entry());
+ assert(stub->pre_val()->is_register(), "Precondition.");
+
+ Register pre_val_reg = stub->pre_val()->as_register();
+
+ if (stub->do_load()) {
+ ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
+ }
+
+ __ cmpptr(pre_val_reg, (int32_t)NULL_WORD);
+ __ jcc(Assembler::equal, *stub->continuation());
+ ce->store_parameter(stub->pre_val()->as_register(), 0);
+ __ call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
+ __ jmp(*stub->continuation());
+
+}
+
+void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
+ G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
+ __ bind(*stub->entry());
+ assert(stub->addr()->is_register(), "Precondition.");
+ assert(stub->new_val()->is_register(), "Precondition.");
+ Register new_val_reg = stub->new_val()->as_register();
+ __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
+ __ jcc(Assembler::equal, *stub->continuation());
+ ce->store_parameter(stub->addr()->as_pointer_register(), 0);
+ __ call(RuntimeAddress(bs->post_barrier_c1_runtime_code_blob()->code_begin()));
+ __ jmp(*stub->continuation());
+}
+
+#undef __
+
+#define __ sasm->
+
+void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
+ __ prologue("g1_pre_barrier", false);
+ // arg0 : previous value of memory
+
+ __ push(rax);
+ __ push(rdx);
+
+ const Register pre_val = rax;
+ const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
+ const Register tmp = rdx;
+
+ NOT_LP64(__ get_thread(thread);)
+
+ Address queue_active(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()));
+ Address queue_index(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset()));
+ Address buffer(thread, in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset()));
+
+ Label done;
+ Label runtime;
+
+ // Is marking still active?
+ if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+ __ cmpl(queue_active, 0);
+ } else {
+ assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+ __ cmpb(queue_active, 0);
+ }
+ __ jcc(Assembler::equal, done);
+
+ // Can we store original value in the thread's buffer?
+
+ __ movptr(tmp, queue_index);
+ __ testptr(tmp, tmp);
+ __ jcc(Assembler::zero, runtime);
+ __ subptr(tmp, wordSize);
+ __ movptr(queue_index, tmp);
+ __ addptr(tmp, buffer);
+
+ // prev_val (rax)
+ __ load_parameter(0, pre_val);
+ __ movptr(Address(tmp, 0), pre_val);
+ __ jmp(done);
+
+ __ bind(runtime);
+
+ __ save_live_registers_no_oop_map(3, true);
+
+ // load the pre-value
+ __ load_parameter(0, rcx);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
+
+ __ restore_live_registers(true);
+
+ __ bind(done);
+
+ __ pop(rdx);
+ __ pop(rax);
+
+ __ epilogue();
+}
+
+void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
+ __ prologue("g1_post_barrier", false);
+
+ // arg0: store_address
+ Address store_addr(rbp, 2*BytesPerWord);
+
+ CardTableBarrierSet* ct =
+ barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
+ assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");
+
+ Label done;
+ Label enqueued;
+ Label runtime;
+
+ // At this point we know new_value is non-NULL and the new_value crosses regions.
+ // Must check to see if card is already dirty
+
+ const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
+
+ Address queue_index(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset()));
+ Address buffer(thread, in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset()));
+
+ __ push(rax);
+ __ push(rcx);
+
+ const Register cardtable = rax;
+ const Register card_addr = rcx;
+
+ __ load_parameter(0, card_addr);
+ __ shrptr(card_addr, CardTable::card_shift);
+ // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
+ // a valid address and therefore is not properly handled by the relocation code.
+ __ movptr(cardtable, (intptr_t)ct->card_table()->byte_map_base());
+ __ addptr(card_addr, cardtable);
+
+ NOT_LP64(__ get_thread(thread);)
+
+ __ cmpb(Address(card_addr, 0), (int)G1CardTable::g1_young_card_val());
+ __ jcc(Assembler::equal, done);
+
+ __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+ __ cmpb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
+ __ jcc(Assembler::equal, done);
+
+ // storing region crossing non-NULL, card is clean.
+ // dirty card and log.
+
+ __ movb(Address(card_addr, 0), (int)CardTable::dirty_card_val());
+
+ const Register tmp = rdx;
+ __ push(rdx);
+
+ __ movptr(tmp, queue_index);
+ __ testptr(tmp, tmp);
+ __ jcc(Assembler::zero, runtime);
+ __ subptr(tmp, wordSize);
+ __ movptr(queue_index, tmp);
+ __ addptr(tmp, buffer);
+ __ movptr(Address(tmp, 0), card_addr);
+ __ jmp(enqueued);
+
+ __ bind(runtime);
+
+ __ save_live_registers_no_oop_map(3, true);
+
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
+
+ __ restore_live_registers(true);
+
+ __ bind(enqueued);
+ __ pop(rdx);
+
+ __ bind(done);
+ __ pop(rcx);
+ __ pop(rax);
+
+ __ epilogue();
+}
+
+#undef __
+
+#endif // COMPILER1
--- a/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -28,6 +28,11 @@
#include "asm/macroAssembler.hpp"
#include "gc/shared/modRefBarrierSetAssembler.hpp"
+class LIR_Assembler;
+class StubAssembler;
+class G1PreBarrierStub;
+class G1PostBarrierStub;
+
class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
protected:
virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count);
@@ -52,6 +57,12 @@
Address dst, Register val, Register tmp1, Register tmp2);
public:
+ void gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub);
+ void gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub);
+
+ void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm);
+ void generate_c1_post_barrier_runtime_stub(StubAssembler* sasm);
+
virtual void load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Address src, Register tmp1, Register tmp_thread);
};
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interp_masm.hpp"
+#include "runtime/jniHandles.hpp"
#define __ masm->
@@ -108,3 +109,8 @@
default: Unimplemented();
}
}
+
+void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register robj, Register tmp, Label& slowpath) {
+ __ clear_jweak_tag(robj);
+ __ movptr(robj, Address(robj, 0));
+}
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -44,6 +44,9 @@
virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2);
+ // Support for jniFastGetField to try resolving a jobject/jweak in native
+ virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register robj, Register tmp, Label& slowpath);
+
virtual void barrier_stubs_init() {}
};
--- a/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -90,8 +90,9 @@
// register obj is destroyed afterwards.
BarrierSet* bs = BarrierSet::barrier_set();
- CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(bs);
- assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
__ shrptr(obj, CardTable::card_shift);
@@ -102,15 +103,15 @@
// So this essentially converts an address to a displacement and it will
// never need to be relocated. On 64bit however the value may be too
// large for a 32bit displacement.
- intptr_t disp = (intptr_t) ct->card_table()->byte_map_base();
- if (__ is_simm32(disp)) {
- card_addr = Address(noreg, obj, Address::times_1, disp);
+ intptr_t byte_map_base = (intptr_t)ct->byte_map_base();
+ if (__ is_simm32(byte_map_base)) {
+ card_addr = Address(noreg, obj, Address::times_1, byte_map_base);
} else {
- // By doing it as an ExternalAddress 'disp' could be converted to a rip-relative
+ // By doing it as an ExternalAddress 'byte_map_base' could be converted to a rip-relative
// displacement and done in a single instruction given favorable mapping and a
// smarter version of as_Address. However, 'ExternalAddress' generates a relocation
// entry and that entry is not properly handled by the relocation code.
- AddressLiteral cardtable((address)ct->card_table()->byte_map_base(), relocInfo::none);
+ AddressLiteral cardtable((address)byte_map_base, relocInfo::none);
Address index(noreg, obj, Address::times_1);
card_addr = __ as_Address(ArrayAddress(cardtable, index));
}
@@ -118,7 +119,7 @@
int dirty = CardTable::dirty_card_val();
if (UseCondCardMark) {
Label L_already_dirty;
- if (UseConcMarkSweepGC) {
+ if (ct->scanned_concurrently()) {
__ membar(Assembler::StoreLoad);
}
__ cmpb(card_addr, dirty);
--- a/src/hotspot/cpu/x86/jniFastGetField_x86_64.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/x86/jniFastGetField_x86_64.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -24,6 +24,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm_misc.hpp"
@@ -81,12 +83,12 @@
// robj is data dependent on rcounter.
}
- __ clear_jweak_tag(robj);
-
- __ movptr(robj, Address(robj, 0)); // *obj
__ mov (roffset, c_rarg2);
__ shrptr(roffset, 2); // offset
+ BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->try_resolve_jobject_in_native(masm, robj, rscratch1, slow);
+
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
speculative_load_pclist[count] = __ pc();
switch (type) {
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -37,6 +37,7 @@
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
--- a/src/hotspot/cpu/x86/methodHandles_x86.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/cpu/x86/methodHandles_x86.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -31,6 +31,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/frame.inline.hpp"
#include "utilities/preserveException.hpp"
--- a/src/hotspot/share/c1/c1_CodeStubs.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/c1/c1_CodeStubs.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -533,92 +533,4 @@
#endif // PRODUCT
};
-//////////////////////////////////////////////////////////////////////////////////////////
-#if INCLUDE_ALL_GCS
-
-// Code stubs for Garbage-First barriers.
-class G1PreBarrierStub: public CodeStub {
- private:
- bool _do_load;
- LIR_Opr _addr;
- LIR_Opr _pre_val;
- LIR_PatchCode _patch_code;
- CodeEmitInfo* _info;
-
- public:
- // Version that _does_ generate a load of the previous value from addr.
- // addr (the address of the field to be read) must be a LIR_Address
- // pre_val (a temporary register) must be a register;
- G1PreBarrierStub(LIR_Opr addr, LIR_Opr pre_val, LIR_PatchCode patch_code, CodeEmitInfo* info) :
- _addr(addr), _pre_val(pre_val), _do_load(true),
- _patch_code(patch_code), _info(info)
- {
- assert(_pre_val->is_register(), "should be temporary register");
- assert(_addr->is_address(), "should be the address of the field");
- }
-
- // Version that _does not_ generate load of the previous value; the
- // previous value is assumed to have already been loaded into pre_val.
- G1PreBarrierStub(LIR_Opr pre_val) :
- _addr(LIR_OprFact::illegalOpr), _pre_val(pre_val), _do_load(false),
- _patch_code(lir_patch_none), _info(NULL)
- {
- assert(_pre_val->is_register(), "should be a register");
- }
-
- LIR_Opr addr() const { return _addr; }
- LIR_Opr pre_val() const { return _pre_val; }
- LIR_PatchCode patch_code() const { return _patch_code; }
- CodeEmitInfo* info() const { return _info; }
- bool do_load() const { return _do_load; }
-
- virtual void emit_code(LIR_Assembler* e);
- virtual void visit(LIR_OpVisitState* visitor) {
- if (_do_load) {
- // don't pass in the code emit info since it's processed in the fast
- // path
- if (_info != NULL)
- visitor->do_slow_case(_info);
- else
- visitor->do_slow_case();
-
- visitor->do_input(_addr);
- visitor->do_temp(_pre_val);
- } else {
- visitor->do_slow_case();
- visitor->do_input(_pre_val);
- }
- }
-#ifndef PRODUCT
- virtual void print_name(outputStream* out) const { out->print("G1PreBarrierStub"); }
-#endif // PRODUCT
-};
-
-class G1PostBarrierStub: public CodeStub {
- private:
- LIR_Opr _addr;
- LIR_Opr _new_val;
-
- public:
- // addr (the address of the object head) and new_val must be registers.
- G1PostBarrierStub(LIR_Opr addr, LIR_Opr new_val): _addr(addr), _new_val(new_val) { }
-
- LIR_Opr addr() const { return _addr; }
- LIR_Opr new_val() const { return _new_val; }
-
- virtual void emit_code(LIR_Assembler* e);
- virtual void visit(LIR_OpVisitState* visitor) {
- // don't pass in the code emit info since it's processed in the fast path
- visitor->do_slow_case();
- visitor->do_input(_addr);
- visitor->do_input(_new_val);
- }
-#ifndef PRODUCT
- virtual void print_name(outputStream* out) const { out->print("G1PostBarrierStub"); }
-#endif // PRODUCT
-};
-
-#endif // INCLUDE_ALL_GCS
-//////////////////////////////////////////////////////////////////////////////////////////
-
#endif // SHARE_VM_C1_C1_CODESTUBS_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/c1/c1_Decorators.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_C1_C1_DECORATORS_HPP
+#define SHARE_VM_C1_C1_DECORATORS_HPP
+
+#include "oops/accessDecorators.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Use the C1_NEEDS_PATCHING decorator for situations when the access is using
+// an offset that is not yet known and will require patching
+const DecoratorSet C1_NEEDS_PATCHING = DECORATOR_LAST << 1;
+// Use the C1_MASK_BOOLEAN decorator for boolean accesses where the value
+// needs to be masked.
+const DecoratorSet C1_MASK_BOOLEAN = DECORATOR_LAST << 2;
+// The C1_WRITE_ACCESS decorator is used to mark writing accesses.
+const DecoratorSet C1_WRITE_ACCESS = DECORATOR_LAST << 3;
+// The C1_READ_ACCESS decorator is used to mark reading accesses.
+const DecoratorSet C1_READ_ACCESS = DECORATOR_LAST << 4;
+
+#endif // SHARE_VM_C1_C1_DECORATORS_HPP
--- a/src/hotspot/share/c1/c1_LIRAssembler.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/c1/c1_LIRAssembler.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -260,6 +260,8 @@
#include CPU_HEADER(c1_LIRAssembler)
+ public:
+
static int call_stub_size() {
if (UseAOT) {
return _call_stub_size + _call_aot_stub_size;
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -34,19 +34,14 @@
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciUtilities.hpp"
-#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableBarrierSet.hpp"
-#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/c1/barrierSetC1.hpp"
#include "runtime/arguments.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1ThreadLocalData.hpp"
-#include "gc/g1/heapRegion.hpp"
-#endif // INCLUDE_ALL_GCS
#ifdef TRACE_HAVE_INTRINSICS
#include "trace/traceMacros.hpp"
#endif
@@ -313,11 +308,6 @@
//--------------------------------------------------------------
-void LIRGenerator::init() {
- _bs = BarrierSet::barrier_set();
-}
-
-
void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
if (PrintIRWithLIR) {
@@ -1245,19 +1235,9 @@
info = state_for(x);
}
- LIR_Address* referent_field_adr =
- new LIR_Address(reference.result(), referent_offset, T_OBJECT);
-
- LIR_Opr result = rlock_result(x);
-
- __ load(referent_field_adr, result, info);
-
- // Register the value in the referent field with the pre-barrier
- pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
- result /* pre_val */,
- false /* do_load */,
- false /* patch */,
- NULL /* info */);
+ LIR_Opr result = rlock_result(x, T_OBJECT);
+ access_load_at(IN_HEAP | ON_WEAK_OOP_REF, T_OBJECT,
+ reference, LIR_OprFact::intConst(referent_offset), result);
}
// Example: clazz.isInstance(object)
@@ -1454,222 +1434,27 @@
return result;
}
-// Various barriers
-
-void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
- bool do_load, bool patch, CodeEmitInfo* info) {
- // Do the pre-write barrier, if any.
- switch (_bs->kind()) {
-#if INCLUDE_ALL_GCS
- case BarrierSet::G1BarrierSet:
- G1BarrierSet_pre_barrier(addr_opr, pre_val, do_load, patch, info);
- break;
-#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableBarrierSet:
- // No pre barriers
- break;
- default :
- ShouldNotReachHere();
-
- }
-}
-
-void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
- switch (_bs->kind()) {
-#if INCLUDE_ALL_GCS
- case BarrierSet::G1BarrierSet:
- G1BarrierSet_post_barrier(addr, new_val);
- break;
-#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableBarrierSet:
- CardTableBarrierSet_post_barrier(addr, new_val);
- break;
- default :
- ShouldNotReachHere();
- }
-}
-
-////////////////////////////////////////////////////////////////////////
-#if INCLUDE_ALL_GCS
-
-void LIRGenerator::G1BarrierSet_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
- bool do_load, bool patch, CodeEmitInfo* info) {
- // First we test whether marking is in progress.
- BasicType flag_type;
- if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
- flag_type = T_INT;
- } else {
- guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
- "Assumption");
- // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, eg. ARM,
- // need to use unsigned instructions to use the large offset to load the satb_mark_queue.
- flag_type = T_BOOLEAN;
- }
- LIR_Opr thrd = getThreadPointer();
- LIR_Address* mark_active_flag_addr =
- new LIR_Address(thrd, in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()), flag_type);
- // Read the marking-in-progress flag.
- LIR_Opr flag_val = new_register(T_INT);
- __ load(mark_active_flag_addr, flag_val);
- __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
-
- LIR_PatchCode pre_val_patch_code = lir_patch_none;
-
- CodeStub* slow;
-
- if (do_load) {
- assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
- assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");
-
- if (patch)
- pre_val_patch_code = lir_patch_normal;
-
- pre_val = new_register(T_OBJECT);
-
- if (!addr_opr->is_address()) {
- assert(addr_opr->is_register(), "must be");
- addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
- }
- slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
- } else {
- assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
- assert(pre_val->is_register(), "must be");
- assert(pre_val->type() == T_OBJECT, "must be an object");
- assert(info == NULL, "sanity");
-
- slow = new G1PreBarrierStub(pre_val);
- }
-
- __ branch(lir_cond_notEqual, T_INT, slow);
- __ branch_destination(slow->continuation());
+//------------------------field access--------------------------------------
+
+void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
+ assert(x->number_of_arguments() == 4, "wrong type");
+ LIRItem obj (x->argument_at(0), this); // object
+ LIRItem offset(x->argument_at(1), this); // offset of field
+ LIRItem cmp (x->argument_at(2), this); // value to compare with field
+ LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
+ assert(obj.type()->tag() == objectTag, "invalid type");
+
+ // In 64bit the type can be long, sparc doesn't have this assert
+ // assert(offset.type()->tag() == intTag, "invalid type");
+
+ assert(cmp.type()->tag() == type->tag(), "invalid type");
+ assert(val.type()->tag() == type->tag(), "invalid type");
+
+ LIR_Opr result = access_atomic_cmpxchg_at(IN_HEAP, as_BasicType(type),
+ obj, offset, cmp, val);
+ set_result(x, result);
}
-void LIRGenerator::G1BarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
- // If the "new_val" is a constant NULL, no barrier is necessary.
- if (new_val->is_constant() &&
- new_val->as_constant_ptr()->as_jobject() == NULL) return;
-
- if (!new_val->is_register()) {
- LIR_Opr new_val_reg = new_register(T_OBJECT);
- if (new_val->is_constant()) {
- __ move(new_val, new_val_reg);
- } else {
- __ leal(new_val, new_val_reg);
- }
- new_val = new_val_reg;
- }
- assert(new_val->is_register(), "must be a register at this point");
-
- if (addr->is_address()) {
- LIR_Address* address = addr->as_address_ptr();
- LIR_Opr ptr = new_pointer_register();
- if (!address->index()->is_valid() && address->disp() == 0) {
- __ move(address->base(), ptr);
- } else {
- assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
- __ leal(addr, ptr);
- }
- addr = ptr;
- }
- assert(addr->is_register(), "must be a register at this point");
-
- LIR_Opr xor_res = new_pointer_register();
- LIR_Opr xor_shift_res = new_pointer_register();
- if (TwoOperandLIRForm ) {
- __ move(addr, xor_res);
- __ logical_xor(xor_res, new_val, xor_res);
- __ move(xor_res, xor_shift_res);
- __ unsigned_shift_right(xor_shift_res,
- LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
- xor_shift_res,
- LIR_OprDesc::illegalOpr());
- } else {
- __ logical_xor(addr, new_val, xor_res);
- __ unsigned_shift_right(xor_res,
- LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
- xor_shift_res,
- LIR_OprDesc::illegalOpr());
- }
-
- if (!new_val->is_register()) {
- LIR_Opr new_val_reg = new_register(T_OBJECT);
- __ leal(new_val, new_val_reg);
- new_val = new_val_reg;
- }
- assert(new_val->is_register(), "must be a register at this point");
-
- __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
-
- CodeStub* slow = new G1PostBarrierStub(addr, new_val);
- __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
- __ branch_destination(slow->continuation());
-}
-
-#endif // INCLUDE_ALL_GCS
-////////////////////////////////////////////////////////////////////////
-
-void LIRGenerator::CardTableBarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
- LIR_Const* card_table_base = new LIR_Const(ci_card_table_address());
- if (addr->is_address()) {
- LIR_Address* address = addr->as_address_ptr();
- // ptr cannot be an object because we use this barrier for array card marks
- // and addr can point in the middle of an array.
- LIR_Opr ptr = new_pointer_register();
- if (!address->index()->is_valid() && address->disp() == 0) {
- __ move(address->base(), ptr);
- } else {
- assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
- __ leal(addr, ptr);
- }
- addr = ptr;
- }
- assert(addr->is_register(), "must be a register at this point");
-
-#ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
- CardTableBarrierSet_post_barrier_helper(addr, card_table_base);
-#else
- LIR_Opr tmp = new_pointer_register();
- if (TwoOperandLIRForm) {
- __ move(addr, tmp);
- __ unsigned_shift_right(tmp, CardTable::card_shift, tmp);
- } else {
- __ unsigned_shift_right(addr, CardTable::card_shift, tmp);
- }
-
- LIR_Address* card_addr;
- if (can_inline_as_constant(card_table_base)) {
- card_addr = new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE);
- } else {
- card_addr = new LIR_Address(tmp, load_constant(card_table_base), T_BYTE);
- }
-
- LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
- if (UseCondCardMark) {
- LIR_Opr cur_value = new_register(T_INT);
- if (UseConcMarkSweepGC) {
- __ membar_storeload();
- }
- __ move(card_addr, cur_value);
-
- LabelObj* L_already_dirty = new LabelObj();
- __ cmp(lir_cond_equal, cur_value, dirty);
- __ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
- __ move(dirty, card_addr);
- __ branch_destination(L_already_dirty->label());
- } else {
-#if INCLUDE_ALL_GCS
- if (UseConcMarkSweepGC && CMSPrecleaningEnabled) {
- __ membar_storestore();
- }
-#endif
- __ move(dirty, card_addr);
- }
-#endif
-}
-
-
-//------------------------field access--------------------------------------
-
// Comment copied form templateTable_i486.cpp
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPU's in
@@ -1702,7 +1487,6 @@
bool needs_patching = x->needs_patching();
bool is_volatile = x->field()->is_volatile();
BasicType field_type = x->field_type();
- bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
CodeEmitInfo* info = NULL;
if (needs_patching) {
@@ -1717,7 +1501,6 @@
}
}
-
LIRItem object(x->obj(), this);
LIRItem value(x->value(), this);
@@ -1755,48 +1538,147 @@
__ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
}
- LIR_Address* address;
+ DecoratorSet decorators = IN_HEAP;
+ if (is_volatile) {
+ decorators |= MO_SEQ_CST;
+ }
if (needs_patching) {
- // we need to patch the offset in the instruction so don't allow
- // generate_address to try to be smart about emitting the -1.
- // Otherwise the patching code won't know how to find the
- // instruction to patch.
- address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
+ decorators |= C1_NEEDS_PATCHING;
+ }
+
+ access_store_at(decorators, field_type, object, LIR_OprFact::intConst(x->offset()),
+ value.result(), info != NULL ? new CodeEmitInfo(info) : NULL, info);
+}
+
+void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
+ assert(x->is_pinned(),"");
+ bool needs_range_check = x->compute_needs_range_check();
+ bool use_length = x->length() != NULL;
+ bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
+ bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
+ !get_jobject_constant(x->value())->is_null_object() ||
+ x->should_profile());
+
+ LIRItem array(x->array(), this);
+ LIRItem index(x->index(), this);
+ LIRItem value(x->value(), this);
+ LIRItem length(this);
+
+ array.load_item();
+ index.load_nonconstant();
+
+ if (use_length && needs_range_check) {
+ length.set_instruction(x->length());
+ length.load_item();
+
+ }
+ if (needs_store_check || x->check_boolean()) {
+ value.load_item();
} else {
- address = generate_address(object.result(), x->offset(), field_type);
- }
-
- if (is_volatile && os::is_MP()) {
- __ membar_release();
+ value.load_for_store(x->elt_type());
}
- if (is_oop) {
- // Do the pre-write barrier, if any.
- pre_barrier(LIR_OprFact::address(address),
- LIR_OprFact::illegalOpr /* pre_val */,
- true /* do_load*/,
- needs_patching,
- (info ? new CodeEmitInfo(info) : NULL));
+ set_no_result(x);
+
+ // the CodeEmitInfo must be duplicated for each different
+ // LIR-instruction because spilling can occur anywhere between two
+ // instructions and so the debug information must be different
+ CodeEmitInfo* range_check_info = state_for(x);
+ CodeEmitInfo* null_check_info = NULL;
+ if (x->needs_null_check()) {
+ null_check_info = new CodeEmitInfo(range_check_info);
+ }
+
+ if (GenerateRangeChecks && needs_range_check) {
+ if (use_length) {
+ __ cmp(lir_cond_belowEqual, length.result(), index.result());
+ __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
+ } else {
+ array_range_check(array.result(), index.result(), null_check_info, range_check_info);
+ // range_check also does the null check
+ null_check_info = NULL;
+ }
}
- bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
- if (needs_atomic_access && !needs_patching) {
- volatile_field_store(value.result(), address, info);
- } else {
- LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
- __ store(value.result(), address, info, patch_code);
+ if (GenerateArrayStoreCheck && needs_store_check) {
+ CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
+ array_store_check(value.result(), array.result(), store_check_info, x->profiled_method(), x->profiled_bci());
+ }
+
+ DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
+ if (x->check_boolean()) {
+ decorators |= C1_MASK_BOOLEAN;
}
- if (is_oop) {
- // Store to object so mark the card of the header
- post_barrier(object.result(), value.result());
- }
-
- if (!support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile && os::is_MP()) {
- __ membar();
+ access_store_at(decorators, x->elt_type(), array, index.result(), value.result(),
+ NULL, null_check_info);
+}
+
+void LIRGenerator::access_load_at(DecoratorSet decorators, BasicType type,
+ LIRItem& base, LIR_Opr offset, LIR_Opr result,
+ CodeEmitInfo* patch_info, CodeEmitInfo* load_emit_info) {
+ decorators |= C1_READ_ACCESS;
+ LIRAccess access(this, decorators, base, offset, type, patch_info, load_emit_info);
+ if (access.is_raw()) {
+ _barrier_set->BarrierSetC1::load_at(access, result);
+ } else {
+ _barrier_set->load_at(access, result);
}
}
+void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
+ LIRItem& base, LIR_Opr offset, LIR_Opr value,
+ CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
+ decorators |= C1_WRITE_ACCESS;
+ LIRAccess access(this, decorators, base, offset, type, patch_info, store_emit_info);
+ if (access.is_raw()) {
+ _barrier_set->BarrierSetC1::store_at(access, value);
+ } else {
+ _barrier_set->store_at(access, value);
+ }
+}
+
+LIR_Opr LIRGenerator::access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
+ LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value) {
+ // Atomic operations are SEQ_CST by default
+ decorators |= C1_READ_ACCESS;
+ decorators |= C1_WRITE_ACCESS;
+ decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? MO_SEQ_CST : 0;
+ LIRAccess access(this, decorators, base, offset, type);
+ if (access.is_raw()) {
+ return _barrier_set->BarrierSetC1::atomic_cmpxchg_at(access, cmp_value, new_value);
+ } else {
+ return _barrier_set->atomic_cmpxchg_at(access, cmp_value, new_value);
+ }
+}
+
+LIR_Opr LIRGenerator::access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
+ LIRItem& base, LIRItem& offset, LIRItem& value) {
+ // Atomic operations are SEQ_CST by default
+ decorators |= C1_READ_ACCESS;
+ decorators |= C1_WRITE_ACCESS;
+ decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? MO_SEQ_CST : 0;
+ LIRAccess access(this, decorators, base, offset, type);
+ if (access.is_raw()) {
+ return _barrier_set->BarrierSetC1::atomic_xchg_at(access, value);
+ } else {
+ return _barrier_set->atomic_xchg_at(access, value);
+ }
+}
+
+LIR_Opr LIRGenerator::access_atomic_add_at(DecoratorSet decorators, BasicType type,
+ LIRItem& base, LIRItem& offset, LIRItem& value) {
+ // Atomic operations are SEQ_CST by default
+ decorators |= C1_READ_ACCESS;
+ decorators |= C1_WRITE_ACCESS;
+ decorators |= ((decorators & MO_DECORATOR_MASK) != 0) ? MO_SEQ_CST : 0;
+ LIRAccess access(this, decorators, base, offset, type);
+ if (access.is_raw()) {
+ return _barrier_set->BarrierSetC1::atomic_add_at(access, value);
+ } else {
+ return _barrier_set->atomic_add_at(access, value);
+ }
+}
void LIRGenerator::do_LoadField(LoadField* x) {
bool needs_patching = x->needs_patching();
@@ -1843,33 +1725,18 @@
__ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
}
- LIR_Opr reg = rlock_result(x, field_type);
- LIR_Address* address;
- if (needs_patching) {
- // we need to patch the offset in the instruction so don't allow
- // generate_address to try to be smart about emitting the -1.
- // Otherwise the patching code won't know how to find the
- // instruction to patch.
- address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
- } else {
- address = generate_address(object.result(), x->offset(), field_type);
+ DecoratorSet decorators = IN_HEAP;
+ if (is_volatile) {
+ decorators |= MO_SEQ_CST;
}
-
- if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile && os::is_MP()) {
- __ membar();
+ if (needs_patching) {
+ decorators |= C1_NEEDS_PATCHING;
}
- bool needs_atomic_access = is_volatile || AlwaysAtomicAccesses;
- if (needs_atomic_access && !needs_patching) {
- volatile_field_load(address, reg, info);
- } else {
- LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
- __ load(address, reg, info, patch_code);
- }
-
- if (is_volatile && os::is_MP()) {
- __ membar_acquire();
- }
+ LIR_Opr result = rlock_result(x, field_type);
+ access_load_at(decorators, field_type,
+ object, LIR_OprFact::intConst(x->offset()), result,
+ info ? new CodeEmitInfo(info) : NULL, info);
}
@@ -1968,9 +1835,6 @@
}
}
- // emit array address setup early so it schedules better
- LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
-
if (GenerateRangeChecks && needs_range_check) {
if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
__ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
@@ -1986,7 +1850,12 @@
}
}
- __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
+ DecoratorSet decorators = IN_HEAP | IN_HEAP_ARRAY;
+
+ LIR_Opr result = rlock_result(x, x->elt_type());
+ access_load_at(decorators, x->elt_type(),
+ array, index.result(), result,
+ NULL, null_check_info);
}
@@ -2272,157 +2141,21 @@
off.load_item();
src.load_item();
- LIR_Opr value = rlock_result(x, x->basic_type());
-
- if (support_IRIW_for_not_multiple_copy_atomic_cpu && x->is_volatile() && os::is_MP()) {
- __ membar();
+ DecoratorSet decorators = IN_HEAP;
+
+ if (x->is_volatile()) {
+ decorators |= MO_SEQ_CST;
+ }
+ if (type == T_BOOLEAN) {
+ decorators |= C1_MASK_BOOLEAN;
}
-
- get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
-
-#if INCLUDE_ALL_GCS
- // We might be reading the value of the referent field of a
- // Reference object in order to attach it back to the live
- // object graph. If G1 is enabled then we need to record
- // the value that is being returned in an SATB log buffer.
- //
- // We need to generate code similar to the following...
- //
- // if (offset == java_lang_ref_Reference::referent_offset) {
- // if (src != NULL) {
- // if (klass(src)->reference_type() != REF_NONE) {
- // pre_barrier(..., value, ...);
- // }
- // }
- // }
-
- if (UseG1GC && type == T_OBJECT) {
- bool gen_pre_barrier = true; // Assume we need to generate pre_barrier.
- bool gen_offset_check = true; // Assume we need to generate the offset guard.
- bool gen_source_check = true; // Assume we need to check the src object for null.
- bool gen_type_check = true; // Assume we need to check the reference_type.
-
- if (off.is_constant()) {
- jlong off_con = (off.type()->is_int() ?
- (jlong) off.get_jint_constant() :
- off.get_jlong_constant());
-
-
- if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
- // The constant offset is something other than referent_offset.
- // We can skip generating/checking the remaining guards and
- // skip generation of the code stub.
- gen_pre_barrier = false;
- } else {
- // The constant offset is the same as referent_offset -
- // we do not need to generate a runtime offset check.
- gen_offset_check = false;
- }
- }
-
- // We don't need to generate stub if the source object is an array
- if (gen_pre_barrier && src.type()->is_array()) {
- gen_pre_barrier = false;
- }
-
- if (gen_pre_barrier) {
- // We still need to continue with the checks.
- if (src.is_constant()) {
- ciObject* src_con = src.get_jobject_constant();
- guarantee(src_con != NULL, "no source constant");
-
- if (src_con->is_null_object()) {
- // The constant src object is null - We can skip
- // generating the code stub.
- gen_pre_barrier = false;
- } else {
- // Non-null constant source object. We still have to generate
- // the slow stub - but we don't need to generate the runtime
- // null object check.
- gen_source_check = false;
- }
- }
- }
- if (gen_pre_barrier && !PatchALot) {
- // Can the klass of object be statically determined to be
- // a sub-class of Reference?
- ciType* type = src.value()->declared_type();
- if ((type != NULL) && type->is_loaded()) {
- if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
- gen_type_check = false;
- } else if (type->is_klass() &&
- !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
- // Not Reference and not Object klass.
- gen_pre_barrier = false;
- }
- }
- }
-
- if (gen_pre_barrier) {
- LabelObj* Lcont = new LabelObj();
-
- // We can have generate one runtime check here. Let's start with
- // the offset check.
- if (gen_offset_check) {
- // if (offset != referent_offset) -> continue
- // If offset is an int then we can do the comparison with the
- // referent_offset constant; otherwise we need to move
- // referent_offset into a temporary register and generate
- // a reg-reg compare.
-
- LIR_Opr referent_off;
-
- if (off.type()->is_int()) {
- referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
- } else {
- assert(off.type()->is_long(), "what else?");
- referent_off = new_register(T_LONG);
- __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
- }
- __ cmp(lir_cond_notEqual, off.result(), referent_off);
- __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
- }
- if (gen_source_check) {
- // offset is a const and equals referent offset
- // if (source == null) -> continue
- __ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
- __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
- }
- LIR_Opr src_klass = new_register(T_OBJECT);
- if (gen_type_check) {
- // We have determined that offset == referent_offset && src != null.
- // if (src->_klass->_reference_type == REF_NONE) -> continue
- __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
- LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
- LIR_Opr reference_type = new_register(T_INT);
- __ move(reference_type_addr, reference_type);
- __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
- __ branch(lir_cond_equal, T_INT, Lcont->label());
- }
- {
- // We have determined that src->_klass->_reference_type != REF_NONE
- // so register the value in the referent field with the pre-barrier.
- pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
- value /* pre_val */,
- false /* do_load */,
- false /* patch */,
- NULL /* info */);
- }
- __ branch_destination(Lcont->label());
- }
+ if (type == T_ARRAY || type == T_OBJECT) {
+ decorators |= ON_UNKNOWN_OOP_REF;
}
-#endif // INCLUDE_ALL_GCS
-
- if (x->is_volatile() && os::is_MP()) __ membar_acquire();
-
- /* Normalize boolean value returned by unsafe operation, i.e., value != 0 ? value = true : value false. */
- if (type == T_BOOLEAN) {
- LabelObj* equalZeroLabel = new LabelObj();
- __ cmp(lir_cond_equal, value, 0);
- __ branch(lir_cond_equal, T_BOOLEAN, equalZeroLabel->label());
- __ move(LIR_OprFact::intConst(1), value);
- __ branch_destination(equalZeroLabel->label());
- }
+
+ LIR_Opr result = rlock_result(x, type);
+ access_load_at(decorators, type,
+ src, off.result(), result);
}
@@ -2442,11 +2175,36 @@
set_no_result(x);
- if (x->is_volatile() && os::is_MP()) __ membar_release();
- put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
- if (!support_IRIW_for_not_multiple_copy_atomic_cpu && x->is_volatile() && os::is_MP()) __ membar();
+ DecoratorSet decorators = IN_HEAP;
+ if (type == T_ARRAY || type == T_OBJECT) {
+ decorators |= ON_UNKNOWN_OOP_REF;
+ }
+ if (x->is_volatile()) {
+ decorators |= MO_SEQ_CST;
+ }
+ access_store_at(decorators, type, src, off.result(), data.result());
}
+void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
+ BasicType type = x->basic_type();
+ LIRItem src(x->object(), this);
+ LIRItem off(x->offset(), this);
+ LIRItem value(x->value(), this);
+
+ DecoratorSet decorators = IN_HEAP | MO_SEQ_CST;
+
+ if (type == T_ARRAY || type == T_OBJECT) {
+ decorators |= ON_UNKNOWN_OOP_REF;
+ }
+
+ LIR_Opr result;
+ if (x->is_add()) {
+ result = access_atomic_add_at(decorators, type, src, off, value);
+ } else {
+ result = access_atomic_xchg_at(decorators, type, src, off, value);
+ }
+ set_result(x, result);
+}
void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
int lng = x->length();
@@ -3826,25 +3584,30 @@
}
}
+LIR_Opr LIRGenerator::mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
+ LIR_Opr value_fixed = rlock_byte(T_BYTE);
+ if (TwoOperandLIRForm) {
+ __ move(value, value_fixed);
+ __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
+ } else {
+ __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
+ }
+ LIR_Opr klass = new_register(T_METADATA);
+ __ move(new LIR_Address(array, oopDesc::klass_offset_in_bytes(), T_ADDRESS), klass, null_check_info);
+ null_check_info = NULL;
+ LIR_Opr layout = new_register(T_INT);
+ __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
+ int diffbit = Klass::layout_helper_boolean_diffbit();
+ __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
+ __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
+ __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
+ value = value_fixed;
+ return value;
+}
+
LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
if (x->check_boolean()) {
- LIR_Opr value_fixed = rlock_byte(T_BYTE);
- if (TwoOperandLIRForm) {
- __ move(value, value_fixed);
- __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
- } else {
- __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
- }
- LIR_Opr klass = new_register(T_METADATA);
- __ move(new LIR_Address(array, oopDesc::klass_offset_in_bytes(), T_ADDRESS), klass, null_check_info);
- null_check_info = NULL;
- LIR_Opr layout = new_register(T_INT);
- __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
- int diffbit = Klass::layout_helper_boolean_diffbit();
- __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
- __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
- __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
- value = value_fixed;
+ value = mask_boolean(array, value, null_check_info);
}
return value;
}
--- a/src/hotspot/share/c1/c1_LIRGenerator.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/c1/c1_LIRGenerator.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -25,12 +25,16 @@
#ifndef SHARE_VM_C1_C1_LIRGENERATOR_HPP
#define SHARE_VM_C1_C1_LIRGENERATOR_HPP
+#include "c1/c1_Decorators.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIR.hpp"
#include "ci/ciMethodData.hpp"
+#include "gc/shared/barrierSet.hpp"
#include "utilities/macros.hpp"
#include "utilities/sizes.hpp"
+class BarrierSetC1;
+
// The classes responsible for code emission and register allocation
@@ -165,7 +169,6 @@
Values _instruction_for_operand;
BitMap2D _vreg_flags; // flags which can be set on a per-vreg basis
LIR_List* _lir;
- BarrierSet* _bs;
LIRGenerator* gen() {
return this;
@@ -173,6 +176,7 @@
void print_if_not_loaded(const NewInstance* new_instance) PRODUCT_RETURN;
+ public:
#ifdef ASSERT
LIR_List* lir(const char * file, int line) const {
_lir->set_file_and_line(file, line);
@@ -183,6 +187,7 @@
return _lir;
}
+ private:
// a simple cache of constants used within a block
GrowableArray<LIR_Const*> _constants;
LIR_OprList _reg_for_constants;
@@ -190,6 +195,7 @@
friend class PhiResolver;
+ public:
// unified bailout support
void bailout(const char* msg) const { compilation()->bailout(msg); }
bool bailed_out() const { return compilation()->bailed_out(); }
@@ -233,14 +239,15 @@
void move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val);
void move_to_phi(ValueStack* cur_state);
- // code emission
- void do_ArithmeticOp_Long (ArithmeticOp* x);
- void do_ArithmeticOp_Int (ArithmeticOp* x);
- void do_ArithmeticOp_FPU (ArithmeticOp* x);
-
// platform dependent
LIR_Opr getThreadPointer();
+ private:
+ // code emission
+ void do_ArithmeticOp_Long(ArithmeticOp* x);
+ void do_ArithmeticOp_Int (ArithmeticOp* x);
+ void do_ArithmeticOp_FPU (ArithmeticOp* x);
+
void do_RegisterFinalizer(Intrinsic* x);
void do_isInstance(Intrinsic* x);
void do_isPrimitive(Intrinsic* x);
@@ -258,6 +265,7 @@
void do_update_CRC32C(Intrinsic* x);
void do_vectorizedMismatch(Intrinsic* x);
+ public:
LIR_Opr call_runtime(BasicTypeArray* signature, LIRItemList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
LIR_Opr call_runtime(BasicTypeArray* signature, LIR_OprList* args, address entry, ValueType* result_type, CodeEmitInfo* info);
@@ -265,27 +273,37 @@
LIR_Opr call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info);
LIR_Opr call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info);
- // GC Barriers
+ // Access API
+
+ private:
+ BarrierSetC1 *_barrier_set;
+
+ public:
+ void access_store_at(DecoratorSet decorators, BasicType type,
+ LIRItem& base, LIR_Opr offset, LIR_Opr value,
+ CodeEmitInfo* patch_info = NULL, CodeEmitInfo* store_emit_info = NULL);
+
+ void access_load_at(DecoratorSet decorators, BasicType type,
+ LIRItem& base, LIR_Opr offset, LIR_Opr result,
+ CodeEmitInfo* patch_info = NULL, CodeEmitInfo* load_emit_info = NULL);
- // generic interface
+ LIR_Opr access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
+ LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value);
+
+ LIR_Opr access_atomic_xchg_at(DecoratorSet decorators, BasicType type,
+ LIRItem& base, LIRItem& offset, LIRItem& value);
- void pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val, bool do_load, bool patch, CodeEmitInfo* info);
- void post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
+ LIR_Opr access_atomic_add_at(DecoratorSet decorators, BasicType type,
+ LIRItem& base, LIRItem& offset, LIRItem& value);
+
+ // These need to guarantee JMM volatile semantics are preserved on each platform
+ // and requires one implementation per architecture.
+ LIR_Opr atomic_cmpxchg(BasicType type, LIR_Opr addr, LIRItem& cmp_value, LIRItem& new_value);
+ LIR_Opr atomic_xchg(BasicType type, LIR_Opr addr, LIRItem& new_value);
+ LIR_Opr atomic_add(BasicType type, LIR_Opr addr, LIRItem& new_value);
// specific implementations
- // pre barriers
-
- void G1BarrierSet_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
- bool do_load, bool patch, CodeEmitInfo* info);
-
- // post barriers
-
- void G1BarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
- void CardTableBarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
-#ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
- void CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base);
-#endif
-
+ void array_store_check(LIR_Opr value, LIR_Opr array, CodeEmitInfo* store_check_info, ciMethod* profiled_method, int profiled_bci);
static LIR_Opr result_register_for(ValueType* type, bool callee = false);
@@ -354,7 +372,7 @@
LIR_Address* generate_address(LIR_Opr base, int disp, BasicType type) {
return generate_address(base, LIR_OprFact::illegalOpr, 0, disp, type);
}
- LIR_Address* emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, BasicType type, bool needs_card_mark);
+ LIR_Address* emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr, BasicType type);
// the helper for generate_address
void add_large_constant(LIR_Opr src, int c, LIR_Opr dest);
@@ -433,8 +451,6 @@
void do_soft_float_compare(If *x);
#endif // __SOFTFP__
- void init();
-
SwitchRangeArray* create_lookup_ranges(TableSwitch* x);
SwitchRangeArray* create_lookup_ranges(LookupSwitch* x);
void do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux);
@@ -452,6 +468,7 @@
void profile_arguments(ProfileCall* x);
void profile_parameters(Base* x);
void profile_parameters_at_call(ProfileCall* x);
+ LIR_Opr mask_boolean(LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info);
LIR_Opr maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info);
public:
@@ -478,8 +495,8 @@
: _compilation(compilation)
, _method(method)
, _virtual_register_number(LIR_OprDesc::vreg_base)
- , _vreg_flags(num_vreg_flags) {
- init();
+ , _vreg_flags(num_vreg_flags)
+ , _barrier_set(BarrierSet::barrier_set()->barrier_set_c1()) {
}
// for virtual registers, maps them back to Phi's or Local's
--- a/src/hotspot/share/c1/c1_MacroAssembler.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/c1/c1_MacroAssembler.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -74,6 +74,9 @@
void set_frame_size(int size);
void set_num_rt_args(int args);
+ void save_live_registers();
+ void restore_live_registers_without_return();
+
// accessors
const char* name() const { return _name; }
bool must_gc_arguments() const { return _must_gc_arguments; }
@@ -86,6 +89,9 @@
int call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1);
int call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2);
int call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3);
+
+ void prologue(const char* name, bool must_gc_arguments);
+ void epilogue();
};
#endif // SHARE_VM_C1_C1_MACROASSEMBLER_HPP
--- a/src/hotspot/share/c1/c1_Runtime1.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -39,6 +39,7 @@
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/c1/barrierSetC1.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
@@ -178,9 +179,17 @@
}
}
+class StubIDStubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
+ private:
+ Runtime1::StubID _id;
+ public:
+ StubIDStubAssemblerCodeGenClosure(Runtime1::StubID id) : _id(id) {}
+ virtual OopMapSet* generate_code(StubAssembler* sasm) {
+ return Runtime1::generate_code_for(_id, sasm);
+ }
+};
-void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
- assert(0 <= id && id < number_of_ids, "illegal stub id");
+CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, int stub_id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
ResourceMark rm;
// create code buffer for code storage
CodeBuffer code(buffer_blob);
@@ -192,33 +201,12 @@
Compilation::setup_code_buffer(&code, 0);
// create assembler for code generation
- StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
+ StubAssembler* sasm = new StubAssembler(&code, name, stub_id);
// generate code for runtime stub
- oop_maps = generate_code_for(id, sasm);
+ oop_maps = cl->generate_code(sasm);
assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
"if stub has an oop map it must have a valid frame size");
-
-#ifdef ASSERT
- // Make sure that stubs that need oopmaps have them
- switch (id) {
- // These stubs don't need to have an oopmap
- case dtrace_object_alloc_id:
- case g1_pre_barrier_slow_id:
- case g1_post_barrier_slow_id:
- case slow_subtype_check_id:
- case fpu2long_stub_id:
- case unwind_exception_id:
- case counter_overflow_id:
-#if defined(SPARC) || defined(PPC32)
- case handle_exception_nofpu_id: // Unused on sparc
-#endif
- break;
-
- // All other stubs should have oopmaps
- default:
- assert(oop_maps != NULL, "must have an oopmap");
- }
-#endif
+ assert(!expect_oop_map || oop_maps != NULL, "must have an oopmap");
// align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
sasm->align(BytesPerWord);
@@ -228,18 +216,43 @@
frame_size = sasm->frame_size();
must_gc_arguments = sasm->must_gc_arguments();
// create blob - distinguish a few special cases
- CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),
+ CodeBlob* blob = RuntimeStub::new_runtime_stub(name,
&code,
CodeOffsets::frame_never_safe,
frame_size,
oop_maps,
must_gc_arguments);
+ assert(blob != NULL, "blob must exist");
+ return blob;
+}
+
+void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
+ assert(0 <= id && id < number_of_ids, "illegal stub id");
+ bool expect_oop_map = true;
+#ifdef ASSERT
+ // Make sure that stubs that need oopmaps have them
+ switch (id) {
+ // These stubs don't need to have an oopmap
+ case dtrace_object_alloc_id:
+ case slow_subtype_check_id:
+ case fpu2long_stub_id:
+ case unwind_exception_id:
+ case counter_overflow_id:
+#if defined(SPARC) || defined(PPC32)
+ case handle_exception_nofpu_id: // Unused on sparc
+#endif
+ expect_oop_map = false;
+ break;
+ default:
+ break;
+ }
+#endif
+ StubIDStubAssemblerCodeGenClosure cl(id);
+ CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
// install blob
- assert(blob != NULL, "blob must exist");
_blobs[id] = blob;
}
-
void Runtime1::initialize(BufferBlob* blob) {
// platform-dependent initialization
initialize_pd();
@@ -257,9 +270,10 @@
}
}
#endif
+ BarrierSetC1* bs = BarrierSet::barrier_set()->barrier_set_c1();
+ bs->generate_c1_runtime_stubs(blob);
}
-
CodeBlob* Runtime1::blob_for(StubID id) {
assert(0 <= id && id < number_of_ids, "illegal stub id");
return _blobs[id];
--- a/src/hotspot/share/c1/c1_Runtime1.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/c1/c1_Runtime1.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -68,8 +68,6 @@
stub(load_klass_patching) \
stub(load_mirror_patching) \
stub(load_appendix_patching) \
- stub(g1_pre_barrier_slow) \
- stub(g1_post_barrier_slow) \
stub(fpu2long_stub) \
stub(counter_overflow) \
stub(predicate_failed_trap) \
@@ -80,6 +78,11 @@
#define STUB_NAME(x) #x " Runtime1 stub",
#define LAST_STUB_NAME(x) #x " Runtime1 stub"
+class StubAssemblerCodeGenClosure: public Closure {
+ public:
+ virtual OopMapSet* generate_code(StubAssembler* sasm) = 0;
+};
+
class Runtime1: public AllStatic {
friend class VMStructs;
friend class ArrayCopyStub;
@@ -121,8 +124,11 @@
static const char* _blob_names[];
// stub generation
+ public:
+ static CodeBlob* generate_blob(BufferBlob* buffer_blob, int stub_id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure *cl);
static void generate_blob_for(BufferBlob* blob, StubID id);
static OopMapSet* generate_code_for(StubID id, StubAssembler* sasm);
+ private:
static OopMapSet* generate_exception_throw(StubAssembler* sasm, address target, bool has_argument);
static OopMapSet* generate_handle_exception(StubID id, StubAssembler* sasm);
static void generate_unwind_exception(StubAssembler *sasm);
--- a/src/hotspot/share/code/dependencies.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/code/dependencies.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -35,6 +35,7 @@
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/objArrayKlass.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
--- a/src/hotspot/share/code/nmethod.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/code/nmethod.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -47,6 +47,7 @@
#include "oops/oop.inline.hpp"
#include "prims/jvmtiImpl.hpp"
#include "runtime/atomic.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
--- a/src/hotspot/share/code/relocInfo.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/code/relocInfo.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "code/relocInfo.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.inline.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "utilities/copy.hpp"
#include "oops/oop.inline.hpp"
--- a/src/hotspot/share/gc/cms/commandLineFlagConstraintsCMS.cpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,241 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/commandLineFlagConstraintsCMS.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/commandLineFlagConstraintsGC.hpp"
-#include "memory/universe.hpp"
-#include "runtime/commandLineFlagRangeList.hpp"
-#include "runtime/globals_extension.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-static Flag::Error ParallelGCThreadsAndCMSWorkQueueDrainThreshold(uint threads, uintx threshold, bool verbose) {
- // CMSWorkQueueDrainThreshold is verified to be less than max_juint
- if (UseConcMarkSweepGC && (threads > (uint)(max_jint / (uint)threshold))) {
- CommandLineError::print(verbose,
- "ParallelGCThreads (" UINT32_FORMAT ") or CMSWorkQueueDrainThreshold ("
- UINTX_FORMAT ") is too large\n",
- threads, threshold);
- return Flag::VIOLATES_CONSTRAINT;
- }
- return Flag::SUCCESS;
-}
-
-Flag::Error ParallelGCThreadsConstraintFuncCMS(uint value, bool verbose) {
- // To avoid overflow at ParScanClosure::do_oop_work.
- if (UseConcMarkSweepGC && (value > (max_jint / 10))) {
- CommandLineError::print(verbose,
- "ParallelGCThreads (" UINT32_FORMAT ") must be "
- "less than or equal to " UINT32_FORMAT " for CMS GC\n",
- value, (max_jint / 10));
- return Flag::VIOLATES_CONSTRAINT;
- }
- return ParallelGCThreadsAndCMSWorkQueueDrainThreshold(value, CMSWorkQueueDrainThreshold, verbose);
-}
-Flag::Error ParGCStridesPerThreadConstraintFunc(uintx value, bool verbose) {
- if (UseConcMarkSweepGC && (value > ((uintx)max_jint / (uintx)ParallelGCThreads))) {
- CommandLineError::print(verbose,
- "ParGCStridesPerThread (" UINTX_FORMAT ") must be "
- "less than or equal to ergonomic maximum (" UINTX_FORMAT ")\n",
- value, ((uintx)max_jint / (uintx)ParallelGCThreads));
- return Flag::VIOLATES_CONSTRAINT;
- }
- return Flag::SUCCESS;
-}
-
-Flag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose) {
- if (UseConcMarkSweepGC) {
- // ParGCCardsPerStrideChunk should be compared with card table size.
- size_t heap_size = Universe::heap()->reserved_region().word_size();
- CardTableRS* ct = GenCollectedHeap::heap()->rem_set();
- size_t card_table_size = ct->cards_required(heap_size) - 1; // Valid card table size
-
- if ((size_t)value > card_table_size) {
- CommandLineError::print(verbose,
- "ParGCCardsPerStrideChunk (" INTX_FORMAT ") is too large for the heap size and "
- "must be less than or equal to card table size (" SIZE_FORMAT ")\n",
- value, card_table_size);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- // ParGCCardsPerStrideChunk is used with n_strides(ParallelGCThreads*ParGCStridesPerThread)
- // from CardTableRS::process_stride(). Note that ParGCStridesPerThread is already checked
- // not to make an overflow with ParallelGCThreads from its constraint function.
- uintx n_strides = ParallelGCThreads * ParGCStridesPerThread;
- uintx ergo_max = max_uintx / n_strides;
- if ((uintx)value > ergo_max) {
- CommandLineError::print(verbose,
- "ParGCCardsPerStrideChunk (" INTX_FORMAT ") must be "
- "less than or equal to ergonomic maximum (" UINTX_FORMAT ")\n",
- value, ergo_max);
- return Flag::VIOLATES_CONSTRAINT;
- }
- }
- return Flag::SUCCESS;
-}
-
-Flag::Error CMSOldPLABMinConstraintFunc(size_t value, bool verbose) {
- Flag::Error status = Flag::SUCCESS;
-
- if (UseConcMarkSweepGC) {
- if (value > CMSOldPLABMax) {
- CommandLineError::print(verbose,
- "CMSOldPLABMin (" SIZE_FORMAT ") must be "
- "less than or equal to CMSOldPLABMax (" SIZE_FORMAT ")\n",
- value, CMSOldPLABMax);
- return Flag::VIOLATES_CONSTRAINT;
- }
- status = MaxPLABSizeBounds("CMSOldPLABMin", value, verbose);
- }
- return status;
-}
-
-Flag::Error CMSOldPLABMaxConstraintFunc(size_t value, bool verbose) {
- Flag::Error status = Flag::SUCCESS;
-
- if (UseConcMarkSweepGC) {
- status = MaxPLABSizeBounds("CMSOldPLABMax", value, verbose);
- }
- return status;
-}
-
-static Flag::Error CMSReservedAreaConstraintFunc(const char* name, size_t value, bool verbose) {
- if (UseConcMarkSweepGC) {
- ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*)GenCollectedHeap::heap()->old_gen();
- const size_t ergo_max = cms->cmsSpace()->max_flag_size_for_task_size();
- if (value > ergo_max) {
- CommandLineError::print(verbose,
- "%s (" SIZE_FORMAT ") must be "
- "less than or equal to ergonomic maximum (" SIZE_FORMAT ") "
- "which is based on the maximum size of the old generation of the Java heap\n",
- name, value, ergo_max);
- return Flag::VIOLATES_CONSTRAINT;
- }
- }
-
- return Flag::SUCCESS;
-}
-
-Flag::Error CMSRescanMultipleConstraintFunc(size_t value, bool verbose) {
- Flag::Error status = CMSReservedAreaConstraintFunc("CMSRescanMultiple", value, verbose);
-
- if (status == Flag::SUCCESS && UseConcMarkSweepGC) {
- // CMSParRemarkTask::do_dirty_card_rescan_tasks requires CompactibleFreeListSpace::rescan_task_size()
- // to be aligned to CardTable::card_size * BitsPerWord.
- // Note that rescan_task_size() will be aligned if CMSRescanMultiple is a multiple of 'HeapWordSize'
- // because rescan_task_size() is CardTable::card_size / HeapWordSize * BitsPerWord.
- if (value % HeapWordSize != 0) {
- CommandLineError::print(verbose,
- "CMSRescanMultiple (" SIZE_FORMAT ") must be "
- "a multiple of " SIZE_FORMAT "\n",
- value, HeapWordSize);
- status = Flag::VIOLATES_CONSTRAINT;
- }
- }
-
- return status;
-}
-
-Flag::Error CMSConcMarkMultipleConstraintFunc(size_t value, bool verbose) {
- return CMSReservedAreaConstraintFunc("CMSConcMarkMultiple", value, verbose);
-}
-
-Flag::Error CMSPrecleanDenominatorConstraintFunc(uintx value, bool verbose) {
- if (UseConcMarkSweepGC && (value <= CMSPrecleanNumerator)) {
- CommandLineError::print(verbose,
- "CMSPrecleanDenominator (" UINTX_FORMAT ") must be "
- "strickly greater than CMSPrecleanNumerator (" UINTX_FORMAT ")\n",
- value, CMSPrecleanNumerator);
- return Flag::VIOLATES_CONSTRAINT;
- }
- return Flag::SUCCESS;
-}
-
-Flag::Error CMSPrecleanNumeratorConstraintFunc(uintx value, bool verbose) {
- if (UseConcMarkSweepGC && (value >= CMSPrecleanDenominator)) {
- CommandLineError::print(verbose,
- "CMSPrecleanNumerator (" UINTX_FORMAT ") must be "
- "less than CMSPrecleanDenominator (" UINTX_FORMAT ")\n",
- value, CMSPrecleanDenominator);
- return Flag::VIOLATES_CONSTRAINT;
- }
- return Flag::SUCCESS;
-}
-
-Flag::Error CMSSamplingGrainConstraintFunc(uintx value, bool verbose) {
- if (UseConcMarkSweepGC) {
- size_t max_capacity = GenCollectedHeap::heap()->young_gen()->max_capacity();
- if (value > max_uintx - max_capacity) {
- CommandLineError::print(verbose,
- "CMSSamplingGrain (" UINTX_FORMAT ") must be "
- "less than or equal to ergonomic maximum (" SIZE_FORMAT ")\n",
- value, max_uintx - max_capacity);
- return Flag::VIOLATES_CONSTRAINT;
- }
- }
- return Flag::SUCCESS;
-}
-
-Flag::Error CMSWorkQueueDrainThresholdConstraintFunc(uintx value, bool verbose) {
- if (UseConcMarkSweepGC) {
- return ParallelGCThreadsAndCMSWorkQueueDrainThreshold(ParallelGCThreads, value, verbose);
- }
- return Flag::SUCCESS;
-}
-
-Flag::Error CMSBitMapYieldQuantumConstraintFunc(size_t value, bool verbose) {
- // Skip for current default value.
- if (UseConcMarkSweepGC && FLAG_IS_CMDLINE(CMSBitMapYieldQuantum)) {
- // CMSBitMapYieldQuantum should be compared with mark bitmap size.
- ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*)GenCollectedHeap::heap()->old_gen();
- size_t bitmap_size = cms->collector()->markBitMap()->sizeInWords();
-
- if (value > bitmap_size) {
- CommandLineError::print(verbose,
- "CMSBitMapYieldQuantum (" SIZE_FORMAT ") must "
- "be less than or equal to bitmap size (" SIZE_FORMAT ") "
- "whose size corresponds to the size of old generation of the Java heap\n",
- value, bitmap_size);
- return Flag::VIOLATES_CONSTRAINT;
- }
- }
- return Flag::SUCCESS;
-}
-
-Flag::Error OldPLABSizeConstraintFuncCMS(size_t value, bool verbose) {
- if (value == 0) {
- CommandLineError::print(verbose,
- "OldPLABSize (" SIZE_FORMAT ") must be greater than 0",
- value);
- return Flag::VIOLATES_CONSTRAINT;
- }
- // For CMS, OldPLABSize is the number of free blocks of a given size that are used when
- // replenishing the local per-worker free list caches.
- // For more details, please refer to Arguments::set_cms_and_parnew_gc_flags().
- return MaxPLABSizeBounds("OldPLABSize", value, verbose);
-}
--- a/src/hotspot/share/gc/cms/commandLineFlagConstraintsCMS.hpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_COMMANDLINEFLAGCONSTRAINTSCMS_HPP
-#define SHARE_GC_CMS_COMMANDLINEFLAGCONSTRAINTSCMS_HPP
-
-#include "runtime/globals.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-// CMS Flag Constraints
-Flag::Error ParGCStridesPerThreadConstraintFunc(uintx value, bool verbose);
-Flag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose);
-Flag::Error CMSOldPLABMinConstraintFunc(size_t value, bool verbose);
-Flag::Error CMSOldPLABMaxConstraintFunc(size_t value, bool verbose);
-Flag::Error CMSRescanMultipleConstraintFunc(size_t value, bool verbose);
-Flag::Error CMSConcMarkMultipleConstraintFunc(size_t value, bool verbose);
-Flag::Error CMSPrecleanDenominatorConstraintFunc(uintx value, bool verbose);
-Flag::Error CMSPrecleanNumeratorConstraintFunc(uintx value, bool verbose);
-Flag::Error CMSSamplingGrainConstraintFunc(uintx value, bool verbose);
-Flag::Error CMSWorkQueueDrainThresholdConstraintFunc(uintx value, bool verbose);
-Flag::Error CMSBitMapYieldQuantumConstraintFunc(size_t value, bool verbose);
-
-// CMS Subconstraints
-Flag::Error ParallelGCThreadsConstraintFuncCMS(uint value, bool verbose);
-Flag::Error OldPLABSizeConstraintFuncCMS(size_t value, bool verbose);
-
-#endif // SHARE_GC_CMS_COMMANDLINEFLAGCONSTRAINTSCMS_HPP
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -68,6 +68,7 @@
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/cms/jvmFlagConstraintsCMS.hpp"
+#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
+#include "gc/shared/cardTableRS.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/genCollectedHeap.hpp"
+#include "gc/shared/jvmFlagConstraintsGC.hpp"
+#include "memory/universe.hpp"
+#include "runtime/flags/jvmFlagRangeList.hpp"
+#include "runtime/globals_extension.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+static JVMFlag::Error ParallelGCThreadsAndCMSWorkQueueDrainThreshold(uint threads, uintx threshold, bool verbose) {
+ // CMSWorkQueueDrainThreshold is verified to be less than max_juint
+ if (UseConcMarkSweepGC && (threads > (uint)(max_jint / (uint)threshold))) {
+ CommandLineError::print(verbose,
+ "ParallelGCThreads (" UINT32_FORMAT ") or CMSWorkQueueDrainThreshold ("
+ UINTX_FORMAT ") is too large\n",
+ threads, threshold);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error ParallelGCThreadsConstraintFuncCMS(uint value, bool verbose) {
+ // To avoid overflow at ParScanClosure::do_oop_work.
+ if (UseConcMarkSweepGC && (value > (max_jint / 10))) {
+ CommandLineError::print(verbose,
+ "ParallelGCThreads (" UINT32_FORMAT ") must be "
+ "less than or equal to " UINT32_FORMAT " for CMS GC\n",
+ value, (max_jint / 10));
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ return ParallelGCThreadsAndCMSWorkQueueDrainThreshold(value, CMSWorkQueueDrainThreshold, verbose);
+}
+JVMFlag::Error ParGCStridesPerThreadConstraintFunc(uintx value, bool verbose) {
+ if (UseConcMarkSweepGC && (value > ((uintx)max_jint / (uintx)ParallelGCThreads))) {
+ CommandLineError::print(verbose,
+ "ParGCStridesPerThread (" UINTX_FORMAT ") must be "
+ "less than or equal to ergonomic maximum (" UINTX_FORMAT ")\n",
+ value, ((uintx)max_jint / (uintx)ParallelGCThreads));
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose) {
+ if (UseConcMarkSweepGC) {
+ // ParGCCardsPerStrideChunk should be compared with card table size.
+ size_t heap_size = Universe::heap()->reserved_region().word_size();
+ CardTableRS* ct = GenCollectedHeap::heap()->rem_set();
+ size_t card_table_size = ct->cards_required(heap_size) - 1; // Valid card table size
+
+ if ((size_t)value > card_table_size) {
+ CommandLineError::print(verbose,
+ "ParGCCardsPerStrideChunk (" INTX_FORMAT ") is too large for the heap size and "
+ "must be less than or equal to card table size (" SIZE_FORMAT ")\n",
+ value, card_table_size);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ // ParGCCardsPerStrideChunk is used with n_strides(ParallelGCThreads*ParGCStridesPerThread)
+ // from CardTableRS::process_stride(). Note that ParGCStridesPerThread is already checked
+ // not to make an overflow with ParallelGCThreads from its constraint function.
+ uintx n_strides = ParallelGCThreads * ParGCStridesPerThread;
+ uintx ergo_max = max_uintx / n_strides;
+ if ((uintx)value > ergo_max) {
+ CommandLineError::print(verbose,
+ "ParGCCardsPerStrideChunk (" INTX_FORMAT ") must be "
+ "less than or equal to ergonomic maximum (" UINTX_FORMAT ")\n",
+ value, ergo_max);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ }
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error CMSOldPLABMinConstraintFunc(size_t value, bool verbose) {
+ JVMFlag::Error status = JVMFlag::SUCCESS;
+
+ if (UseConcMarkSweepGC) {
+ if (value > CMSOldPLABMax) {
+ CommandLineError::print(verbose,
+ "CMSOldPLABMin (" SIZE_FORMAT ") must be "
+ "less than or equal to CMSOldPLABMax (" SIZE_FORMAT ")\n",
+ value, CMSOldPLABMax);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ status = MaxPLABSizeBounds("CMSOldPLABMin", value, verbose);
+ }
+ return status;
+}
+
+JVMFlag::Error CMSOldPLABMaxConstraintFunc(size_t value, bool verbose) {
+ JVMFlag::Error status = JVMFlag::SUCCESS;
+
+ if (UseConcMarkSweepGC) {
+ status = MaxPLABSizeBounds("CMSOldPLABMax", value, verbose);
+ }
+ return status;
+}
+
+static JVMFlag::Error CMSReservedAreaConstraintFunc(const char* name, size_t value, bool verbose) {
+ if (UseConcMarkSweepGC) {
+ ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*)GenCollectedHeap::heap()->old_gen();
+ const size_t ergo_max = cms->cmsSpace()->max_flag_size_for_task_size();
+ if (value > ergo_max) {
+ CommandLineError::print(verbose,
+ "%s (" SIZE_FORMAT ") must be "
+ "less than or equal to ergonomic maximum (" SIZE_FORMAT ") "
+ "which is based on the maximum size of the old generation of the Java heap\n",
+ name, value, ergo_max);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ }
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error CMSRescanMultipleConstraintFunc(size_t value, bool verbose) {
+ JVMFlag::Error status = CMSReservedAreaConstraintFunc("CMSRescanMultiple", value, verbose);
+
+ if (status == JVMFlag::SUCCESS && UseConcMarkSweepGC) {
+ // CMSParRemarkTask::do_dirty_card_rescan_tasks requires CompactibleFreeListSpace::rescan_task_size()
+ // to be aligned to CardTable::card_size * BitsPerWord.
+ // Note that rescan_task_size() will be aligned if CMSRescanMultiple is a multiple of 'HeapWordSize'
+ // because rescan_task_size() is CardTable::card_size / HeapWordSize * BitsPerWord.
+ if (value % HeapWordSize != 0) {
+ CommandLineError::print(verbose,
+ "CMSRescanMultiple (" SIZE_FORMAT ") must be "
+ "a multiple of " SIZE_FORMAT "\n",
+ value, HeapWordSize);
+ status = JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ }
+
+ return status;
+}
+
+JVMFlag::Error CMSConcMarkMultipleConstraintFunc(size_t value, bool verbose) {
+ return CMSReservedAreaConstraintFunc("CMSConcMarkMultiple", value, verbose);
+}
+
+JVMFlag::Error CMSPrecleanDenominatorConstraintFunc(uintx value, bool verbose) {
+ if (UseConcMarkSweepGC && (value <= CMSPrecleanNumerator)) {
+ CommandLineError::print(verbose,
+ "CMSPrecleanDenominator (" UINTX_FORMAT ") must be "
+ "strickly greater than CMSPrecleanNumerator (" UINTX_FORMAT ")\n",
+ value, CMSPrecleanNumerator);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error CMSPrecleanNumeratorConstraintFunc(uintx value, bool verbose) {
+ if (UseConcMarkSweepGC && (value >= CMSPrecleanDenominator)) {
+ CommandLineError::print(verbose,
+ "CMSPrecleanNumerator (" UINTX_FORMAT ") must be "
+ "less than CMSPrecleanDenominator (" UINTX_FORMAT ")\n",
+ value, CMSPrecleanDenominator);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error CMSSamplingGrainConstraintFunc(uintx value, bool verbose) {
+ if (UseConcMarkSweepGC) {
+ size_t max_capacity = GenCollectedHeap::heap()->young_gen()->max_capacity();
+ if (value > max_uintx - max_capacity) {
+ CommandLineError::print(verbose,
+ "CMSSamplingGrain (" UINTX_FORMAT ") must be "
+ "less than or equal to ergonomic maximum (" SIZE_FORMAT ")\n",
+ value, max_uintx - max_capacity);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ }
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error CMSWorkQueueDrainThresholdConstraintFunc(uintx value, bool verbose) {
+ if (UseConcMarkSweepGC) {
+ return ParallelGCThreadsAndCMSWorkQueueDrainThreshold(ParallelGCThreads, value, verbose);
+ }
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error CMSBitMapYieldQuantumConstraintFunc(size_t value, bool verbose) {
+ // Skip for current default value.
+ if (UseConcMarkSweepGC && FLAG_IS_CMDLINE(CMSBitMapYieldQuantum)) {
+ // CMSBitMapYieldQuantum should be compared with mark bitmap size.
+ ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*)GenCollectedHeap::heap()->old_gen();
+ size_t bitmap_size = cms->collector()->markBitMap()->sizeInWords();
+
+ if (value > bitmap_size) {
+ CommandLineError::print(verbose,
+ "CMSBitMapYieldQuantum (" SIZE_FORMAT ") must "
+ "be less than or equal to bitmap size (" SIZE_FORMAT ") "
+ "whose size corresponds to the size of old generation of the Java heap\n",
+ value, bitmap_size);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ }
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error OldPLABSizeConstraintFuncCMS(size_t value, bool verbose) {
+ if (value == 0) {
+ CommandLineError::print(verbose,
+ "OldPLABSize (" SIZE_FORMAT ") must be greater than 0",
+ value);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ // For CMS, OldPLABSize is the number of free blocks of a given size that are used when
+ // replenishing the local per-worker free list caches.
+ // For more details, please refer to Arguments::set_cms_and_parnew_gc_flags().
+ return MaxPLABSizeBounds("OldPLABSize", value, verbose);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_CMS_COMMANDLINEFLAGCONSTRAINTSCMS_HPP
+#define SHARE_GC_CMS_COMMANDLINEFLAGCONSTRAINTSCMS_HPP
+
+#include "runtime/globals.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// CMS Flag Constraints
+JVMFlag::Error ParGCStridesPerThreadConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose);
+JVMFlag::Error CMSOldPLABMinConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error CMSOldPLABMaxConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error CMSRescanMultipleConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error CMSConcMarkMultipleConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error CMSPrecleanDenominatorConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error CMSPrecleanNumeratorConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error CMSSamplingGrainConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error CMSWorkQueueDrainThresholdConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error CMSBitMapYieldQuantumConstraintFunc(size_t value, bool verbose);
+
+// CMS Subconstraints
+JVMFlag::Error ParallelGCThreadsConstraintFuncCMS(uint value, bool verbose);
+JVMFlag::Error OldPLABSizeConstraintFuncCMS(size_t value, bool verbose);
+
+#endif // SHARE_GC_CMS_COMMANDLINEFLAGCONSTRAINTSCMS_HPP
--- a/src/hotspot/share/gc/g1/bufferingOopClosure.hpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,144 +0,0 @@
-/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_BUFFERINGOOPCLOSURE_HPP
-#define SHARE_VM_GC_G1_BUFFERINGOOPCLOSURE_HPP
-
-#include "memory/iterator.hpp"
-#include "oops/oopsHierarchy.hpp"
-#include "runtime/os.hpp"
-#include "utilities/debug.hpp"
-
-// A BufferingOops closure tries to separate out the cost of finding roots
-// from the cost of applying closures to them. It maintains an array of
-// ref-containing locations. Until the array is full, applying the closure
-// to an oop* merely records that location in the array. Since this
-// closure app cost is small, an elapsed timer can approximately attribute
-// all of this cost to the cost of finding the roots. When the array fills
-// up, the wrapped closure is applied to all elements, keeping track of
-// this elapsed time of this process, and leaving the array empty.
-// The caller must be sure to call "done" to process any unprocessed
-// buffered entries.
-
-class BufferingOopClosure: public OopClosure {
- friend class BufferingOopClosureTest;
-protected:
- static const size_t BufferLength = 1024;
-
- // We need to know if the buffered addresses contain oops or narrowOops.
- // We can't tag the addresses the way StarTask does, because we need to
- // be able to handle unaligned addresses coming from oops embedded in code.
- //
- // The addresses for the full-sized oops are filled in from the bottom,
- // while the addresses for the narrowOops are filled in from the top.
- OopOrNarrowOopStar _buffer[BufferLength];
- OopOrNarrowOopStar* _oop_top;
- OopOrNarrowOopStar* _narrowOop_bottom;
-
- OopClosure* _oc;
- double _closure_app_seconds;
-
-
- bool is_buffer_empty() {
- return _oop_top == _buffer && _narrowOop_bottom == (_buffer + BufferLength - 1);
- }
-
- bool is_buffer_full() {
- return _narrowOop_bottom < _oop_top;
- }
-
- // Process addresses containing full-sized oops.
- void process_oops() {
- for (OopOrNarrowOopStar* curr = _buffer; curr < _oop_top; ++curr) {
- _oc->do_oop((oop*)(*curr));
- }
- _oop_top = _buffer;
- }
-
- // Process addresses containing narrow oops.
- void process_narrowOops() {
- for (OopOrNarrowOopStar* curr = _buffer + BufferLength - 1; curr > _narrowOop_bottom; --curr) {
- _oc->do_oop((narrowOop*)(*curr));
- }
- _narrowOop_bottom = _buffer + BufferLength - 1;
- }
-
- // Apply the closure to all oops and clear the buffer.
- // Accumulate the time it took.
- void process_buffer() {
- double start = os::elapsedTime();
-
- process_oops();
- process_narrowOops();
-
- _closure_app_seconds += (os::elapsedTime() - start);
- }
-
- void process_buffer_if_full() {
- if (is_buffer_full()) {
- process_buffer();
- }
- }
-
- void add_narrowOop(narrowOop* p) {
- assert(!is_buffer_full(), "Buffer should not be full");
- *_narrowOop_bottom = (OopOrNarrowOopStar)p;
- _narrowOop_bottom--;
- }
-
- void add_oop(oop* p) {
- assert(!is_buffer_full(), "Buffer should not be full");
- *_oop_top = (OopOrNarrowOopStar)p;
- _oop_top++;
- }
-
-public:
- virtual void do_oop(narrowOop* p) {
- process_buffer_if_full();
- add_narrowOop(p);
- }
-
- virtual void do_oop(oop* p) {
- process_buffer_if_full();
- add_oop(p);
- }
-
- void done() {
- if (!is_buffer_empty()) {
- process_buffer();
- }
- }
-
- double closure_app_seconds() {
- return _closure_app_seconds;
- }
-
- BufferingOopClosure(OopClosure *oc) :
- _oc(oc),
- _oop_top(_buffer),
- _narrowOop_bottom(_buffer + BufferLength - 1),
- _closure_app_seconds(0.0) { }
-};
-
-#endif // SHARE_VM_GC_G1_BUFFERINGOOPCLOSURE_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "c1/c1_LIRGenerator.hpp"
+#include "c1/c1_CodeStubs.hpp"
+#include "gc/g1/c1/g1BarrierSetC1.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
+#include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/g1ThreadLocalData.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "utilities/macros.hpp"
+
+#ifdef ASSERT
+#define __ gen->lir(__FILE__, __LINE__)->
+#else
+#define __ gen->lir()->
+#endif
+
+void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
+ G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->gen_pre_barrier_stub(ce, this);
+}
+
+void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
+ G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->gen_post_barrier_stub(ce, this);
+}
+
+void G1BarrierSetC1::pre_barrier(LIRAccess& access, LIR_Opr addr_opr,
+ LIR_Opr pre_val, CodeEmitInfo* info) {
+ LIRGenerator* gen = access.gen();
+ DecoratorSet decorators = access.decorators();
+ bool in_heap = (decorators & IN_HEAP) != 0;
+ bool in_conc_root = (decorators & IN_CONCURRENT_ROOT) != 0;
+ if (!in_heap && !in_conc_root) {
+ return;
+ }
+
+ // First we test whether marking is in progress.
+ BasicType flag_type;
+ bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
+ bool do_load = pre_val == LIR_OprFact::illegalOpr;
+ if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+ flag_type = T_INT;
+ } else {
+ guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
+ "Assumption");
+ // Use unsigned type T_BOOLEAN here rather than signed T_BYTE since some platforms, eg. ARM,
+ // need to use unsigned instructions to use the large offset to load the satb_mark_queue.
+ flag_type = T_BOOLEAN;
+ }
+ LIR_Opr thrd = gen->getThreadPointer();
+ LIR_Address* mark_active_flag_addr =
+ new LIR_Address(thrd,
+ in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()),
+ flag_type);
+ // Read the marking-in-progress flag.
+ LIR_Opr flag_val = gen->new_register(T_INT);
+ __ load(mark_active_flag_addr, flag_val);
+ __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
+
+ LIR_PatchCode pre_val_patch_code = lir_patch_none;
+
+ CodeStub* slow;
+
+ if (do_load) {
+ assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
+ assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");
+
+ if (patch)
+ pre_val_patch_code = lir_patch_normal;
+
+ pre_val = gen->new_register(T_OBJECT);
+
+ if (!addr_opr->is_address()) {
+ assert(addr_opr->is_register(), "must be");
+ addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
+ }
+ slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
+ } else {
+ assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
+ assert(pre_val->is_register(), "must be");
+ assert(pre_val->type() == T_OBJECT, "must be an object");
+ assert(info == NULL, "sanity");
+
+ slow = new G1PreBarrierStub(pre_val);
+ }
+
+ __ branch(lir_cond_notEqual, T_INT, slow);
+ __ branch_destination(slow->continuation());
+}
+
+void G1BarrierSetC1::post_barrier(LIRAccess& access, LIR_OprDesc* addr, LIR_OprDesc* new_val) {
+ LIRGenerator* gen = access.gen();
+ DecoratorSet decorators = access.decorators();
+ bool in_heap = (decorators & IN_HEAP) != 0;
+ if (!in_heap) {
+ return;
+ }
+
+ // If the "new_val" is a constant NULL, no barrier is necessary.
+ if (new_val->is_constant() &&
+ new_val->as_constant_ptr()->as_jobject() == NULL) return;
+
+ if (!new_val->is_register()) {
+ LIR_Opr new_val_reg = gen->new_register(T_OBJECT);
+ if (new_val->is_constant()) {
+ __ move(new_val, new_val_reg);
+ } else {
+ __ leal(new_val, new_val_reg);
+ }
+ new_val = new_val_reg;
+ }
+ assert(new_val->is_register(), "must be a register at this point");
+
+ if (addr->is_address()) {
+ LIR_Address* address = addr->as_address_ptr();
+ LIR_Opr ptr = gen->new_pointer_register();
+ if (!address->index()->is_valid() && address->disp() == 0) {
+ __ move(address->base(), ptr);
+ } else {
+ assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
+ __ leal(addr, ptr);
+ }
+ addr = ptr;
+ }
+ assert(addr->is_register(), "must be a register at this point");
+
+ LIR_Opr xor_res = gen->new_pointer_register();
+ LIR_Opr xor_shift_res = gen->new_pointer_register();
+ if (TwoOperandLIRForm) {
+ __ move(addr, xor_res);
+ __ logical_xor(xor_res, new_val, xor_res);
+ __ move(xor_res, xor_shift_res);
+ __ unsigned_shift_right(xor_shift_res,
+ LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
+ xor_shift_res,
+ LIR_OprDesc::illegalOpr());
+ } else {
+ __ logical_xor(addr, new_val, xor_res);
+ __ unsigned_shift_right(xor_res,
+ LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
+ xor_shift_res,
+ LIR_OprDesc::illegalOpr());
+ }
+
+ if (!new_val->is_register()) {
+ LIR_Opr new_val_reg = gen->new_register(T_OBJECT);
+ __ leal(new_val, new_val_reg);
+ new_val = new_val_reg;
+ }
+ assert(new_val->is_register(), "must be a register at this point");
+
+ __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
+
+ CodeStub* slow = new G1PostBarrierStub(addr, new_val);
+ __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
+ __ branch_destination(slow->continuation());
+}
+
+void G1BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
+ DecoratorSet decorators = access.decorators();
+ bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
+ bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
+ bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
+ LIRGenerator *gen = access.gen();
+
+ BarrierSetC1::load_at_resolved(access, result);
+
+ if (access.is_oop() && (is_weak || is_phantom || is_anonymous)) {
+ // Register the value in the referent field with the pre-barrier
+ LabelObj *Lcont_anonymous;
+ if (is_anonymous) {
+ Lcont_anonymous = new LabelObj();
+ generate_referent_check(access, Lcont_anonymous);
+ }
+ pre_barrier(access, LIR_OprFact::illegalOpr /* addr_opr */,
+ result /* pre_val */, access.patch_emit_info() /* info */);
+ if (is_anonymous) {
+ __ branch_destination(Lcont_anonymous->label());
+ }
+ }
+}
+
+class C1G1PreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
+ virtual OopMapSet* generate_code(StubAssembler* sasm) {
+ G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->generate_c1_pre_barrier_runtime_stub(sasm);
+ return NULL;
+ }
+};
+
+class C1G1PostBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
+ virtual OopMapSet* generate_code(StubAssembler* sasm) {
+ G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->generate_c1_post_barrier_runtime_stub(sasm);
+ return NULL;
+ }
+};
+
+void G1BarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
+ C1G1PreBarrierCodeGenClosure pre_code_gen_cl;
+ C1G1PostBarrierCodeGenClosure post_code_gen_cl;
+ _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1, "g1_pre_barrier_slow",
+ false, &pre_code_gen_cl);
+ _post_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1, "g1_post_barrier_slow",
+ false, &post_code_gen_cl);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_C1_G1BARRIERSETC1_HPP
+#define SHARE_GC_G1_C1_G1BARRIERSETC1_HPP
+
+#include "c1/c1_CodeStubs.hpp"
+#include "gc/shared/c1/modRefBarrierSetC1.hpp"
+
+class G1PreBarrierStub: public CodeStub {
+ friend class G1BarrierSetC1;
+ private:
+ bool _do_load;
+ LIR_Opr _addr;
+ LIR_Opr _pre_val;
+ LIR_PatchCode _patch_code;
+ CodeEmitInfo* _info;
+
+ public:
+ // Version that _does_ generate a load of the previous value from addr.
+ // addr (the address of the field to be read) must be a LIR_Address
+ // pre_val (a temporary register) must be a register;
+ G1PreBarrierStub(LIR_Opr addr, LIR_Opr pre_val, LIR_PatchCode patch_code, CodeEmitInfo* info) :
+ _addr(addr), _pre_val(pre_val), _do_load(true),
+ _patch_code(patch_code), _info(info)
+ {
+ assert(_pre_val->is_register(), "should be temporary register");
+ assert(_addr->is_address(), "should be the address of the field");
+ }
+
+ // Version that _does not_ generate load of the previous value; the
+ // previous value is assumed to have already been loaded into pre_val.
+ G1PreBarrierStub(LIR_Opr pre_val) :
+ _addr(LIR_OprFact::illegalOpr), _pre_val(pre_val), _do_load(false),
+ _patch_code(lir_patch_none), _info(NULL)
+ {
+ assert(_pre_val->is_register(), "should be a register");
+ }
+
+ LIR_Opr addr() const { return _addr; }
+ LIR_Opr pre_val() const { return _pre_val; }
+ LIR_PatchCode patch_code() const { return _patch_code; }
+ CodeEmitInfo* info() const { return _info; }
+ bool do_load() const { return _do_load; }
+
+ virtual void emit_code(LIR_Assembler* e);
+ virtual void visit(LIR_OpVisitState* visitor) {
+ if (_do_load) {
+ // don't pass in the code emit info since it's processed in the fast
+ // path
+ if (_info != NULL)
+ visitor->do_slow_case(_info);
+ else
+ visitor->do_slow_case();
+
+ visitor->do_input(_addr);
+ visitor->do_temp(_pre_val);
+ } else {
+ visitor->do_slow_case();
+ visitor->do_input(_pre_val);
+ }
+ }
+#ifndef PRODUCT
+ virtual void print_name(outputStream* out) const { out->print("G1PreBarrierStub"); }
+#endif // PRODUCT
+};
+
+class G1PostBarrierStub: public CodeStub {
+ friend class G1BarrierSetC1;
+ private:
+ LIR_Opr _addr;
+ LIR_Opr _new_val;
+
+ public:
+ // addr (the address of the object head) and new_val must be registers.
+ G1PostBarrierStub(LIR_Opr addr, LIR_Opr new_val): _addr(addr), _new_val(new_val) { }
+
+ LIR_Opr addr() const { return _addr; }
+ LIR_Opr new_val() const { return _new_val; }
+
+ virtual void emit_code(LIR_Assembler* e);
+ virtual void visit(LIR_OpVisitState* visitor) {
+ // don't pass in the code emit info since it's processed in the fast path
+ visitor->do_slow_case();
+ visitor->do_input(_addr);
+ visitor->do_input(_new_val);
+ }
+#ifndef PRODUCT
+ virtual void print_name(outputStream* out) const { out->print("G1PostBarrierStub"); }
+#endif // PRODUCT
+};
+
+class CodeBlob;
+
+class G1BarrierSetC1 : public ModRefBarrierSetC1 {
+ protected:
+ CodeBlob* _pre_barrier_c1_runtime_code_blob;
+ CodeBlob* _post_barrier_c1_runtime_code_blob;
+
+ virtual void pre_barrier(LIRAccess& access, LIR_Opr addr_opr,
+ LIR_Opr pre_val, CodeEmitInfo* info);
+ virtual void post_barrier(LIRAccess& access, LIR_OprDesc* addr, LIR_OprDesc* new_val);
+
+ virtual void load_at_resolved(LIRAccess& access, LIR_Opr result);
+
+ public:
+ G1BarrierSetC1()
+ : _pre_barrier_c1_runtime_code_blob(NULL),
+ _post_barrier_c1_runtime_code_blob(NULL) {}
+
+ CodeBlob* pre_barrier_c1_runtime_code_blob() { return _pre_barrier_c1_runtime_code_blob; }
+ CodeBlob* post_barrier_c1_runtime_code_blob() { return _post_barrier_c1_runtime_code_blob; }
+
+ virtual void generate_c1_runtime_stubs(BufferBlob* buffer_blob);
+};
+
+#endif // SHARE_GC_G1_C1_G1BARRIERSETC1_HPP
--- a/src/hotspot/share/gc/g1/commandLineFlagConstraintsG1.cpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,167 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/heapRegionBounds.inline.hpp"
-#include "runtime/commandLineFlagRangeList.hpp"
-#include "runtime/globals_extension.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-Flag::Error G1RSetRegionEntriesConstraintFunc(intx value, bool verbose) {
- if (!UseG1GC) return Flag::SUCCESS;
-
- // Default value of G1RSetRegionEntries=0 means will be set ergonomically.
- // Minimum value is 1.
- if (FLAG_IS_CMDLINE(G1RSetRegionEntries) && (value < 1)) {
- CommandLineError::print(verbose,
- "G1RSetRegionEntries (" INTX_FORMAT ") must be "
- "greater than or equal to 1\n",
- value);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error G1RSetSparseRegionEntriesConstraintFunc(intx value, bool verbose) {
- if (!UseG1GC) return Flag::SUCCESS;
-
- // Default value of G1RSetSparseRegionEntries=0 means will be set ergonomically.
- // Minimum value is 1.
- if (FLAG_IS_CMDLINE(G1RSetSparseRegionEntries) && (value < 1)) {
- CommandLineError::print(verbose,
- "G1RSetSparseRegionEntries (" INTX_FORMAT ") must be "
- "greater than or equal to 1\n",
- value);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error G1HeapRegionSizeConstraintFunc(size_t value, bool verbose) {
- if (!UseG1GC) return Flag::SUCCESS;
-
- // Default value of G1HeapRegionSize=0 means will be set ergonomically.
- if (FLAG_IS_CMDLINE(G1HeapRegionSize) && (value < HeapRegionBounds::min_size())) {
- CommandLineError::print(verbose,
- "G1HeapRegionSize (" SIZE_FORMAT ") must be "
- "greater than or equal to ergonomic heap region minimum size\n",
- value);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error G1NewSizePercentConstraintFunc(uintx value, bool verbose) {
- if (!UseG1GC) return Flag::SUCCESS;
-
- if (value > G1MaxNewSizePercent) {
- CommandLineError::print(verbose,
- "G1NewSizePercent (" UINTX_FORMAT ") must be "
- "less than or equal to G1MaxNewSizePercent (" UINTX_FORMAT ")\n",
- value, G1MaxNewSizePercent);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error G1MaxNewSizePercentConstraintFunc(uintx value, bool verbose) {
- if (!UseG1GC) return Flag::SUCCESS;
-
- if (value < G1NewSizePercent) {
- CommandLineError::print(verbose,
- "G1MaxNewSizePercent (" UINTX_FORMAT ") must be "
- "greater than or equal to G1NewSizePercent (" UINTX_FORMAT ")\n",
- value, G1NewSizePercent);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error MaxGCPauseMillisConstraintFuncG1(uintx value, bool verbose) {
- if (UseG1GC && FLAG_IS_CMDLINE(MaxGCPauseMillis) && (value >= GCPauseIntervalMillis)) {
- CommandLineError::print(verbose,
- "MaxGCPauseMillis (" UINTX_FORMAT ") must be "
- "less than GCPauseIntervalMillis (" UINTX_FORMAT ")\n",
- value, GCPauseIntervalMillis);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- return Flag::SUCCESS;
-}
-
-Flag::Error GCPauseIntervalMillisConstraintFuncG1(uintx value, bool verbose) {
- if (UseG1GC) {
- if (FLAG_IS_CMDLINE(GCPauseIntervalMillis)) {
- if (value < 1) {
- CommandLineError::print(verbose,
- "GCPauseIntervalMillis (" UINTX_FORMAT ") must be "
- "greater than or equal to 1\n",
- value);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
- CommandLineError::print(verbose,
- "GCPauseIntervalMillis cannot be set "
- "without setting MaxGCPauseMillis\n");
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- if (value <= MaxGCPauseMillis) {
- CommandLineError::print(verbose,
- "GCPauseIntervalMillis (" UINTX_FORMAT ") must be "
- "greater than MaxGCPauseMillis (" UINTX_FORMAT ")\n",
- value, MaxGCPauseMillis);
- return Flag::VIOLATES_CONSTRAINT;
- }
- }
- }
-
- return Flag::SUCCESS;
-}
-
-Flag::Error NewSizeConstraintFuncG1(size_t value, bool verbose) {
-#ifdef _LP64
- // Overflow would happen for uint type variable of YoungGenSizer::_min_desired_young_length
- // when the value to be assigned exceeds uint range.
- // i.e. result of '(uint)(NewSize / region size(1~32MB))'
- // So maximum of NewSize should be 'max_juint * 1M'
- if (UseG1GC && (value > (max_juint * 1 * M))) {
- CommandLineError::print(verbose,
- "NewSize (" SIZE_FORMAT ") must be less than ergonomic maximum value\n",
- value);
- return Flag::VIOLATES_CONSTRAINT;
- }
-#endif // _LP64
- return Flag::SUCCESS;
-}
-
-size_t MaxSizeForHeapAlignmentG1() {
- return HeapRegionBounds::max_size();
-}
--- a/src/hotspot/share/gc/g1/commandLineFlagConstraintsG1.hpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_G1_COMMANDLINEFLAGCONSTRAINTSG1_HPP
-#define SHARE_GC_G1_COMMANDLINEFLAGCONSTRAINTSG1_HPP
-
-#include "runtime/globals.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-// G1 Flag Constraints
-Flag::Error G1RSetRegionEntriesConstraintFunc(intx value, bool verbose);
-Flag::Error G1RSetSparseRegionEntriesConstraintFunc(intx value, bool verbose);
-Flag::Error G1HeapRegionSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error G1NewSizePercentConstraintFunc(uintx value, bool verbose);
-Flag::Error G1MaxNewSizePercentConstraintFunc(uintx value, bool verbose);
-
-// G1 Subconstraints
-Flag::Error MaxGCPauseMillisConstraintFuncG1(uintx value, bool verbose);
-Flag::Error GCPauseIntervalMillisConstraintFuncG1(uintx value, bool verbose);
-Flag::Error MaxSizeForHeapAlignmentG1(const char* name, size_t value, bool verbose);
-Flag::Error NewSizeConstraintFuncG1(size_t value, bool verbose);
-
-size_t MaxSizeForHeapAlignmentG1();
-
-#endif // SHARE_GC_SHARED_COMMANDLINEFLAGCONSTRAINTSG1_HPP
--- a/src/hotspot/share/gc/g1/g1Arguments.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1Arguments.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -126,6 +126,11 @@
log_trace(gc)("MarkStackSize: %uk MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
+ // By default do not let the target stack size to be more than 1/4 of the entries
+ if (FLAG_IS_DEFAULT(GCDrainStackTargetSize)) {
+ FLAG_SET_ERGO(uintx, GCDrainStackTargetSize, MIN2(GCDrainStackTargetSize, (uintx)TASKQUEUE_SIZE / 4));
+ }
+
#ifdef COMPILER2
// Enable loop strip mining to offer better pause time guarantees
if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
--- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -37,12 +37,18 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/macros.hpp"
+#ifdef COMPILER1
+#include "gc/g1/c1/g1BarrierSetC1.hpp"
+#endif
+
+class G1BarrierSetC1;
SATBMarkQueueSet G1BarrierSet::_satb_mark_queue_set;
DirtyCardQueueSet G1BarrierSet::_dirty_card_queue_set;
G1BarrierSet::G1BarrierSet(G1CardTable* card_table) :
CardTableBarrierSet(make_barrier_set_assembler<G1BarrierSetAssembler>(),
+ make_barrier_set_c1<G1BarrierSetC1>(),
card_table,
BarrierSet::FakeRtti(BarrierSet::G1BarrierSet)) {}
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -28,7 +28,6 @@
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
-#include "gc/g1/bufferingOopClosure.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
@@ -84,6 +83,7 @@
#include "oops/oop.inline.hpp"
#include "prims/resolvedMethodTable.hpp"
#include "runtime/atomic.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/orderAccess.inline.hpp"
@@ -1840,7 +1840,7 @@
while (dcqs.apply_closure_during_gc(cl, worker_i)) {
n_completed_buffers++;
}
- g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
+ g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers, G1GCPhaseTimes::UpdateRSProcessedBuffers);
dcqs.clear_n_completed_buffers();
assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}
@@ -3129,15 +3129,13 @@
double start_strong_roots_sec = os::elapsedTime();
- _root_processor->evacuate_roots(pss->closures(), worker_id);
+ _root_processor->evacuate_roots(pss, worker_id);
// We pass a weak code blobs closure to the remembered set scanning because we want to avoid
// treating the nmethods visited to act as roots for concurrent marking.
// We only want to make sure that the oops in the nmethods are adjusted with regard to the
// objects copied by the current evacuation.
- _g1h->g1_rem_set()->oops_into_collection_set_do(pss,
- pss->closures()->weak_codeblobs(),
- worker_id);
+ _g1h->g1_rem_set()->oops_into_collection_set_do(pss, worker_id);
double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
@@ -3151,9 +3149,11 @@
evac_term_attempts = evac.term_attempts();
term_sec = evac.term_time();
double elapsed_sec = os::elapsedTime() - start;
- _g1h->g1_policy()->phase_times()->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
- _g1h->g1_policy()->phase_times()->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
- _g1h->g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
+
+ G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
+ p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
+ p->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
+ p->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
}
assert(pss->queue_is_empty(), "should be empty");
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
+#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/workerDataArray.inline.hpp"
#include "memory/resourceArea.hpp"
@@ -167,9 +168,12 @@
}
#define ASSERT_PHASE_UNINITIALIZED(phase) \
- assert(_gc_par_phases[phase]->get(i) == uninitialized, "Phase " #phase " reported for thread that was not started");
+ assert(_gc_par_phases[phase] == NULL || _gc_par_phases[phase]->get(i) == uninitialized, "Phase " #phase " reported for thread that was not started");
double G1GCPhaseTimes::worker_time(GCParPhases phase, uint worker) {
+ if (_gc_par_phases[phase] == NULL) {
+ return 0.0;
+ }
double value = _gc_par_phases[phase]->get(worker);
if (value != WorkerDataArray<double>::uninitialized()) {
return value;
@@ -189,21 +193,20 @@
double total_worker_time = _gc_par_phases[GCWorkerEnd]->get(i) - _gc_par_phases[GCWorkerStart]->get(i);
record_time_secs(GCWorkerTotal, i , total_worker_time);
- double worker_known_time =
- worker_time(ExtRootScan, i)
- + worker_time(SATBFiltering, i)
- + worker_time(UpdateRS, i)
- + worker_time(ScanRS, i)
- + worker_time(CodeRoots, i)
- + worker_time(ObjCopy, i)
- + worker_time(Termination, i);
+ double worker_known_time = worker_time(ExtRootScan, i) +
+ worker_time(ScanHCC, i) +
+ worker_time(UpdateRS, i) +
+ worker_time(ScanRS, i) +
+ worker_time(CodeRoots, i) +
+ worker_time(ObjCopy, i) +
+ worker_time(Termination, i);
record_time_secs(Other, i, total_worker_time - worker_known_time);
} else {
// Make sure all slots are uninitialized since this thread did not seem to have been started
ASSERT_PHASE_UNINITIALIZED(GCWorkerEnd);
ASSERT_PHASE_UNINITIALIZED(ExtRootScan);
- ASSERT_PHASE_UNINITIALIZED(SATBFiltering);
+ ASSERT_PHASE_UNINITIALIZED(ScanHCC);
ASSERT_PHASE_UNINITIALIZED(UpdateRS);
ASSERT_PHASE_UNINITIALIZED(ScanRS);
ASSERT_PHASE_UNINITIALIZED(CodeRoots);
@@ -225,6 +228,14 @@
_gc_par_phases[phase]->add(worker_i, secs);
}
+void G1GCPhaseTimes::record_or_add_objcopy_time_secs(uint worker_i, double secs) {
+ if (_gc_par_phases[ObjCopy]->get(worker_i) == _gc_par_phases[ObjCopy]->uninitialized()) {
+ record_time_secs(ObjCopy, worker_i, secs);
+ } else {
+ add_time_secs(ObjCopy, worker_i, secs);
+ }
+}
+
void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index) {
_gc_par_phases[phase]->set_thread_work_item(worker_i, count, index);
}
@@ -463,16 +474,49 @@
}
}
+G1EvacPhaseWithTrimTimeTracker::G1EvacPhaseWithTrimTimeTracker(G1ParScanThreadState* pss, Tickspan& total_time, Tickspan& trim_time) :
+ _pss(pss),
+ _start(Ticks::now()),
+ _total_time(total_time),
+ _trim_time(trim_time) {
+
+ assert(_pss->trim_ticks().value() == 0, "Possibly remaining trim ticks left over from previous use");
+}
+
+G1EvacPhaseWithTrimTimeTracker::~G1EvacPhaseWithTrimTimeTracker() {
+ _total_time += (Ticks::now() - _start) - _pss->trim_ticks();
+ _trim_time += _pss->trim_ticks();
+ _pss->reset_trim_ticks();
+}
+
G1GCParPhaseTimesTracker::G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id) :
_phase_times(phase_times), _phase(phase), _worker_id(worker_id) {
if (_phase_times != NULL) {
- _start_time = os::elapsedTime();
+ _start_time = Ticks::now();
}
}
G1GCParPhaseTimesTracker::~G1GCParPhaseTimesTracker() {
if (_phase_times != NULL) {
- _phase_times->record_time_secs(_phase, _worker_id, os::elapsedTime() - _start_time);
+ _phase_times->record_time_secs(_phase, _worker_id, TicksToTimeHelper::seconds(Ticks::now() - _start_time));
}
}
+G1EvacPhaseTimesTracker::G1EvacPhaseTimesTracker(G1GCPhaseTimes* phase_times,
+ G1ParScanThreadState* pss,
+ G1GCPhaseTimes::GCParPhases phase,
+ uint worker_id) :
+ G1GCParPhaseTimesTracker(phase_times, phase, worker_id),
+ _total_time(),
+ _trim_time(),
+ _trim_tracker(pss, _total_time, _trim_time) {
+}
+
+G1EvacPhaseTimesTracker::~G1EvacPhaseTimesTracker() {
+ if (_phase_times != NULL) {
+ // Exclude trim time by increasing the start time.
+ _start_time += _trim_time;
+ _phase_times->record_or_add_objcopy_time_secs(_worker_id, TicksToTimeHelper::seconds(_trim_time));
+ }
+}
+
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,7 @@
#include "utilities/macros.hpp"
class LineBuffer;
+class G1ParScanThreadState;
class STWGCTimer;
template <class T> class WorkerDataArray;
@@ -198,6 +199,8 @@
// add a number of seconds to a phase
void add_time_secs(GCParPhases phase, uint worker_i, double secs);
+ void record_or_add_objcopy_time_secs(uint worker_i, double secs);
+
void record_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index = 0);
// return the average time for a phase in milliseconds
@@ -369,14 +372,36 @@
ReferenceProcessorPhaseTimes* ref_phase_times() { return &_ref_phase_times; }
};
-class G1GCParPhaseTimesTracker : public StackObj {
- double _start_time;
+class G1EvacPhaseWithTrimTimeTracker : public StackObj {
+ G1ParScanThreadState* _pss;
+ Ticks _start;
+
+ Tickspan& _total_time;
+ Tickspan& _trim_time;
+public:
+ G1EvacPhaseWithTrimTimeTracker(G1ParScanThreadState* pss, Tickspan& total_time, Tickspan& trim_time);
+ ~G1EvacPhaseWithTrimTimeTracker();
+};
+
+class G1GCParPhaseTimesTracker : public CHeapObj<mtGC> {
+protected:
+ Ticks _start_time;
G1GCPhaseTimes::GCParPhases _phase;
G1GCPhaseTimes* _phase_times;
uint _worker_id;
public:
G1GCParPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1GCPhaseTimes::GCParPhases phase, uint worker_id);
- ~G1GCParPhaseTimesTracker();
+ virtual ~G1GCParPhaseTimesTracker();
+};
+
+class G1EvacPhaseTimesTracker : public G1GCParPhaseTimesTracker {
+ Tickspan _total_time;
+ Tickspan _trim_time;
+
+ G1EvacPhaseWithTrimTimeTracker _trim_tracker;
+public:
+ G1EvacPhaseTimesTracker(G1GCPhaseTimes* phase_times, G1ParScanThreadState* pss, G1GCPhaseTimes::GCParPhases phase, uint worker_id);
+ virtual ~G1EvacPhaseTimesTracker();
};
#endif // SHARE_VM_GC_G1_G1GCPHASETIMES_HPP
--- a/src/hotspot/share/gc/g1/g1OopClosures.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1OopClosures.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -56,6 +56,8 @@
cld->oops_do(_closure, _must_claim, /*clear_modified_oops*/true);
_closure->set_scanned_cld(NULL);
+
+ _closure->trim_queue_partially();
}
_count++;
}
--- a/src/hotspot/share/gc/g1/g1OopClosures.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1OopClosures.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -57,6 +57,8 @@
// This closure needs special handling for InstanceRefKlass.
virtual ReferenceIterationMode reference_iteration_mode() { return DO_DISCOVERED_AND_DISCOVERY; }
void set_region(HeapRegion* from) { _from = from; }
+
+ inline void trim_queue_partially();
};
// Used during the Update RS phase to refine remaining cards in the DCQ during garbage collection.
@@ -126,6 +128,8 @@
public:
void set_scanned_cld(ClassLoaderData* cld) { _scanned_cld = cld; }
inline void do_cld_barrier(oop new_obj);
+
+ inline void trim_queue_partially();
};
enum G1Barrier {
--- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -67,6 +67,10 @@
}
}
+inline void G1ScanClosureBase::trim_queue_partially() {
+ _par_scan_state->trim_queue_partially();
+}
+
template <class T>
inline void G1ScanEvacuatedObjClosure::do_oop_nv(T* p) {
T heap_oop = RawAccess<>::oop_load(p);
@@ -225,6 +229,10 @@
_cm->mark_in_next_bitmap(_worker_id, to_obj, from_obj->size());
}
+void G1ParCopyHelper::trim_queue_partially() {
+ _par_scan_state->trim_queue_partially();
+}
+
template <G1Barrier barrier, G1Mark do_mark_object>
template <class T>
void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
@@ -269,6 +277,7 @@
mark_object(obj);
}
}
+ trim_queue_partially();
}
template <class T> void G1RebuildRemSetClosure::do_oop_nv(T* p) {
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -43,11 +43,15 @@
_dcq(&g1h->dirty_card_queue_set()),
_ct(g1h->card_table()),
_closures(NULL),
+ _plab_allocator(NULL),
+ _age_table(false),
+ _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
+ _scanner(g1h, this),
_hash_seed(17),
_worker_id(worker_id),
- _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
- _age_table(false),
- _scanner(g1h, this),
+ _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
+ _stack_trim_lower_threshold(GCDrainStackTargetSize),
+ _trim_ticks(),
_old_gen_is_full(false)
{
// we allocate G1YoungSurvRateNumRegions plus one entries, since
@@ -138,16 +142,8 @@
void G1ParScanThreadState::trim_queue() {
StarTask ref;
do {
- // Drain the overflow stack first, so other threads can steal.
- while (_refs->pop_overflow(ref)) {
- if (!_refs->try_push_to_taskqueue(ref)) {
- dispatch_reference(ref);
- }
- }
-
- while (_refs->pop_local(ref)) {
- dispatch_reference(ref);
- }
+ // Fully drain the queue.
+ trim_queue_to_threshold(0);
} while (!_refs->is_empty());
}
@@ -314,7 +310,7 @@
// length field of the from-space object.
arrayOop(obj)->set_length(0);
oop* old_p = set_partial_array_mask(old);
- push_on_queue(old_p);
+ do_oop_partial_array(old_p);
} else {
HeapRegion* const to_region = _g1h->heap_region_containing(obj_ptr);
_scanner.set_region(to_region);
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -35,6 +35,7 @@
#include "gc/shared/ageTable.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
+#include "utilities/ticks.hpp"
class G1PLABAllocator;
class G1EvacuationRootClosures;
@@ -42,7 +43,6 @@
class outputStream;
class G1ParScanThreadState : public CHeapObj<mtGC> {
- private:
G1CollectedHeap* _g1h;
RefToScanQueue* _refs;
DirtyCardQueue _dcq;
@@ -60,6 +60,11 @@
int _hash_seed;
uint _worker_id;
+ // Upper and lower threshold to start and end work queue draining.
+ uint const _stack_trim_upper_threshold;
+ uint const _stack_trim_lower_threshold;
+
+ Tickspan _trim_ticks;
// Map from young-age-index (0 == not young, 1 is youngest) to
// surviving words. base is what we get back from the malloc call
size_t* _surviving_young_words_base;
@@ -83,7 +88,7 @@
return _dest[original.value()];
}
- public:
+public:
G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length);
virtual ~G1ParScanThreadState();
@@ -129,7 +134,7 @@
void flush(size_t* surviving_young_words);
- private:
+private:
#define G1_PARTIAL_ARRAY_MASK 0x2
inline bool has_partial_array_mask(oop* ref) const {
@@ -185,11 +190,19 @@
void report_promotion_event(InCSetState const dest_state,
oop const old, size_t word_sz, uint age,
HeapWord * const obj_ptr) const;
- public:
+ inline bool needs_partial_trimming() const;
+ inline bool is_partially_trimmed() const;
+
+ inline void trim_queue_to_threshold(uint threshold);
+public:
oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);
void trim_queue();
+ void trim_queue_partially();
+
+ Tickspan trim_ticks() const;
+ void reset_trim_ticks();
inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -29,6 +29,7 @@
#include "gc/g1/g1RemSet.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
+#include "utilities/ticks.inline.hpp"
template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
// Reference should not be NULL here as such are never pushed to the task queue.
@@ -151,4 +152,46 @@
}
}
+inline bool G1ParScanThreadState::needs_partial_trimming() const {
+ return !_refs->overflow_empty() || _refs->size() > _stack_trim_upper_threshold;
+}
+
+inline bool G1ParScanThreadState::is_partially_trimmed() const {
+ return _refs->overflow_empty() && _refs->size() <= _stack_trim_lower_threshold;
+}
+
+inline void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
+ StarTask ref;
+ // Drain the overflow stack first, so other threads can potentially steal.
+ while (_refs->pop_overflow(ref)) {
+ if (!_refs->try_push_to_taskqueue(ref)) {
+ dispatch_reference(ref);
+ }
+ }
+
+ while (_refs->pop_local(ref, threshold)) {
+ dispatch_reference(ref);
+ }
+}
+
+inline void G1ParScanThreadState::trim_queue_partially() {
+ if (!needs_partial_trimming()) {
+ return;
+ }
+
+ const Ticks start = Ticks::now();
+ do {
+ trim_queue_to_threshold(_stack_trim_lower_threshold);
+ } while (!is_partially_trimmed());
+ _trim_ticks += Ticks::now() - start;
+}
+
+inline Tickspan G1ParScanThreadState::trim_ticks() const {
+ return _trim_ticks;
+}
+
+inline void G1ParScanThreadState::reset_trim_ticks() {
+ _trim_ticks = Tickspan();
+}
+
#endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
--- a/src/hotspot/share/gc/g1/g1Policy.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -628,7 +628,7 @@
if (update_stats) {
double cost_per_card_ms = 0.0;
if (_pending_cards > 0) {
- cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms) / (double) _pending_cards;
+ cost_per_card_ms = (average_time_ms(G1GCPhaseTimes::UpdateRS)) / (double) _pending_cards;
_analytics->report_cost_per_card_ms(cost_per_card_ms);
}
_analytics->report_cost_scan_hcc(scan_hcc_time_ms);
@@ -730,9 +730,9 @@
} else {
update_rs_time_goal_ms -= scan_hcc_time_ms;
}
- _g1h->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
- phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
- update_rs_time_goal_ms);
+ _g1h->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS),
+ phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
+ update_rs_time_goal_ms);
cset_chooser()->verify();
}
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -33,6 +33,7 @@
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
+#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
@@ -306,32 +307,21 @@
G1ScanRSForRegionClosure::G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
G1ScanObjsDuringScanRSClosure* scan_obj_on_card,
- CodeBlobClosure* code_root_cl,
+ G1ParScanThreadState* pss,
uint worker_i) :
+ _g1h(G1CollectedHeap::heap()),
+ _ct(_g1h->card_table()),
+ _pss(pss),
+ _scan_objs_on_card_cl(scan_obj_on_card),
_scan_state(scan_state),
- _scan_objs_on_card_cl(scan_obj_on_card),
- _code_root_cl(code_root_cl),
- _strong_code_root_scan_time_sec(0.0),
+ _worker_i(worker_i),
_cards_claimed(0),
_cards_scanned(0),
_cards_skipped(0),
- _worker_i(worker_i) {
- _g1h = G1CollectedHeap::heap();
- _bot = _g1h->bot();
- _ct = _g1h->card_table();
-}
-
-void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card) {
- HeapRegion* const card_region = _g1h->region_at(region_idx_for_card);
- _scan_objs_on_card_cl->set_region(card_region);
- card_region->oops_on_card_seq_iterate_careful<true>(mr, _scan_objs_on_card_cl);
- _cards_scanned++;
-}
-
-void G1ScanRSForRegionClosure::scan_strong_code_roots(HeapRegion* r) {
- double scan_start = os::elapsedTime();
- r->strong_code_roots_do(_code_root_cl);
- _strong_code_root_scan_time_sec += (os::elapsedTime() - scan_start);
+ _rem_set_root_scan_time(),
+ _rem_set_trim_partially_time(),
+ _strong_code_root_scan_time(),
+ _strong_code_trim_partially_time() {
}
void G1ScanRSForRegionClosure::claim_card(size_t card_index, const uint region_idx_for_card){
@@ -339,13 +329,17 @@
_scan_state->add_dirty_region(region_idx_for_card);
}
-bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {
- assert(r->in_collection_set(), "should only be called on elements of CS.");
- uint region_idx = r->hrm_index();
+void G1ScanRSForRegionClosure::scan_card(MemRegion mr, uint region_idx_for_card) {
+ HeapRegion* const card_region = _g1h->region_at(region_idx_for_card);
+ _scan_objs_on_card_cl->set_region(card_region);
+ card_region->oops_on_card_seq_iterate_careful<true>(mr, _scan_objs_on_card_cl);
+ _scan_objs_on_card_cl->trim_queue_partially();
+ _cards_scanned++;
+}
- if (_scan_state->iter_is_complete(region_idx)) {
- return false;
- }
+void G1ScanRSForRegionClosure::scan_rem_set_roots(HeapRegion* r) {
+ uint const region_idx = r->hrm_index();
+
if (_scan_state->claim_iter(region_idx)) {
// If we ever free the collection set concurrently, we should also
// clear the card table concurrently therefore we won't need to
@@ -397,33 +391,52 @@
scan_card(mr, region_idx_for_card);
}
+}
+
+void G1ScanRSForRegionClosure::scan_strong_code_roots(HeapRegion* r) {
+ r->strong_code_roots_do(_pss->closures()->weak_codeblobs());
+}
+
+bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) {
+ assert(r->in_collection_set(),
+ "Should only be called on elements of the collection set but region %u is not.",
+ r->hrm_index());
+ uint const region_idx = r->hrm_index();
+
+ // Do an early out if we know we are complete.
+ if (_scan_state->iter_is_complete(region_idx)) {
+ return false;
+ }
+
+ {
+ G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_root_scan_time, _rem_set_trim_partially_time);
+ scan_rem_set_roots(r);
+ }
+
if (_scan_state->set_iter_complete(region_idx)) {
+ G1EvacPhaseWithTrimTimeTracker timer(_pss, _strong_code_root_scan_time, _strong_code_trim_partially_time);
// Scan the strong code root list attached to the current region
scan_strong_code_roots(r);
}
return false;
}
-void G1RemSet::scan_rem_set(G1ParScanThreadState* pss,
- CodeBlobClosure* heap_region_codeblobs,
- uint worker_i) {
- double rs_time_start = os::elapsedTime();
-
+void G1RemSet::scan_rem_set(G1ParScanThreadState* pss, uint worker_i) {
G1ScanObjsDuringScanRSClosure scan_cl(_g1h, pss);
- G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, heap_region_codeblobs, worker_i);
+ G1ScanRSForRegionClosure cl(_scan_state, &scan_cl, pss, worker_i);
_g1h->collection_set_iterate_from(&cl, worker_i);
- double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
- cl.strong_code_root_scan_time_sec();
-
G1GCPhaseTimes* p = _g1p->phase_times();
- p->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, scan_rs_time_sec);
+ p->record_time_secs(G1GCPhaseTimes::ScanRS, worker_i, TicksToTimeHelper::seconds(cl.rem_set_root_scan_time()));
+ p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, TicksToTimeHelper::seconds(cl.rem_set_trim_partially_time()));
+
p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_scanned(), G1GCPhaseTimes::ScanRSScannedCards);
p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_claimed(), G1GCPhaseTimes::ScanRSClaimedCards);
p->record_thread_work_item(G1GCPhaseTimes::ScanRS, worker_i, cl.cards_skipped(), G1GCPhaseTimes::ScanRSSkippedCards);
- p->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, cl.strong_code_root_scan_time_sec());
+ p->record_time_secs(G1GCPhaseTimes::CodeRoots, worker_i, TicksToTimeHelper::seconds(cl.strong_code_root_scan_time()));
+ p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, TicksToTimeHelper::seconds(cl.strong_code_root_trim_partially_time()));
}
// Closure used for updating rem sets. Only called during an evacuation pause.
@@ -448,6 +461,7 @@
bool card_scanned = _g1rs->refine_card_during_gc(card_ptr, _update_rs_cl);
if (card_scanned) {
+ _update_rs_cl->trim_queue_partially();
_cards_scanned++;
} else {
_cards_skipped++;
@@ -460,32 +474,37 @@
};
void G1RemSet::update_rem_set(G1ParScanThreadState* pss, uint worker_i) {
- G1ScanObjsDuringUpdateRSClosure update_rs_cl(_g1h, pss, worker_i);
- G1RefineCardClosure refine_card_cl(_g1h, &update_rs_cl);
+ G1GCPhaseTimes* p = _g1p->phase_times();
- G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
+ // Apply closure to log entries in the HCC.
if (G1HotCardCache::default_use_cache()) {
- // Apply the closure to the entries of the hot card cache.
- G1GCParPhaseTimesTracker y(_g1p->phase_times(), G1GCPhaseTimes::ScanHCC, worker_i);
+ G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::ScanHCC, worker_i);
+
+ G1ScanObjsDuringUpdateRSClosure scan_hcc_cl(_g1h, pss, worker_i);
+ G1RefineCardClosure refine_card_cl(_g1h, &scan_hcc_cl);
_g1h->iterate_hcc_closure(&refine_card_cl, worker_i);
}
- // Apply the closure to all remaining log entries.
- _g1h->iterate_dirty_card_closure(&refine_card_cl, worker_i);
+
+ // Now apply the closure to all remaining log entries.
+ {
+ G1EvacPhaseTimesTracker x(p, pss, G1GCPhaseTimes::UpdateRS, worker_i);
- G1GCPhaseTimes* p = _g1p->phase_times();
- p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_scanned(), G1GCPhaseTimes::UpdateRSScannedCards);
- p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_skipped(), G1GCPhaseTimes::UpdateRSSkippedCards);
+ G1ScanObjsDuringUpdateRSClosure update_rs_cl(_g1h, pss, worker_i);
+ G1RefineCardClosure refine_card_cl(_g1h, &update_rs_cl);
+ _g1h->iterate_dirty_card_closure(&refine_card_cl, worker_i);
+
+ p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_scanned(), G1GCPhaseTimes::UpdateRSScannedCards);
+ p->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, refine_card_cl.cards_skipped(), G1GCPhaseTimes::UpdateRSSkippedCards);
+ }
}
void G1RemSet::cleanupHRRS() {
HeapRegionRemSet::cleanup();
}
-void G1RemSet::oops_into_collection_set_do(G1ParScanThreadState* pss,
- CodeBlobClosure* heap_region_codeblobs,
- uint worker_i) {
+void G1RemSet::oops_into_collection_set_do(G1ParScanThreadState* pss, uint worker_i) {
update_rem_set(pss, worker_i);
- scan_rem_set(pss, heap_region_codeblobs, worker_i);;
+ scan_rem_set(pss, worker_i);;
}
void G1RemSet::prepare_for_oops_into_collection_set_do() {
--- a/src/hotspot/share/gc/g1/g1RemSet.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1RemSet.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -32,6 +32,7 @@
#include "gc/g1/heapRegion.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
+#include "utilities/ticks.hpp"
// A G1RemSet provides ways of iterating over pointers into a selected
// collection set.
@@ -61,9 +62,7 @@
// Scan all remembered sets of the collection set for references into the collection
// set.
- void scan_rem_set(G1ParScanThreadState* pss,
- CodeBlobClosure* heap_region_codeblobs,
- uint worker_i);
+ void scan_rem_set(G1ParScanThreadState* pss, uint worker_i);
// Flush remaining refinement buffers for cross-region references to either evacuate references
// into the collection set or update the remembered set.
@@ -102,9 +101,7 @@
//
// Further applies heap_region_codeblobs on the oops of the unmarked nmethods on the strong code
// roots list for each region in the collection set.
- void oops_into_collection_set_do(G1ParScanThreadState* pss,
- CodeBlobClosure* heap_region_codeblobs,
- uint worker_i);
+ void oops_into_collection_set_do(G1ParScanThreadState* pss, uint worker_i);
// Prepare for and cleanup after an oops_into_collection_set_do
// call. Must call each of these once before and after (in sequential
@@ -138,37 +135,44 @@
};
class G1ScanRSForRegionClosure : public HeapRegionClosure {
+ G1CollectedHeap* _g1h;
+ G1CardTable *_ct;
+
+ G1ParScanThreadState* _pss;
+ G1ScanObjsDuringScanRSClosure* _scan_objs_on_card_cl;
+
G1RemSetScanState* _scan_state;
+ uint _worker_i;
+
size_t _cards_scanned;
size_t _cards_claimed;
size_t _cards_skipped;
- G1CollectedHeap* _g1h;
-
- G1ScanObjsDuringScanRSClosure* _scan_objs_on_card_cl;
- CodeBlobClosure* _code_root_cl;
+ Tickspan _rem_set_root_scan_time;
+ Tickspan _rem_set_trim_partially_time;
- G1BlockOffsetTable* _bot;
- G1CardTable *_ct;
-
- double _strong_code_root_scan_time_sec;
- uint _worker_i;
+ Tickspan _strong_code_root_scan_time;
+ Tickspan _strong_code_trim_partially_time;
void claim_card(size_t card_index, const uint region_idx_for_card);
void scan_card(MemRegion mr, uint region_idx_for_card);
+
+ void scan_rem_set_roots(HeapRegion* r);
void scan_strong_code_roots(HeapRegion* r);
public:
G1ScanRSForRegionClosure(G1RemSetScanState* scan_state,
G1ScanObjsDuringScanRSClosure* scan_obj_on_card,
- CodeBlobClosure* code_root_cl,
+ G1ParScanThreadState* pss,
uint worker_i);
bool do_heap_region(HeapRegion* r);
- double strong_code_root_scan_time_sec() {
- return _strong_code_root_scan_time_sec;
- }
+ Tickspan rem_set_root_scan_time() const { return _rem_set_root_scan_time; }
+ Tickspan rem_set_trim_partially_time() const { return _rem_set_trim_partially_time; }
+
+ Tickspan strong_code_root_scan_time() const { return _strong_code_root_scan_time; }
+ Tickspan strong_code_root_trim_partially_time() const { return _strong_code_trim_partially_time; }
size_t cards_scanned() const { return _cards_scanned; }
size_t cards_claimed() const { return _cards_claimed; }
--- a/src/hotspot/share/gc/g1/g1RootClosures.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1RootClosures.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -37,8 +37,8 @@
bool in_young_gc) :
_closures(g1h, pss, in_young_gc, /* must_claim_cld */ false) {}
- OopClosure* weak_oops() { return &_closures._buffered_oops; }
- OopClosure* strong_oops() { return &_closures._buffered_oops; }
+ OopClosure* weak_oops() { return &_closures._oops; }
+ OopClosure* strong_oops() { return &_closures._oops; }
CLDClosure* weak_clds() { return &_closures._clds; }
CLDClosure* strong_clds() { return &_closures._clds; }
@@ -47,9 +47,6 @@
CodeBlobClosure* strong_codeblobs() { return &_closures._codeblobs; }
CodeBlobClosure* weak_codeblobs() { return &_closures._codeblobs; }
- void flush() { _closures._buffered_oops.done(); }
- double closure_app_seconds() { return _closures._buffered_oops.closure_app_seconds(); }
-
OopClosure* raw_strong_oops() { return &_closures._oops; }
bool trace_metadata() { return false; }
@@ -79,8 +76,8 @@
_strong(g1h, pss, /* process_only_dirty_klasses */ false, /* must_claim_cld */ true),
_weak(g1h, pss, /* process_only_dirty_klasses */ false, /* must_claim_cld */ true) {}
- OopClosure* weak_oops() { return &_weak._buffered_oops; }
- OopClosure* strong_oops() { return &_strong._buffered_oops; }
+ OopClosure* weak_oops() { return &_weak._oops; }
+ OopClosure* strong_oops() { return &_strong._oops; }
// If MarkWeak is G1MarkPromotedFromRoot then the weak CLDs must be processed in a second pass.
CLDClosure* weak_clds() { return null_if<G1MarkPromotedFromRoot>(&_weak._clds); }
@@ -93,16 +90,6 @@
CodeBlobClosure* strong_codeblobs() { return &_strong._codeblobs; }
CodeBlobClosure* weak_codeblobs() { return &_weak._codeblobs; }
- void flush() {
- _strong._buffered_oops.done();
- _weak._buffered_oops.done();
- }
-
- double closure_app_seconds() {
- return _strong._buffered_oops.closure_app_seconds() +
- _weak._buffered_oops.closure_app_seconds();
- }
-
OopClosure* raw_strong_oops() { return &_strong._oops; }
// If we are not marking all weak roots then we are tracing
--- a/src/hotspot/share/gc/g1/g1RootClosures.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1RootClosures.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,10 +49,6 @@
class G1EvacuationRootClosures : public G1RootClosures {
public:
- // Flush any buffered state and deferred processing
- virtual void flush() = 0;
- virtual double closure_app_seconds() = 0;
-
// Applied to the weakly reachable CLDs when all strongly reachable
// CLDs are guaranteed to have been processed.
virtual CLDClosure* second_pass_weak_clds() = 0;
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -28,12 +28,12 @@
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
-#include "gc/g1/bufferingOopClosure.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
+#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RootProcessor.hpp"
@@ -73,10 +73,12 @@
_lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
_n_workers_discovered_strong_classes(0) {}
-void G1RootProcessor::evacuate_roots(G1EvacuationRootClosures* closures, uint worker_i) {
- double ext_roots_start = os::elapsedTime();
+void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_i) {
G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
+ G1EvacPhaseTimesTracker timer(phase_times, pss, G1GCPhaseTimes::ExtRootScan, worker_i);
+
+ G1EvacuationRootClosures* closures = pss->closures();
process_java_roots(closures, phase_times, worker_i);
// This is the point where this worker thread will not find more strong CLDs/nmethods.
@@ -118,17 +120,6 @@
assert(closures->second_pass_weak_clds() == NULL, "Should be null if not tracing metadata.");
}
- // Finish up any enqueued closure apps (attributed as object copy time).
- closures->flush();
-
- double obj_copy_time_sec = closures->closure_app_seconds();
-
- phase_times->record_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, obj_copy_time_sec);
-
- double ext_root_time_sec = os::elapsedTime() - ext_roots_start - obj_copy_time_sec;
-
- phase_times->record_time_secs(G1GCPhaseTimes::ExtRootScan, worker_i, ext_root_time_sec);
-
// During conc marking we have to filter the per-thread SATB buffers
// to make sure we remove any oops into the CSet (which will show up
// as implicitly live).
--- a/src/hotspot/share/gc/g1/g1RootProcessor.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
class G1CollectedHeap;
class G1EvacuationRootClosures;
class G1GCPhaseTimes;
+class G1ParScanThreadState;
class G1RootClosures;
class Monitor;
class OopClosure;
@@ -97,10 +98,10 @@
public:
G1RootProcessor(G1CollectedHeap* g1h, uint n_workers);
- // Apply closures to the strongly and weakly reachable roots in the system
+ // Apply correct closures from pss to the strongly and weakly reachable roots in the system
// in a single pass.
// Record and report timing measurements for sub phases using the worker_i
- void evacuate_roots(G1EvacuationRootClosures* closures, uint worker_i);
+ void evacuate_roots(G1ParScanThreadState* pss, uint worker_id);
// Apply oops, clds and blobs to all strongly reachable roots in the system
void process_strong_roots(OopClosure* oops,
--- a/src/hotspot/share/gc/g1/g1SharedClosures.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1SharedClosures.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -22,7 +22,6 @@
*
*/
-#include "gc/g1/bufferingOopClosure.hpp"
#include "gc/g1/g1CodeBlobClosure.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "memory/iterator.hpp"
@@ -39,12 +38,10 @@
G1CLDScanClosure _clds;
G1CodeBlobClosure _codeblobs;
- BufferingOopClosure _buffered_oops;
G1SharedClosures(G1CollectedHeap* g1h, G1ParScanThreadState* pss, bool process_only_dirty, bool must_claim_cld) :
_oops(g1h, pss),
_oops_in_cld(g1h, pss),
_clds(&_oops_in_cld, process_only_dirty, must_claim_cld),
- _codeblobs(&_oops),
- _buffered_oops(&_oops) {}
+ _codeblobs(&_oops) {}
};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/jvmFlagConstraintsG1.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/heapRegionBounds.inline.hpp"
+#include "runtime/flags/jvmFlagRangeList.hpp"
+#include "runtime/globals_extension.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+JVMFlag::Error G1RSetRegionEntriesConstraintFunc(intx value, bool verbose) {
+ if (!UseG1GC) return JVMFlag::SUCCESS;
+
+ // Default value of G1RSetRegionEntries=0 means will be set ergonomically.
+ // Minimum value is 1.
+ if (FLAG_IS_CMDLINE(G1RSetRegionEntries) && (value < 1)) {
+ CommandLineError::print(verbose,
+ "G1RSetRegionEntries (" INTX_FORMAT ") must be "
+ "greater than or equal to 1\n",
+ value);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error G1RSetSparseRegionEntriesConstraintFunc(intx value, bool verbose) {
+ if (!UseG1GC) return JVMFlag::SUCCESS;
+
+ // Default value of G1RSetSparseRegionEntries=0 means will be set ergonomically.
+ // Minimum value is 1.
+ if (FLAG_IS_CMDLINE(G1RSetSparseRegionEntries) && (value < 1)) {
+ CommandLineError::print(verbose,
+ "G1RSetSparseRegionEntries (" INTX_FORMAT ") must be "
+ "greater than or equal to 1\n",
+ value);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error G1HeapRegionSizeConstraintFunc(size_t value, bool verbose) {
+ if (!UseG1GC) return JVMFlag::SUCCESS;
+
+ // Default value of G1HeapRegionSize=0 means will be set ergonomically.
+ if (FLAG_IS_CMDLINE(G1HeapRegionSize) && (value < HeapRegionBounds::min_size())) {
+ CommandLineError::print(verbose,
+ "G1HeapRegionSize (" SIZE_FORMAT ") must be "
+ "greater than or equal to ergonomic heap region minimum size\n",
+ value);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error G1NewSizePercentConstraintFunc(uintx value, bool verbose) {
+ if (!UseG1GC) return JVMFlag::SUCCESS;
+
+ if (value > G1MaxNewSizePercent) {
+ CommandLineError::print(verbose,
+ "G1NewSizePercent (" UINTX_FORMAT ") must be "
+ "less than or equal to G1MaxNewSizePercent (" UINTX_FORMAT ")\n",
+ value, G1MaxNewSizePercent);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error G1MaxNewSizePercentConstraintFunc(uintx value, bool verbose) {
+ if (!UseG1GC) return JVMFlag::SUCCESS;
+
+ if (value < G1NewSizePercent) {
+ CommandLineError::print(verbose,
+ "G1MaxNewSizePercent (" UINTX_FORMAT ") must be "
+ "greater than or equal to G1NewSizePercent (" UINTX_FORMAT ")\n",
+ value, G1NewSizePercent);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error MaxGCPauseMillisConstraintFuncG1(uintx value, bool verbose) {
+ if (UseG1GC && FLAG_IS_CMDLINE(MaxGCPauseMillis) && (value >= GCPauseIntervalMillis)) {
+ CommandLineError::print(verbose,
+ "MaxGCPauseMillis (" UINTX_FORMAT ") must be "
+ "less than GCPauseIntervalMillis (" UINTX_FORMAT ")\n",
+ value, GCPauseIntervalMillis);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error GCPauseIntervalMillisConstraintFuncG1(uintx value, bool verbose) {
+ if (UseG1GC) {
+ if (FLAG_IS_CMDLINE(GCPauseIntervalMillis)) {
+ if (value < 1) {
+ CommandLineError::print(verbose,
+ "GCPauseIntervalMillis (" UINTX_FORMAT ") must be "
+ "greater than or equal to 1\n",
+ value);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
+ CommandLineError::print(verbose,
+ "GCPauseIntervalMillis cannot be set "
+ "without setting MaxGCPauseMillis\n");
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ if (value <= MaxGCPauseMillis) {
+ CommandLineError::print(verbose,
+ "GCPauseIntervalMillis (" UINTX_FORMAT ") must be "
+ "greater than MaxGCPauseMillis (" UINTX_FORMAT ")\n",
+ value, MaxGCPauseMillis);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ }
+ }
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error NewSizeConstraintFuncG1(size_t value, bool verbose) {
+#ifdef _LP64
+ // Overflow would happen for uint type variable of YoungGenSizer::_min_desired_young_length
+ // when the value to be assigned exceeds uint range.
+ // i.e. result of '(uint)(NewSize / region size(1~32MB))'
+ // So maximum of NewSize should be 'max_juint * 1M'
+ if (UseG1GC && (value > (max_juint * 1 * M))) {
+ CommandLineError::print(verbose,
+ "NewSize (" SIZE_FORMAT ") must be less than ergonomic maximum value\n",
+ value);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+#endif // _LP64
+ return JVMFlag::SUCCESS;
+}
+
+size_t MaxSizeForHeapAlignmentG1() {
+ return HeapRegionBounds::max_size();
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/jvmFlagConstraintsG1.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_COMMANDLINEFLAGCONSTRAINTSG1_HPP
+#define SHARE_GC_G1_COMMANDLINEFLAGCONSTRAINTSG1_HPP
+
+#include "runtime/globals.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// G1 Flag Constraints
+JVMFlag::Error G1RSetRegionEntriesConstraintFunc(intx value, bool verbose);
+JVMFlag::Error G1RSetSparseRegionEntriesConstraintFunc(intx value, bool verbose);
+JVMFlag::Error G1HeapRegionSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error G1NewSizePercentConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error G1MaxNewSizePercentConstraintFunc(uintx value, bool verbose);
+
+// G1 Subconstraints
+JVMFlag::Error MaxGCPauseMillisConstraintFuncG1(uintx value, bool verbose);
+JVMFlag::Error GCPauseIntervalMillisConstraintFuncG1(uintx value, bool verbose);
+JVMFlag::Error MaxSizeForHeapAlignmentG1(const char* name, size_t value, bool verbose);
+JVMFlag::Error NewSizeConstraintFuncG1(size_t value, bool verbose);
+
+size_t MaxSizeForHeapAlignmentG1();
+
+#endif // SHARE_GC_SHARED_COMMANDLINEFLAGCONSTRAINTSG1_HPP
--- a/src/hotspot/share/gc/parallel/commandLineFlagConstraintsParallel.cpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/commandLineFlagRangeList.hpp"
-#include "runtime/globals.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-Flag::Error ParallelGCThreadsConstraintFuncParallel(uint value, bool verbose) {
- // Parallel GC passes ParallelGCThreads when creating GrowableArray as 'int' type parameter.
- // So can't exceed with "max_jint"
-
- if (UseParallelGC && (value > (uint)max_jint)) {
- CommandLineError::print(verbose,
- "ParallelGCThreads (" UINT32_FORMAT ") must be "
- "less than or equal to " UINT32_FORMAT " for Parallel GC\n",
- value, max_jint);
- return Flag::VIOLATES_CONSTRAINT;
- }
- return Flag::SUCCESS;
-}
-
-Flag::Error InitialTenuringThresholdConstraintFuncParallel(uintx value, bool verbose) {
- // InitialTenuringThreshold is only used for ParallelGC.
- if (UseParallelGC && (value > MaxTenuringThreshold)) {
- CommandLineError::print(verbose,
- "InitialTenuringThreshold (" UINTX_FORMAT ") must be "
- "less than or equal to MaxTenuringThreshold (" UINTX_FORMAT ")\n",
- value, MaxTenuringThreshold);
- return Flag::VIOLATES_CONSTRAINT;
- }
- return Flag::SUCCESS;
-}
-
-Flag::Error MaxTenuringThresholdConstraintFuncParallel(uintx value, bool verbose) {
- // As only ParallelGC uses InitialTenuringThreshold,
- // we don't need to compare InitialTenuringThreshold with MaxTenuringThreshold.
- if (UseParallelGC && (value < InitialTenuringThreshold)) {
- CommandLineError::print(verbose,
- "MaxTenuringThreshold (" UINTX_FORMAT ") must be "
- "greater than or equal to InitialTenuringThreshold (" UINTX_FORMAT ")\n",
- value, InitialTenuringThreshold);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- return Flag::SUCCESS;
-}
--- a/src/hotspot/share/gc/parallel/commandLineFlagConstraintsParallel.hpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_PARALLEL_COMMANDLINEFLAGCONSTRAINTSPARALLEL_HPP
-#define SHARE_GC_PARALLEL_COMMANDLINEFLAGCONSTRAINTSPARALLEL_HPP
-
-#include "runtime/globals.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-// Parallel Subconstraints
-Flag::Error ParallelGCThreadsConstraintFuncParallel(uint value, bool verbose);
-Flag::Error InitialTenuringThresholdConstraintFuncParallel(uintx value, bool verbose);
-Flag::Error MaxTenuringThresholdConstraintFuncParallel(uintx value, bool verbose);
-
-#endif // SHARE_GC_PARALLEL_COMMANDLINEFLAGCONSTRAINTSPARALLEL_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/jvmFlagConstraintsParallel.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/flags/jvmFlagRangeList.hpp"
+#include "runtime/globals.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+JVMFlag::Error ParallelGCThreadsConstraintFuncParallel(uint value, bool verbose) {
+ // Parallel GC passes ParallelGCThreads when creating GrowableArray as 'int' type parameter.
+ // So can't exceed with "max_jint"
+
+ if (UseParallelGC && (value > (uint)max_jint)) {
+ CommandLineError::print(verbose,
+ "ParallelGCThreads (" UINT32_FORMAT ") must be "
+ "less than or equal to " UINT32_FORMAT " for Parallel GC\n",
+ value, max_jint);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error InitialTenuringThresholdConstraintFuncParallel(uintx value, bool verbose) {
+ // InitialTenuringThreshold is only used for ParallelGC.
+ if (UseParallelGC && (value > MaxTenuringThreshold)) {
+ CommandLineError::print(verbose,
+ "InitialTenuringThreshold (" UINTX_FORMAT ") must be "
+ "less than or equal to MaxTenuringThreshold (" UINTX_FORMAT ")\n",
+ value, MaxTenuringThreshold);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error MaxTenuringThresholdConstraintFuncParallel(uintx value, bool verbose) {
+ // As only ParallelGC uses InitialTenuringThreshold,
+ // we don't need to compare InitialTenuringThreshold with MaxTenuringThreshold.
+ if (UseParallelGC && (value < InitialTenuringThreshold)) {
+ CommandLineError::print(verbose,
+ "MaxTenuringThreshold (" UINTX_FORMAT ") must be "
+ "greater than or equal to InitialTenuringThreshold (" UINTX_FORMAT ")\n",
+ value, InitialTenuringThreshold);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ return JVMFlag::SUCCESS;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/jvmFlagConstraintsParallel.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_PARALLEL_COMMANDLINEFLAGCONSTRAINTSPARALLEL_HPP
+#define SHARE_GC_PARALLEL_COMMANDLINEFLAGCONSTRAINTSPARALLEL_HPP
+
+#include "runtime/globals.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Parallel Subconstraints
+JVMFlag::Error ParallelGCThreadsConstraintFuncParallel(uint value, bool verbose);
+JVMFlag::Error InitialTenuringThresholdConstraintFuncParallel(uintx value, bool verbose);
+JVMFlag::Error MaxTenuringThresholdConstraintFuncParallel(uintx value, bool verbose);
+
+#endif // SHARE_GC_PARALLEL_COMMANDLINEFLAGCONSTRAINTSPARALLEL_HPP
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -51,6 +51,7 @@
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/vmThread.hpp"
--- a/src/hotspot/share/gc/shared/barrierSet.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/shared/barrierSet.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -33,8 +33,9 @@
#include "utilities/fakeRttiSupport.hpp"
#include "utilities/macros.hpp"
+class BarrierSetAssembler;
+class BarrierSetC1;
class JavaThread;
-class BarrierSetAssembler;
// This class provides the interface between a barrier implementation and
// the rest of the system.
@@ -68,6 +69,7 @@
private:
FakeRtti _fake_rtti;
BarrierSetAssembler* _barrier_set_assembler;
+ BarrierSetC1* _barrier_set_c1;
public:
// Metafunction mapping a class derived from BarrierSet to the
@@ -88,9 +90,12 @@
// End of fake RTTI support.
protected:
- BarrierSet(BarrierSetAssembler* barrier_set_assembler, const FakeRtti& fake_rtti) :
+ BarrierSet(BarrierSetAssembler* barrier_set_assembler,
+ BarrierSetC1* barrier_set_c1,
+ const FakeRtti& fake_rtti) :
_fake_rtti(fake_rtti),
- _barrier_set_assembler(barrier_set_assembler) { }
+ _barrier_set_assembler(barrier_set_assembler),
+ _barrier_set_c1(barrier_set_c1) {}
~BarrierSet() { }
template <class BarrierSetAssemblerT>
@@ -98,6 +103,11 @@
return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(NULL);
}
+ template <class BarrierSetC1T>
+ BarrierSetC1* make_barrier_set_c1() {
+ return COMPILER1_PRESENT(new BarrierSetC1T()) NOT_COMPILER1(NULL);
+ }
+
public:
// Support for optimizing compilers to call the barrier set on slow path allocations
// that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
@@ -123,6 +133,11 @@
return _barrier_set_assembler;
}
+ BarrierSetC1* barrier_set_c1() {
+ assert(_barrier_set_c1 != NULL, "should be set");
+ return _barrier_set_c1;
+ }
+
// The AccessBarrier of a BarrierSet subclass is called by the Access API
// (cf. oops/access.hpp) to perform decorated accesses. GC implementations
// may override these default access operations by declaring an
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "c1/c1_Defs.hpp"
+#include "c1/c1_LIRGenerator.hpp"
+#include "gc/shared/c1/barrierSetC1.hpp"
+#include "utilities/macros.hpp"
+
+#ifndef PATCHED_ADDR
+#define PATCHED_ADDR (max_jint)
+#endif
+
+#ifdef ASSERT
+#define __ gen->lir(__FILE__, __LINE__)->
+#else
+#define __ gen->lir()->
+#endif
+
+LIR_Opr BarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
+ DecoratorSet decorators = access.decorators();
+ bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
+ bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
+
+ LIRItem& base = access.base().item();
+ LIR_Opr offset = access.offset().opr();
+ LIRGenerator *gen = access.gen();
+
+ LIR_Opr addr_opr;
+ if (on_array) {
+ addr_opr = LIR_OprFact::address(gen->emit_array_address(base.result(), offset, access.type()));
+ } else if (needs_patching) {
+ // we need to patch the offset in the instruction so don't allow
+ // generate_address to try to be smart about emitting the -1.
+ // Otherwise the patching code won't know how to find the
+ // instruction to patch.
+ addr_opr = LIR_OprFact::address(new LIR_Address(base.result(), PATCHED_ADDR, access.type()));
+ } else {
+ addr_opr = LIR_OprFact::address(gen->generate_address(base.result(), offset, 0, 0, access.type()));
+ }
+
+ if (resolve_in_register) {
+ LIR_Opr resolved_addr = gen->new_pointer_register();
+ __ leal(addr_opr, resolved_addr);
+ resolved_addr = LIR_OprFact::address(new LIR_Address(resolved_addr, access.type()));
+ return resolved_addr;
+ } else {
+ return addr_opr;
+ }
+}
+
+void BarrierSetC1::store_at(LIRAccess& access, LIR_Opr value) {
+ DecoratorSet decorators = access.decorators();
+ bool in_heap = (decorators & IN_HEAP) != 0;
+ assert(in_heap, "not supported yet");
+
+ LIR_Opr resolved = resolve_address(access, false);
+ access.set_resolved_addr(resolved);
+ store_at_resolved(access, value);
+}
+
+void BarrierSetC1::load_at(LIRAccess& access, LIR_Opr result) {
+ DecoratorSet decorators = access.decorators();
+ bool in_heap = (decorators & IN_HEAP) != 0;
+ assert(in_heap, "not supported yet");
+
+ LIR_Opr resolved = resolve_address(access, false);
+ access.set_resolved_addr(resolved);
+ load_at_resolved(access, result);
+}
+
+LIR_Opr BarrierSetC1::atomic_cmpxchg_at(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
+ DecoratorSet decorators = access.decorators();
+ bool in_heap = (decorators & IN_HEAP) != 0;
+ assert(in_heap, "not supported yet");
+
+ access.load_address();
+
+ LIR_Opr resolved = resolve_address(access, true);
+ access.set_resolved_addr(resolved);
+ return atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
+}
+
+LIR_Opr BarrierSetC1::atomic_xchg_at(LIRAccess& access, LIRItem& value) {
+ DecoratorSet decorators = access.decorators();
+ bool in_heap = (decorators & IN_HEAP) != 0;
+ assert(in_heap, "not supported yet");
+
+ access.load_address();
+
+ LIR_Opr resolved = resolve_address(access, true);
+ access.set_resolved_addr(resolved);
+ return atomic_xchg_at_resolved(access, value);
+}
+
+LIR_Opr BarrierSetC1::atomic_add_at(LIRAccess& access, LIRItem& value) {
+ DecoratorSet decorators = access.decorators();
+ bool in_heap = (decorators & IN_HEAP) != 0;
+ assert(in_heap, "not supported yet");
+
+ access.load_address();
+
+ LIR_Opr resolved = resolve_address(access, true);
+ access.set_resolved_addr(resolved);
+ return atomic_add_at_resolved(access, value);
+}
+
+void BarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
+ DecoratorSet decorators = access.decorators();
+ bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP();
+ bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
+ bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;
+ LIRGenerator* gen = access.gen();
+
+ if (mask_boolean) {
+ value = gen->mask_boolean(access.base().opr(), value, access.access_emit_info());
+ }
+
+ if (is_volatile && os::is_MP()) {
+ __ membar_release();
+ }
+
+ LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
+ if (is_volatile && !needs_patching) {
+ gen->volatile_field_store(value, access.resolved_addr()->as_address_ptr(), access.access_emit_info());
+ } else {
+ __ store(value, access.resolved_addr()->as_address_ptr(), access.access_emit_info(), patch_code);
+ }
+
+ if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
+ __ membar();
+ }
+}
+
+void BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
+ LIRGenerator *gen = access.gen();
+ DecoratorSet decorators = access.decorators();
+ bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP();
+ bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
+ bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;
+
+ if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile) {
+ __ membar();
+ }
+
+ LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
+ if (is_volatile && !needs_patching) {
+ gen->volatile_field_load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info());
+ } else {
+ __ load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info(), patch_code);
+ }
+
+ if (is_volatile && os::is_MP()) {
+ __ membar_acquire();
+ }
+
+ /* Normalize boolean value returned by unsafe operation, i.e., value != 0 ? value = true : value false. */
+ if (mask_boolean) {
+ LabelObj* equalZeroLabel = new LabelObj();
+ __ cmp(lir_cond_equal, result, 0);
+ __ branch(lir_cond_equal, T_BOOLEAN, equalZeroLabel->label());
+ __ move(LIR_OprFact::intConst(1), result);
+ __ branch_destination(equalZeroLabel->label());
+ }
+}
+
+LIR_Opr BarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
+ LIRGenerator *gen = access.gen();
+ return gen->atomic_cmpxchg(access.type(), access.resolved_addr(), cmp_value, new_value);
+}
+
+LIR_Opr BarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
+ LIRGenerator *gen = access.gen();
+ return gen->atomic_xchg(access.type(), access.resolved_addr(), value);
+}
+
+LIR_Opr BarrierSetC1::atomic_add_at_resolved(LIRAccess& access, LIRItem& value) {
+ LIRGenerator *gen = access.gen();
+ return gen->atomic_add(access.type(), access.resolved_addr(), value);
+}
+
+void BarrierSetC1::generate_referent_check(LIRAccess& access, LabelObj* cont) {
+ // We might be reading the value of the referent field of a
+ // Reference object in order to attach it back to the live
+ // object graph. If G1 is enabled then we need to record
+ // the value that is being returned in an SATB log buffer.
+ //
+ // We need to generate code similar to the following...
+ //
+ // if (offset == java_lang_ref_Reference::referent_offset) {
+ // if (src != NULL) {
+ // if (klass(src)->reference_type() != REF_NONE) {
+ // pre_barrier(..., value, ...);
+ // }
+ // }
+ // }
+
+ bool gen_pre_barrier = true; // Assume we need to generate pre_barrier.
+ bool gen_offset_check = true; // Assume we need to generate the offset guard.
+ bool gen_source_check = true; // Assume we need to check the src object for null.
+ bool gen_type_check = true; // Assume we need to check the reference_type.
+
+ LIRGenerator *gen = access.gen();
+
+ LIRItem& base = access.base().item();
+ LIR_Opr offset = access.offset().opr();
+
+ if (offset->is_constant()) {
+ LIR_Const* constant = offset->as_constant_ptr();
+ jlong off_con = (constant->type() == T_INT ?
+ (jlong)constant->as_jint() :
+ constant->as_jlong());
+
+
+ if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
+ // The constant offset is something other than referent_offset.
+ // We can skip generating/checking the remaining guards and
+ // skip generation of the code stub.
+ gen_pre_barrier = false;
+ } else {
+ // The constant offset is the same as referent_offset -
+ // we do not need to generate a runtime offset check.
+ gen_offset_check = false;
+ }
+ }
+
+ // We don't need to generate stub if the source object is an array
+ if (gen_pre_barrier && base.type()->is_array()) {
+ gen_pre_barrier = false;
+ }
+
+ if (gen_pre_barrier) {
+ // We still need to continue with the checks.
+ if (base.is_constant()) {
+ ciObject* src_con = base.get_jobject_constant();
+ guarantee(src_con != NULL, "no source constant");
+
+ if (src_con->is_null_object()) {
+ // The constant src object is null - We can skip
+ // generating the code stub.
+ gen_pre_barrier = false;
+ } else {
+ // Non-null constant source object. We still have to generate
+ // the slow stub - but we don't need to generate the runtime
+ // null object check.
+ gen_source_check = false;
+ }
+ }
+ }
+ if (gen_pre_barrier && !PatchALot) {
+ // Can the klass of object be statically determined to be
+ // a sub-class of Reference?
+ ciType* type = base.value()->declared_type();
+ if ((type != NULL) && type->is_loaded()) {
+ if (type->is_subtype_of(gen->compilation()->env()->Reference_klass())) {
+ gen_type_check = false;
+ } else if (type->is_klass() &&
+ !gen->compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
+ // Not Reference and not Object klass.
+ gen_pre_barrier = false;
+ }
+ }
+ }
+
+ if (gen_pre_barrier) {
+ // We can have generate one runtime check here. Let's start with
+ // the offset check.
+ if (gen_offset_check) {
+ // if (offset != referent_offset) -> continue
+ // If offset is an int then we can do the comparison with the
+ // referent_offset constant; otherwise we need to move
+ // referent_offset into a temporary register and generate
+ // a reg-reg compare.
+
+ LIR_Opr referent_off;
+
+ if (offset->type() == T_INT) {
+ referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
+ } else {
+ assert(offset->type() == T_LONG, "what else?");
+ referent_off = gen->new_register(T_LONG);
+ __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
+ }
+ __ cmp(lir_cond_notEqual, offset, referent_off);
+ __ branch(lir_cond_notEqual, offset->type(), cont->label());
+ }
+ if (gen_source_check) {
+ // offset is a const and equals referent offset
+ // if (source == null) -> continue
+ __ cmp(lir_cond_equal, base.result(), LIR_OprFact::oopConst(NULL));
+ __ branch(lir_cond_equal, T_OBJECT, cont->label());
+ }
+ LIR_Opr src_klass = gen->new_register(T_OBJECT);
+ if (gen_type_check) {
+ // We have determined that offset == referent_offset && src != null.
+ // if (src->_klass->_reference_type == REF_NONE) -> continue
+ __ move(new LIR_Address(base.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
+ LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
+ LIR_Opr reference_type = gen->new_register(T_INT);
+ __ move(reference_type_addr, reference_type);
+ __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
+ __ branch(lir_cond_equal, T_INT, cont->label());
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/c1/barrierSetC1.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_C1_BARRIERSETC1_HPP
+#define SHARE_GC_SHARED_C1_BARRIERSETC1_HPP
+
+#include "c1/c1_Decorators.hpp"
+#include "c1/c1_LIRGenerator.hpp"
+#include "c1/c1_Instruction.hpp"
+#include "c1/c1_LIR.hpp"
+#include "memory/allocation.hpp"
+
+class LIRGenerator;
+class LIRItem;
+
+// The LIRAddressOpr comprises either a LIRItem or a LIR_Opr to describe elements
+// of an access in the C1 Access API. Both of them allow asking for the opr() which
+// will correspond to either _item.result() or _opr if there is no _item.
+class LIRAddressOpr: public StackObj {
+ LIRItem* _item;
+ LIR_Opr _opr;
+public:
+ LIRAddressOpr(LIRItem& item) : _item(&item), _opr(NULL) {}
+ LIRAddressOpr(LIR_Opr opr) : _item(NULL), _opr(opr) {}
+ LIRAddressOpr(const LIRAddressOpr& other) : _item(other._item), _opr(other._opr) {}
+
+ LIRItem& item() const {
+ assert(_item != NULL, "sanity");
+ return *_item;
+ }
+
+ LIR_Opr opr() const {
+ if (_item == NULL) {
+ return _opr;
+ } else {
+ return _item->result();
+ }
+ }
+};
+
+// The LIRAccess class wraps shared context parameters required for performing
+// the right access in C1. This includes the address of the offset and the decorators.
+class LIRAccess: public StackObj {
+ LIRGenerator* _gen;
+ DecoratorSet _decorators;
+ LIRAddressOpr _base;
+ LIRAddressOpr _offset;
+ BasicType _type;
+ LIR_Opr _resolved_addr;
+ CodeEmitInfo* _patch_emit_info;
+ CodeEmitInfo* _access_emit_info;
+
+public:
+ LIRAccess(LIRGenerator* gen, DecoratorSet decorators,
+ LIRAddressOpr base, LIRAddressOpr offset, BasicType type,
+ CodeEmitInfo* patch_emit_info = NULL, CodeEmitInfo* access_emit_info = NULL) :
+ _gen(gen),
+ _decorators(AccessInternal::decorator_fixup(decorators)),
+ _base(base),
+ _offset(offset),
+ _type(type),
+ _resolved_addr(NULL),
+ _patch_emit_info(patch_emit_info),
+ _access_emit_info(access_emit_info) {}
+
+ void load_base() { _base.item().load_item(); }
+ void load_offset() { _offset.item().load_nonconstant(); }
+
+ void load_address() {
+ load_base();
+ load_offset();
+ }
+
+ LIRGenerator* gen() const { return _gen; }
+ CodeEmitInfo*& patch_emit_info() { return _patch_emit_info; }
+ CodeEmitInfo*& access_emit_info() { return _access_emit_info; }
+ LIRAddressOpr& base() { return _base; }
+ LIRAddressOpr& offset() { return _offset; }
+ BasicType type() const { return _type; }
+ LIR_Opr resolved_addr() const { return _resolved_addr; }
+ void set_resolved_addr(LIR_Opr addr) { _resolved_addr = addr; }
+ bool is_oop() const { return _type == T_ARRAY || _type == T_OBJECT; }
+ DecoratorSet decorators() const { return _decorators; }
+ bool is_raw() const { return (_decorators & AS_RAW) != 0; }
+};
+
+// The BarrierSetC1 class is the main entry point for the GC backend of the Access API in C1.
+// It is called by the LIRGenerator::access_* functions, which is the main entry poing for
+// access calls in C1.
+
+class BarrierSetC1: public CHeapObj<mtGC> {
+protected:
+ virtual LIR_Opr resolve_address(LIRAccess& access, bool resolve_in_register);
+
+ virtual void generate_referent_check(LIRAccess& access, LabelObj* cont);
+
+ // Accesses with resolved address
+ virtual void store_at_resolved(LIRAccess& access, LIR_Opr value);
+ virtual void load_at_resolved(LIRAccess& access, LIR_Opr result);
+
+ virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value);
+
+ virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value);
+ virtual LIR_Opr atomic_add_at_resolved(LIRAccess& access, LIRItem& value);
+
+public:
+ virtual void store_at(LIRAccess& access, LIR_Opr value);
+ virtual void load_at(LIRAccess& access, LIR_Opr result);
+
+ virtual LIR_Opr atomic_cmpxchg_at(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value);
+
+ virtual LIR_Opr atomic_xchg_at(LIRAccess& access, LIRItem& value);
+ virtual LIR_Opr atomic_add_at(LIRAccess& access, LIRItem& value);
+
+ virtual void generate_c1_runtime_stubs(BufferBlob* buffer_blob) {}
+};
+
+#endif // SHARE_GC_SHARED_C1_BARRIERSETC1_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/c1/cardTableBarrierSetC1.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
+#include "utilities/macros.hpp"
+
+#ifdef ASSERT
+#define __ gen->lir(__FILE__, __LINE__)->
+#else
+#define __ gen->lir()->
+#endif
+
+void CardTableBarrierSetC1::post_barrier(LIRAccess& access, LIR_OprDesc* addr, LIR_OprDesc* new_val) {
+ DecoratorSet decorators = access.decorators();
+ LIRGenerator* gen = access.gen();
+ bool in_heap = (decorators & IN_HEAP) != 0;
+ if (!in_heap) {
+ return;
+ }
+
+ BarrierSet* bs = BarrierSet::barrier_set();
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*(ct->byte_map_base())) == sizeof(jbyte), "adjust this code");
+ LIR_Const* card_table_base = new LIR_Const(ct->byte_map_base());
+ if (addr->is_address()) {
+ LIR_Address* address = addr->as_address_ptr();
+ // ptr cannot be an object because we use this barrier for array card marks
+ // and addr can point in the middle of an array.
+ LIR_Opr ptr = gen->new_pointer_register();
+ if (!address->index()->is_valid() && address->disp() == 0) {
+ __ move(address->base(), ptr);
+ } else {
+ assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
+ __ leal(addr, ptr);
+ }
+ addr = ptr;
+ }
+ assert(addr->is_register(), "must be a register at this point");
+
+#ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
+ gen->CardTableBarrierSet_post_barrier_helper(addr, card_table_base);
+#else
+ LIR_Opr tmp = gen->new_pointer_register();
+ if (TwoOperandLIRForm) {
+ __ move(addr, tmp);
+ __ unsigned_shift_right(tmp, CardTable::card_shift, tmp);
+ } else {
+ __ unsigned_shift_right(addr, CardTable::card_shift, tmp);
+ }
+
+ LIR_Address* card_addr;
+ if (gen->can_inline_as_constant(card_table_base)) {
+ card_addr = new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE);
+ } else {
+ card_addr = new LIR_Address(tmp, gen->load_constant(card_table_base), T_BYTE);
+ }
+
+ LIR_Opr dirty = LIR_OprFact::intConst(CardTable::dirty_card_val());
+ if (UseCondCardMark) {
+ LIR_Opr cur_value = gen->new_register(T_INT);
+ if (ct->scanned_concurrently()) {
+ __ membar_storeload();
+ }
+ __ move(card_addr, cur_value);
+
+ LabelObj* L_already_dirty = new LabelObj();
+ __ cmp(lir_cond_equal, cur_value, dirty);
+ __ branch(lir_cond_equal, T_BYTE, L_already_dirty->label());
+ __ move(dirty, card_addr);
+ __ branch_destination(L_already_dirty->label());
+ } else {
+ if (ct->scanned_concurrently()) {
+ __ membar_storestore();
+ }
+ __ move(dirty, card_addr);
+ }
+#endif
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/c1/cardTableBarrierSetC1.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_C1_CARDTABLEBARRIERSETC1_HPP
+#define SHARE_GC_SHARED_C1_CARDTABLEBARRIERSETC1_HPP
+
+#include "gc/shared/c1/modRefBarrierSetC1.hpp"
+
+class CardTableBarrierSetC1 : public ModRefBarrierSetC1 {
+protected:
+ virtual void post_barrier(LIRAccess& access, LIR_OprDesc* addr, LIR_OprDesc* new_val);
+};
+
+#endif // SHARE_GC_SHARED_C1_CARDTABLEBARRIERSETC1_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/c1/modRefBarrierSetC1.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/c1/modRefBarrierSetC1.hpp"
+#include "utilities/macros.hpp"
+
+#ifdef ASSERT
+#define __ gen->lir(__FILE__, __LINE__)->
+#else
+#define __ gen->lir()->
+#endif
+
+void ModRefBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
+ DecoratorSet decorators = access.decorators();
+ bool on_array = (decorators & IN_HEAP_ARRAY) != 0;
+ bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
+
+ if (access.is_oop()) {
+ pre_barrier(access, access.resolved_addr(),
+ LIR_OprFact::illegalOpr /* pre_val */, access.patch_emit_info());
+ }
+
+ BarrierSetC1::store_at_resolved(access, value);
+
+ if (access.is_oop()) {
+ bool precise = on_array || on_anonymous;
+ LIR_Opr post_addr = precise ? access.resolved_addr() : access.base().opr();
+ post_barrier(access, post_addr, value);
+ }
+}
+
+LIR_Opr ModRefBarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
+ if (access.is_oop()) {
+ pre_barrier(access, access.resolved_addr(),
+ LIR_OprFact::illegalOpr /* pre_val */, NULL);
+ }
+
+ LIR_Opr result = BarrierSetC1::atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
+
+ if (access.is_oop()) {
+ post_barrier(access, access.resolved_addr(), new_value.result());
+ }
+
+ return result;
+}
+
+LIR_Opr ModRefBarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
+ if (access.is_oop()) {
+ pre_barrier(access, access.resolved_addr(),
+ LIR_OprFact::illegalOpr /* pre_val */, NULL);
+ }
+
+ LIR_Opr result = BarrierSetC1::atomic_xchg_at_resolved(access, value);
+
+ if (access.is_oop()) {
+ post_barrier(access, access.resolved_addr(), value.result());
+ }
+
+ return result;
+}
+
+// This overrides the default to resolve the address into a register,
+// assuming it will be used by a write barrier anyway.
+LIR_Opr ModRefBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
+ DecoratorSet decorators = access.decorators();
+ bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
+ bool is_write = (decorators & C1_WRITE_ACCESS) != 0;
+ resolve_in_register |= !needs_patching && is_write && access.is_oop();
+ return BarrierSetC1::resolve_address(access, resolve_in_register);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/c1/modRefBarrierSetC1.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_C1_MODREFBARRIERSETC1_HPP
+#define SHARE_GC_SHARED_C1_MODREFBARRIERSETC1_HPP
+
+#include "gc/shared/c1/barrierSetC1.hpp"
+
+// The ModRefBarrierSetC1 filters away accesses on BasicTypes other
+// than T_OBJECT/T_ARRAY (oops). The oop accesses call one of the protected
+// accesses, which are overridden in the concrete BarrierSetAssembler.
+
+class ModRefBarrierSetC1 : public BarrierSetC1 {
+protected:
+ virtual void pre_barrier(LIRAccess& access, LIR_Opr addr_opr,
+ LIR_Opr pre_val, CodeEmitInfo* info) {}
+ virtual void post_barrier(LIRAccess& access, LIR_OprDesc* addr,
+ LIR_OprDesc* new_val) {}
+
+ virtual LIR_Opr resolve_address(LIRAccess& access, bool resolve_in_register);
+
+ virtual void store_at_resolved(LIRAccess& access, LIR_Opr value);
+
+ virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value);
+
+ virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value);
+};
+
+#endif // SHARE_GC_SHARED_C1_MODREFBARRIERSETC1_HPP
--- a/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -34,15 +34,22 @@
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
+#ifdef COMPILER1
+#include "gc/shared/c1/cardTableBarrierSetC1.hpp"
+#endif
+
+class CardTableBarrierSetC1;
// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
// enumerate ref fields that have been modified (since the last
// enumeration.)
CardTableBarrierSet::CardTableBarrierSet(BarrierSetAssembler* barrier_set_assembler,
+ BarrierSetC1* barrier_set_c1,
CardTable* card_table,
const BarrierSet::FakeRtti& fake_rtti) :
ModRefBarrierSet(barrier_set_assembler,
+ barrier_set_c1,
fake_rtti.add_tag(BarrierSet::CardTableBarrierSet)),
_defer_initial_card_mark(false),
_card_table(card_table)
@@ -50,6 +57,7 @@
CardTableBarrierSet::CardTableBarrierSet(CardTable* card_table) :
ModRefBarrierSet(make_barrier_set_assembler<CardTableBarrierSetAssembler>(),
+ make_barrier_set_c1<CardTableBarrierSetC1>(),
BarrierSet::FakeRtti(BarrierSet::CardTableBarrierSet)),
_defer_initial_card_mark(false),
_card_table(card_table)
--- a/src/hotspot/share/gc/shared/cardTableBarrierSet.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -53,6 +53,7 @@
CardTable* _card_table;
CardTableBarrierSet(BarrierSetAssembler* barrier_set_assembler,
+ BarrierSetC1* barrier_set_c1,
CardTable* card_table,
const BarrierSet::FakeRtti& fake_rtti);
--- a/src/hotspot/share/gc/shared/commandLineFlagConstraintsGC.cpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,470 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/collectorPolicy.hpp"
-#include "gc/shared/commandLineFlagConstraintsGC.hpp"
-#include "gc/shared/plab.hpp"
-#include "gc/shared/threadLocalAllocBuffer.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/commandLineFlagRangeList.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/globals_extension.hpp"
-#include "runtime/thread.inline.hpp"
-#include "utilities/align.hpp"
-#include "utilities/defaultStream.hpp"
-#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/commandLineFlagConstraintsCMS.hpp"
-#include "gc/g1/commandLineFlagConstraintsG1.hpp"
-#include "gc/parallel/commandLineFlagConstraintsParallel.hpp"
-#endif
-#ifdef COMPILER1
-#include "c1/c1_globals.hpp"
-#endif // COMPILER1
-#ifdef COMPILER2
-#include "opto/c2_globals.hpp"
-#endif // COMPILER2
-
-// Some flags that have default values that indicate that the
-// JVM should automatically determine an appropriate value
-// for that flag. In those cases it is only appropriate for the
-// constraint checking to be done if the user has specified the
-// value(s) of the flag(s) on the command line. In the constraint
-// checking functions, FLAG_IS_CMDLINE() is used to check if
-// the flag has been set by the user and so should be checked.
-
-// As ParallelGCThreads differs among GC modes, we need constraint function.
-Flag::Error ParallelGCThreadsConstraintFunc(uint value, bool verbose) {
- Flag::Error status = Flag::SUCCESS;
-
-#if INCLUDE_ALL_GCS
- status = ParallelGCThreadsConstraintFuncParallel(value, verbose);
- if (status != Flag::SUCCESS) {
- return status;
- }
-
- status = ParallelGCThreadsConstraintFuncCMS(value, verbose);
- if (status != Flag::SUCCESS) {
- return status;
- }
-#endif
-
- return status;
-}
-
-// As ConcGCThreads should be smaller than ParallelGCThreads,
-// we need constraint function.
-Flag::Error ConcGCThreadsConstraintFunc(uint value, bool verbose) {
-#if INCLUDE_ALL_GCS
- // CMS and G1 GCs use ConcGCThreads.
- if ((UseConcMarkSweepGC || UseG1GC) && (value > ParallelGCThreads)) {
- CommandLineError::print(verbose,
- "ConcGCThreads (" UINT32_FORMAT ") must be "
- "less than or equal to ParallelGCThreads (" UINT32_FORMAT ")\n",
- value, ParallelGCThreads);
- return Flag::VIOLATES_CONSTRAINT;
- }
-#endif
- return Flag::SUCCESS;
-}
-
-static Flag::Error MinPLABSizeBounds(const char* name, size_t value, bool verbose) {
-#if INCLUDE_ALL_GCS
- if ((UseConcMarkSweepGC || UseG1GC || UseParallelGC) && (value < PLAB::min_size())) {
- CommandLineError::print(verbose,
- "%s (" SIZE_FORMAT ") must be "
- "greater than or equal to ergonomic PLAB minimum size (" SIZE_FORMAT ")\n",
- name, value, PLAB::min_size());
- return Flag::VIOLATES_CONSTRAINT;
- }
-#endif // INCLUDE_ALL_GCS
- return Flag::SUCCESS;
-}
-
-Flag::Error MaxPLABSizeBounds(const char* name, size_t value, bool verbose) {
-#if INCLUDE_ALL_GCS
- if ((UseConcMarkSweepGC || UseG1GC || UseParallelGC) && (value > PLAB::max_size())) {
- CommandLineError::print(verbose,
- "%s (" SIZE_FORMAT ") must be "
- "less than or equal to ergonomic PLAB maximum size (" SIZE_FORMAT ")\n",
- name, value, PLAB::max_size());
- return Flag::VIOLATES_CONSTRAINT;
- }
-#endif // INCLUDE_ALL_GCS
- return Flag::SUCCESS;
-}
-
-static Flag::Error MinMaxPLABSizeBounds(const char* name, size_t value, bool verbose) {
- Flag::Error status = MinPLABSizeBounds(name, value, verbose);
-
- if (status == Flag::SUCCESS) {
- return MaxPLABSizeBounds(name, value, verbose);
- }
- return status;
-}
-
-Flag::Error YoungPLABSizeConstraintFunc(size_t value, bool verbose) {
- return MinMaxPLABSizeBounds("YoungPLABSize", value, verbose);
-}
-
-Flag::Error OldPLABSizeConstraintFunc(size_t value, bool verbose) {
- Flag::Error status = Flag::SUCCESS;
-
-#if INCLUDE_ALL_GCS
- if (UseConcMarkSweepGC) {
- return OldPLABSizeConstraintFuncCMS(value, verbose);
- } else {
- status = MinMaxPLABSizeBounds("OldPLABSize", value, verbose);
- }
-#endif
- return status;
-}
-
-Flag::Error MinHeapFreeRatioConstraintFunc(uintx value, bool verbose) {
- if (value > MaxHeapFreeRatio) {
- CommandLineError::print(verbose,
- "MinHeapFreeRatio (" UINTX_FORMAT ") must be "
- "less than or equal to MaxHeapFreeRatio (" UINTX_FORMAT ")\n",
- value, MaxHeapFreeRatio);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error MaxHeapFreeRatioConstraintFunc(uintx value, bool verbose) {
- if (value < MinHeapFreeRatio) {
- CommandLineError::print(verbose,
- "MaxHeapFreeRatio (" UINTX_FORMAT ") must be "
- "greater than or equal to MinHeapFreeRatio (" UINTX_FORMAT ")\n",
- value, MinHeapFreeRatio);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-static Flag::Error CheckMaxHeapSizeAndSoftRefLRUPolicyMSPerMB(size_t maxHeap, intx softRef, bool verbose) {
- if ((softRef > 0) && ((maxHeap / M) > (max_uintx / softRef))) {
- CommandLineError::print(verbose,
- "Desired lifetime of SoftReferences cannot be expressed correctly. "
- "MaxHeapSize (" SIZE_FORMAT ") or SoftRefLRUPolicyMSPerMB "
- "(" INTX_FORMAT ") is too large\n",
- maxHeap, softRef);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error SoftRefLRUPolicyMSPerMBConstraintFunc(intx value, bool verbose) {
- return CheckMaxHeapSizeAndSoftRefLRUPolicyMSPerMB(MaxHeapSize, value, verbose);
-}
-
-Flag::Error MarkStackSizeConstraintFunc(size_t value, bool verbose) {
- if (value > MarkStackSizeMax) {
- CommandLineError::print(verbose,
- "MarkStackSize (" SIZE_FORMAT ") must be "
- "less than or equal to MarkStackSizeMax (" SIZE_FORMAT ")\n",
- value, MarkStackSizeMax);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error MinMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose) {
- if (value > MaxMetaspaceFreeRatio) {
- CommandLineError::print(verbose,
- "MinMetaspaceFreeRatio (" UINTX_FORMAT ") must be "
- "less than or equal to MaxMetaspaceFreeRatio (" UINTX_FORMAT ")\n",
- value, MaxMetaspaceFreeRatio);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error MaxMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose) {
- if (value < MinMetaspaceFreeRatio) {
- CommandLineError::print(verbose,
- "MaxMetaspaceFreeRatio (" UINTX_FORMAT ") must be "
- "greater than or equal to MinMetaspaceFreeRatio (" UINTX_FORMAT ")\n",
- value, MinMetaspaceFreeRatio);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error InitialTenuringThresholdConstraintFunc(uintx value, bool verbose) {
-#if INCLUDE_ALL_GCS
- Flag::Error status = InitialTenuringThresholdConstraintFuncParallel(value, verbose);
- if (status != Flag::SUCCESS) {
- return status;
- }
-#endif
-
- return Flag::SUCCESS;
-}
-
-Flag::Error MaxTenuringThresholdConstraintFunc(uintx value, bool verbose) {
-#if INCLUDE_ALL_GCS
- Flag::Error status = MaxTenuringThresholdConstraintFuncParallel(value, verbose);
- if (status != Flag::SUCCESS) {
- return status;
- }
-#endif
-
- // MaxTenuringThreshold=0 means NeverTenure=false && AlwaysTenure=true
- if ((value == 0) && (NeverTenure || !AlwaysTenure)) {
- CommandLineError::print(verbose,
- "MaxTenuringThreshold (0) should match to NeverTenure=false "
- "&& AlwaysTenure=true. But we have NeverTenure=%s "
- "AlwaysTenure=%s\n",
- NeverTenure ? "true" : "false",
- AlwaysTenure ? "true" : "false");
- return Flag::VIOLATES_CONSTRAINT;
- }
- return Flag::SUCCESS;
-}
-
-Flag::Error MaxGCPauseMillisConstraintFunc(uintx value, bool verbose) {
-#if INCLUDE_ALL_GCS
- Flag::Error status = MaxGCPauseMillisConstraintFuncG1(value, verbose);
- if (status != Flag::SUCCESS) {
- return status;
- }
-#endif
-
- return Flag::SUCCESS;
-}
-
-Flag::Error GCPauseIntervalMillisConstraintFunc(uintx value, bool verbose) {
-#if INCLUDE_ALL_GCS
- Flag::Error status = GCPauseIntervalMillisConstraintFuncG1(value, verbose);
- if (status != Flag::SUCCESS) {
- return status;
- }
-#endif
-
- return Flag::SUCCESS;
-}
-
-Flag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool verbose) {
- size_t aligned_max = align_down(max_uintx/2, Metaspace::reserve_alignment_words());
- if (value > aligned_max) {
- CommandLineError::print(verbose,
- "InitialBootClassLoaderMetaspaceSize (" SIZE_FORMAT ") must be "
- "less than or equal to aligned maximum value (" SIZE_FORMAT ")\n",
- value, aligned_max);
- return Flag::VIOLATES_CONSTRAINT;
- }
- return Flag::SUCCESS;
-}
-
-// To avoid an overflow by 'align_up(value, alignment)'.
-static Flag::Error MaxSizeForAlignment(const char* name, size_t value, size_t alignment, bool verbose) {
- size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
- if (value > aligned_max) {
- CommandLineError::print(verbose,
- "%s (" SIZE_FORMAT ") must be "
- "less than or equal to aligned maximum value (" SIZE_FORMAT ")\n",
- name, value, aligned_max);
- return Flag::VIOLATES_CONSTRAINT;
- }
- return Flag::SUCCESS;
-}
-
-static Flag::Error MaxSizeForHeapAlignment(const char* name, size_t value, bool verbose) {
- size_t heap_alignment;
-
-#if INCLUDE_ALL_GCS
- if (UseG1GC) {
- // For G1 GC, we don't know until G1CollectorPolicy is created.
- heap_alignment = MaxSizeForHeapAlignmentG1();
- } else
-#endif
- {
- heap_alignment = CollectorPolicy::compute_heap_alignment();
- }
-
- return MaxSizeForAlignment(name, value, heap_alignment, verbose);
-}
-
-Flag::Error InitialHeapSizeConstraintFunc(size_t value, bool verbose) {
- return MaxSizeForHeapAlignment("InitialHeapSize", value, verbose);
-}
-
-Flag::Error MaxHeapSizeConstraintFunc(size_t value, bool verbose) {
- Flag::Error status = MaxSizeForHeapAlignment("MaxHeapSize", value, verbose);
-
- if (status == Flag::SUCCESS) {
- status = CheckMaxHeapSizeAndSoftRefLRUPolicyMSPerMB(value, SoftRefLRUPolicyMSPerMB, verbose);
- }
- return status;
-}
-
-Flag::Error HeapBaseMinAddressConstraintFunc(size_t value, bool verbose) {
- // If an overflow happened in Arguments::set_heap_size(), MaxHeapSize will have too large a value.
- // Check for this by ensuring that MaxHeapSize plus the requested min base address still fit within max_uintx.
- if (UseCompressedOops && FLAG_IS_ERGO(MaxHeapSize) && (value > (max_uintx - MaxHeapSize))) {
- CommandLineError::print(verbose,
- "HeapBaseMinAddress (" SIZE_FORMAT ") or MaxHeapSize (" SIZE_FORMAT ") is too large. "
- "Sum of them must be less than or equal to maximum of size_t (" SIZE_FORMAT ")\n",
- value, MaxHeapSize, max_uintx);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- return MaxSizeForHeapAlignment("HeapBaseMinAddress", value, verbose);
-}
-
-Flag::Error NewSizeConstraintFunc(size_t value, bool verbose) {
-#if INCLUDE_ALL_GCS
- Flag::Error status = NewSizeConstraintFuncG1(value, verbose);
- if (status != Flag::SUCCESS) {
- return status;
- }
-#endif
-
- return Flag::SUCCESS;
-}
-
-Flag::Error MinTLABSizeConstraintFunc(size_t value, bool verbose) {
- // At least, alignment reserve area is needed.
- if (value < ThreadLocalAllocBuffer::alignment_reserve_in_bytes()) {
- CommandLineError::print(verbose,
- "MinTLABSize (" SIZE_FORMAT ") must be "
- "greater than or equal to reserved area in TLAB (" SIZE_FORMAT ")\n",
- value, ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
- return Flag::VIOLATES_CONSTRAINT;
- }
- if (value > (ThreadLocalAllocBuffer::max_size() * HeapWordSize)) {
- CommandLineError::print(verbose,
- "MinTLABSize (" SIZE_FORMAT ") must be "
- "less than or equal to ergonomic TLAB maximum (" SIZE_FORMAT ")\n",
- value, ThreadLocalAllocBuffer::max_size() * HeapWordSize);
- return Flag::VIOLATES_CONSTRAINT;
- }
- return Flag::SUCCESS;
-}
-
-Flag::Error TLABSizeConstraintFunc(size_t value, bool verbose) {
- // Skip for default value of zero which means set ergonomically.
- if (FLAG_IS_CMDLINE(TLABSize)) {
- if (value < MinTLABSize) {
- CommandLineError::print(verbose,
- "TLABSize (" SIZE_FORMAT ") must be "
- "greater than or equal to MinTLABSize (" SIZE_FORMAT ")\n",
- value, MinTLABSize);
- return Flag::VIOLATES_CONSTRAINT;
- }
- if (value > (ThreadLocalAllocBuffer::max_size() * HeapWordSize)) {
- CommandLineError::print(verbose,
- "TLABSize (" SIZE_FORMAT ") must be "
- "less than or equal to ergonomic TLAB maximum size (" SIZE_FORMAT ")\n",
- value, (ThreadLocalAllocBuffer::max_size() * HeapWordSize));
- return Flag::VIOLATES_CONSTRAINT;
- }
- }
- return Flag::SUCCESS;
-}
-
-// We will protect overflow from ThreadLocalAllocBuffer::record_slow_allocation(),
-// so AfterMemoryInit type is enough to check.
-Flag::Error TLABWasteIncrementConstraintFunc(uintx value, bool verbose) {
- if (UseTLAB) {
- size_t refill_waste_limit = Thread::current()->tlab().refill_waste_limit();
-
- // Compare with 'max_uintx' as ThreadLocalAllocBuffer::_refill_waste_limit is 'size_t'.
- if (refill_waste_limit > (max_uintx - value)) {
- CommandLineError::print(verbose,
- "TLABWasteIncrement (" UINTX_FORMAT ") must be "
- "less than or equal to ergonomic TLAB waste increment maximum size(" SIZE_FORMAT ")\n",
- value, (max_uintx - refill_waste_limit));
- return Flag::VIOLATES_CONSTRAINT;
- }
- }
- return Flag::SUCCESS;
-}
-
-Flag::Error SurvivorRatioConstraintFunc(uintx value, bool verbose) {
- if (FLAG_IS_CMDLINE(SurvivorRatio) &&
- (value > (MaxHeapSize / Universe::heap()->collector_policy()->space_alignment()))) {
- CommandLineError::print(verbose,
- "SurvivorRatio (" UINTX_FORMAT ") must be "
- "less than or equal to ergonomic SurvivorRatio maximum (" SIZE_FORMAT ")\n",
- value,
- (MaxHeapSize / Universe::heap()->collector_policy()->space_alignment()));
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error MetaspaceSizeConstraintFunc(size_t value, bool verbose) {
- if (value > MaxMetaspaceSize) {
- CommandLineError::print(verbose,
- "MetaspaceSize (" SIZE_FORMAT ") must be "
- "less than or equal to MaxMetaspaceSize (" SIZE_FORMAT ")\n",
- value, MaxMetaspaceSize);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error MaxMetaspaceSizeConstraintFunc(size_t value, bool verbose) {
- if (value < MetaspaceSize) {
- CommandLineError::print(verbose,
- "MaxMetaspaceSize (" SIZE_FORMAT ") must be "
- "greater than or equal to MetaspaceSize (" SIZE_FORMAT ")\n",
- value, MaxMetaspaceSize);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error SurvivorAlignmentInBytesConstraintFunc(intx value, bool verbose) {
- if (value != 0) {
- if (!is_power_of_2(value)) {
- CommandLineError::print(verbose,
- "SurvivorAlignmentInBytes (" INTX_FORMAT ") must be "
- "power of 2\n",
- value);
- return Flag::VIOLATES_CONSTRAINT;
- }
- if (value < ObjectAlignmentInBytes) {
- CommandLineError::print(verbose,
- "SurvivorAlignmentInBytes (" INTX_FORMAT ") must be "
- "greater than or equal to ObjectAlignmentInBytes (" INTX_FORMAT ")\n",
- value, ObjectAlignmentInBytes);
- return Flag::VIOLATES_CONSTRAINT;
- }
- }
- return Flag::SUCCESS;
-}
--- a/src/hotspot/share/gc/shared/commandLineFlagConstraintsGC.hpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_SHARED_COMMANDLINEFLAGCONSTRAINTSGC_HPP
-#define SHARE_GC_SHARED_COMMANDLINEFLAGCONSTRAINTSGC_HPP
-
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/commandLineFlagConstraintsCMS.hpp"
-#include "gc/g1/commandLineFlagConstraintsG1.hpp"
-#include "gc/parallel/commandLineFlagConstraintsParallel.hpp"
-#endif
-
-/*
- * Here we have GC arguments constraints functions, which are called automatically
- * whenever flag's value changes. If the constraint fails the function should return
- * an appropriate error value.
- */
-
-Flag::Error ParallelGCThreadsConstraintFunc(uint value, bool verbose);
-Flag::Error ConcGCThreadsConstraintFunc(uint value, bool verbose);
-Flag::Error YoungPLABSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error OldPLABSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error MinHeapFreeRatioConstraintFunc(uintx value, bool verbose);
-Flag::Error MaxHeapFreeRatioConstraintFunc(uintx value, bool verbose);
-Flag::Error SoftRefLRUPolicyMSPerMBConstraintFunc(intx value, bool verbose);
-Flag::Error MarkStackSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error MinMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose);
-Flag::Error MaxMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose);
-Flag::Error InitialTenuringThresholdConstraintFunc(uintx value, bool verbose);
-Flag::Error MaxTenuringThresholdConstraintFunc(uintx value, bool verbose);
-
-Flag::Error MaxGCPauseMillisConstraintFunc(uintx value, bool verbose);
-Flag::Error GCPauseIntervalMillisConstraintFunc(uintx value, bool verbose);
-Flag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error InitialHeapSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error MaxHeapSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error HeapBaseMinAddressConstraintFunc(size_t value, bool verbose);
-Flag::Error NewSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error MinTLABSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error TLABSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error TLABWasteIncrementConstraintFunc(uintx value, bool verbose);
-Flag::Error SurvivorRatioConstraintFunc(uintx value, bool verbose);
-Flag::Error MetaspaceSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error MaxMetaspaceSizeConstraintFunc(size_t value, bool verbose);
-Flag::Error SurvivorAlignmentInBytesConstraintFunc(intx value, bool verbose);
-
-// Internal
-Flag::Error MaxPLABSizeBounds(const char* name, size_t value, bool verbose);
-
-#endif // SHARE_GC_SHARED_COMMANDLINEFLAGCONSTRAINTSGC_HPP
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -53,6 +53,7 @@
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,470 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/collectorPolicy.hpp"
+#include "gc/shared/jvmFlagConstraintsGC.hpp"
+#include "gc/shared/plab.hpp"
+#include "gc/shared/threadLocalAllocBuffer.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/flags/jvmFlagRangeList.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/align.hpp"
+#include "utilities/defaultStream.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/cms/jvmFlagConstraintsCMS.hpp"
+#include "gc/g1/jvmFlagConstraintsG1.hpp"
+#include "gc/parallel/jvmFlagConstraintsParallel.hpp"
+#endif
+#ifdef COMPILER1
+#include "c1/c1_globals.hpp"
+#endif // COMPILER1
+#ifdef COMPILER2
+#include "opto/c2_globals.hpp"
+#endif // COMPILER2
+
+// Some flags that have default values that indicate that the
+// JVM should automatically determine an appropriate value
+// for that flag. In those cases it is only appropriate for the
+// constraint checking to be done if the user has specified the
+// value(s) of the flag(s) on the command line. In the constraint
+// checking functions, FLAG_IS_CMDLINE() is used to check if
+// the flag has been set by the user and so should be checked.
+
+// As ParallelGCThreads differs among GC modes, we need constraint function.
+JVMFlag::Error ParallelGCThreadsConstraintFunc(uint value, bool verbose) {
+ JVMFlag::Error status = JVMFlag::SUCCESS;
+
+#if INCLUDE_ALL_GCS
+ status = ParallelGCThreadsConstraintFuncParallel(value, verbose);
+ if (status != JVMFlag::SUCCESS) {
+ return status;
+ }
+
+ status = ParallelGCThreadsConstraintFuncCMS(value, verbose);
+ if (status != JVMFlag::SUCCESS) {
+ return status;
+ }
+#endif
+
+ return status;
+}
+
+// As ConcGCThreads should be smaller than ParallelGCThreads,
+// we need constraint function.
+JVMFlag::Error ConcGCThreadsConstraintFunc(uint value, bool verbose) {
+#if INCLUDE_ALL_GCS
+ // CMS and G1 GCs use ConcGCThreads.
+ if ((UseConcMarkSweepGC || UseG1GC) && (value > ParallelGCThreads)) {
+ CommandLineError::print(verbose,
+ "ConcGCThreads (" UINT32_FORMAT ") must be "
+ "less than or equal to ParallelGCThreads (" UINT32_FORMAT ")\n",
+ value, ParallelGCThreads);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+#endif
+ return JVMFlag::SUCCESS;
+}
+
+static JVMFlag::Error MinPLABSizeBounds(const char* name, size_t value, bool verbose) {
+#if INCLUDE_ALL_GCS
+ if ((UseConcMarkSweepGC || UseG1GC || UseParallelGC) && (value < PLAB::min_size())) {
+ CommandLineError::print(verbose,
+ "%s (" SIZE_FORMAT ") must be "
+ "greater than or equal to ergonomic PLAB minimum size (" SIZE_FORMAT ")\n",
+ name, value, PLAB::min_size());
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+#endif // INCLUDE_ALL_GCS
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error MaxPLABSizeBounds(const char* name, size_t value, bool verbose) {
+#if INCLUDE_ALL_GCS
+ if ((UseConcMarkSweepGC || UseG1GC || UseParallelGC) && (value > PLAB::max_size())) {
+ CommandLineError::print(verbose,
+ "%s (" SIZE_FORMAT ") must be "
+ "less than or equal to ergonomic PLAB maximum size (" SIZE_FORMAT ")\n",
+ name, value, PLAB::max_size());
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+#endif // INCLUDE_ALL_GCS
+ return JVMFlag::SUCCESS;
+}
+
+static JVMFlag::Error MinMaxPLABSizeBounds(const char* name, size_t value, bool verbose) {
+ JVMFlag::Error status = MinPLABSizeBounds(name, value, verbose);
+
+ if (status == JVMFlag::SUCCESS) {
+ return MaxPLABSizeBounds(name, value, verbose);
+ }
+ return status;
+}
+
+JVMFlag::Error YoungPLABSizeConstraintFunc(size_t value, bool verbose) {
+ return MinMaxPLABSizeBounds("YoungPLABSize", value, verbose);
+}
+
+JVMFlag::Error OldPLABSizeConstraintFunc(size_t value, bool verbose) {
+ JVMFlag::Error status = JVMFlag::SUCCESS;
+
+#if INCLUDE_ALL_GCS
+ if (UseConcMarkSweepGC) {
+ return OldPLABSizeConstraintFuncCMS(value, verbose);
+ } else {
+ status = MinMaxPLABSizeBounds("OldPLABSize", value, verbose);
+ }
+#endif
+ return status;
+}
+
+JVMFlag::Error MinHeapFreeRatioConstraintFunc(uintx value, bool verbose) {
+ if (value > MaxHeapFreeRatio) {
+ CommandLineError::print(verbose,
+ "MinHeapFreeRatio (" UINTX_FORMAT ") must be "
+ "less than or equal to MaxHeapFreeRatio (" UINTX_FORMAT ")\n",
+ value, MaxHeapFreeRatio);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error MaxHeapFreeRatioConstraintFunc(uintx value, bool verbose) {
+ if (value < MinHeapFreeRatio) {
+ CommandLineError::print(verbose,
+ "MaxHeapFreeRatio (" UINTX_FORMAT ") must be "
+ "greater than or equal to MinHeapFreeRatio (" UINTX_FORMAT ")\n",
+ value, MinHeapFreeRatio);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+static JVMFlag::Error CheckMaxHeapSizeAndSoftRefLRUPolicyMSPerMB(size_t maxHeap, intx softRef, bool verbose) {
+ if ((softRef > 0) && ((maxHeap / M) > (max_uintx / softRef))) {
+ CommandLineError::print(verbose,
+ "Desired lifetime of SoftReferences cannot be expressed correctly. "
+ "MaxHeapSize (" SIZE_FORMAT ") or SoftRefLRUPolicyMSPerMB "
+ "(" INTX_FORMAT ") is too large\n",
+ maxHeap, softRef);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error SoftRefLRUPolicyMSPerMBConstraintFunc(intx value, bool verbose) {
+ return CheckMaxHeapSizeAndSoftRefLRUPolicyMSPerMB(MaxHeapSize, value, verbose);
+}
+
+JVMFlag::Error MarkStackSizeConstraintFunc(size_t value, bool verbose) {
+ if (value > MarkStackSizeMax) {
+ CommandLineError::print(verbose,
+ "MarkStackSize (" SIZE_FORMAT ") must be "
+ "less than or equal to MarkStackSizeMax (" SIZE_FORMAT ")\n",
+ value, MarkStackSizeMax);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error MinMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose) {
+ if (value > MaxMetaspaceFreeRatio) {
+ CommandLineError::print(verbose,
+ "MinMetaspaceFreeRatio (" UINTX_FORMAT ") must be "
+ "less than or equal to MaxMetaspaceFreeRatio (" UINTX_FORMAT ")\n",
+ value, MaxMetaspaceFreeRatio);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error MaxMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose) {
+ if (value < MinMetaspaceFreeRatio) {
+ CommandLineError::print(verbose,
+ "MaxMetaspaceFreeRatio (" UINTX_FORMAT ") must be "
+ "greater than or equal to MinMetaspaceFreeRatio (" UINTX_FORMAT ")\n",
+ value, MinMetaspaceFreeRatio);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error InitialTenuringThresholdConstraintFunc(uintx value, bool verbose) {
+#if INCLUDE_ALL_GCS
+ JVMFlag::Error status = InitialTenuringThresholdConstraintFuncParallel(value, verbose);
+ if (status != JVMFlag::SUCCESS) {
+ return status;
+ }
+#endif
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error MaxTenuringThresholdConstraintFunc(uintx value, bool verbose) {
+#if INCLUDE_ALL_GCS
+ JVMFlag::Error status = MaxTenuringThresholdConstraintFuncParallel(value, verbose);
+ if (status != JVMFlag::SUCCESS) {
+ return status;
+ }
+#endif
+
+ // MaxTenuringThreshold=0 means NeverTenure=false && AlwaysTenure=true
+ if ((value == 0) && (NeverTenure || !AlwaysTenure)) {
+ CommandLineError::print(verbose,
+ "MaxTenuringThreshold (0) should match to NeverTenure=false "
+ "&& AlwaysTenure=true. But we have NeverTenure=%s "
+ "AlwaysTenure=%s\n",
+ NeverTenure ? "true" : "false",
+ AlwaysTenure ? "true" : "false");
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error MaxGCPauseMillisConstraintFunc(uintx value, bool verbose) {
+#if INCLUDE_ALL_GCS
+ JVMFlag::Error status = MaxGCPauseMillisConstraintFuncG1(value, verbose);
+ if (status != JVMFlag::SUCCESS) {
+ return status;
+ }
+#endif
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error GCPauseIntervalMillisConstraintFunc(uintx value, bool verbose) {
+#if INCLUDE_ALL_GCS
+ JVMFlag::Error status = GCPauseIntervalMillisConstraintFuncG1(value, verbose);
+ if (status != JVMFlag::SUCCESS) {
+ return status;
+ }
+#endif
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool verbose) {
+ size_t aligned_max = align_down(max_uintx/2, Metaspace::reserve_alignment_words());
+ if (value > aligned_max) {
+ CommandLineError::print(verbose,
+ "InitialBootClassLoaderMetaspaceSize (" SIZE_FORMAT ") must be "
+ "less than or equal to aligned maximum value (" SIZE_FORMAT ")\n",
+ value, aligned_max);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ return JVMFlag::SUCCESS;
+}
+
+// To avoid an overflow by 'align_up(value, alignment)'.
+static JVMFlag::Error MaxSizeForAlignment(const char* name, size_t value, size_t alignment, bool verbose) {
+ size_t aligned_max = ((max_uintx - alignment) & ~(alignment-1));
+ if (value > aligned_max) {
+ CommandLineError::print(verbose,
+ "%s (" SIZE_FORMAT ") must be "
+ "less than or equal to aligned maximum value (" SIZE_FORMAT ")\n",
+ name, value, aligned_max);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ return JVMFlag::SUCCESS;
+}
+
+static JVMFlag::Error MaxSizeForHeapAlignment(const char* name, size_t value, bool verbose) {
+ size_t heap_alignment;
+
+#if INCLUDE_ALL_GCS
+ if (UseG1GC) {
+ // For G1 GC, we don't know until G1CollectorPolicy is created.
+ heap_alignment = MaxSizeForHeapAlignmentG1();
+ } else
+#endif
+ {
+ heap_alignment = CollectorPolicy::compute_heap_alignment();
+ }
+
+ return MaxSizeForAlignment(name, value, heap_alignment, verbose);
+}
+
+JVMFlag::Error InitialHeapSizeConstraintFunc(size_t value, bool verbose) {
+ return MaxSizeForHeapAlignment("InitialHeapSize", value, verbose);
+}
+
+JVMFlag::Error MaxHeapSizeConstraintFunc(size_t value, bool verbose) {
+ JVMFlag::Error status = MaxSizeForHeapAlignment("MaxHeapSize", value, verbose);
+
+ if (status == JVMFlag::SUCCESS) {
+ status = CheckMaxHeapSizeAndSoftRefLRUPolicyMSPerMB(value, SoftRefLRUPolicyMSPerMB, verbose);
+ }
+ return status;
+}
+
+JVMFlag::Error HeapBaseMinAddressConstraintFunc(size_t value, bool verbose) {
+ // If an overflow happened in Arguments::set_heap_size(), MaxHeapSize will have too large a value.
+ // Check for this by ensuring that MaxHeapSize plus the requested min base address still fit within max_uintx.
+ if (UseCompressedOops && FLAG_IS_ERGO(MaxHeapSize) && (value > (max_uintx - MaxHeapSize))) {
+ CommandLineError::print(verbose,
+ "HeapBaseMinAddress (" SIZE_FORMAT ") or MaxHeapSize (" SIZE_FORMAT ") is too large. "
+ "Sum of them must be less than or equal to maximum of size_t (" SIZE_FORMAT ")\n",
+ value, MaxHeapSize, max_uintx);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ return MaxSizeForHeapAlignment("HeapBaseMinAddress", value, verbose);
+}
+
+JVMFlag::Error NewSizeConstraintFunc(size_t value, bool verbose) {
+#if INCLUDE_ALL_GCS
+ JVMFlag::Error status = NewSizeConstraintFuncG1(value, verbose);
+ if (status != JVMFlag::SUCCESS) {
+ return status;
+ }
+#endif
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error MinTLABSizeConstraintFunc(size_t value, bool verbose) {
+ // At least, alignment reserve area is needed.
+ if (value < ThreadLocalAllocBuffer::alignment_reserve_in_bytes()) {
+ CommandLineError::print(verbose,
+ "MinTLABSize (" SIZE_FORMAT ") must be "
+ "greater than or equal to reserved area in TLAB (" SIZE_FORMAT ")\n",
+ value, ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ if (value > (ThreadLocalAllocBuffer::max_size() * HeapWordSize)) {
+ CommandLineError::print(verbose,
+ "MinTLABSize (" SIZE_FORMAT ") must be "
+ "less than or equal to ergonomic TLAB maximum (" SIZE_FORMAT ")\n",
+ value, ThreadLocalAllocBuffer::max_size() * HeapWordSize);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error TLABSizeConstraintFunc(size_t value, bool verbose) {
+ // Skip for default value of zero which means set ergonomically.
+ if (FLAG_IS_CMDLINE(TLABSize)) {
+ if (value < MinTLABSize) {
+ CommandLineError::print(verbose,
+ "TLABSize (" SIZE_FORMAT ") must be "
+ "greater than or equal to MinTLABSize (" SIZE_FORMAT ")\n",
+ value, MinTLABSize);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ if (value > (ThreadLocalAllocBuffer::max_size() * HeapWordSize)) {
+ CommandLineError::print(verbose,
+ "TLABSize (" SIZE_FORMAT ") must be "
+ "less than or equal to ergonomic TLAB maximum size (" SIZE_FORMAT ")\n",
+ value, (ThreadLocalAllocBuffer::max_size() * HeapWordSize));
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ }
+ return JVMFlag::SUCCESS;
+}
+
+// We will protect overflow from ThreadLocalAllocBuffer::record_slow_allocation(),
+// so AfterMemoryInit type is enough to check.
+JVMFlag::Error TLABWasteIncrementConstraintFunc(uintx value, bool verbose) {
+ if (UseTLAB) {
+ size_t refill_waste_limit = Thread::current()->tlab().refill_waste_limit();
+
+ // Compare with 'max_uintx' as ThreadLocalAllocBuffer::_refill_waste_limit is 'size_t'.
+ if (refill_waste_limit > (max_uintx - value)) {
+ CommandLineError::print(verbose,
+ "TLABWasteIncrement (" UINTX_FORMAT ") must be "
+ "less than or equal to ergonomic TLAB waste increment maximum size(" SIZE_FORMAT ")\n",
+ value, (max_uintx - refill_waste_limit));
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ }
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error SurvivorRatioConstraintFunc(uintx value, bool verbose) {
+ if (FLAG_IS_CMDLINE(SurvivorRatio) &&
+ (value > (MaxHeapSize / Universe::heap()->collector_policy()->space_alignment()))) {
+ CommandLineError::print(verbose,
+ "SurvivorRatio (" UINTX_FORMAT ") must be "
+ "less than or equal to ergonomic SurvivorRatio maximum (" SIZE_FORMAT ")\n",
+ value,
+ (MaxHeapSize / Universe::heap()->collector_policy()->space_alignment()));
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error MetaspaceSizeConstraintFunc(size_t value, bool verbose) {
+ if (value > MaxMetaspaceSize) {
+ CommandLineError::print(verbose,
+ "MetaspaceSize (" SIZE_FORMAT ") must be "
+ "less than or equal to MaxMetaspaceSize (" SIZE_FORMAT ")\n",
+ value, MaxMetaspaceSize);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error MaxMetaspaceSizeConstraintFunc(size_t value, bool verbose) {
+ if (value < MetaspaceSize) {
+ CommandLineError::print(verbose,
+ "MaxMetaspaceSize (" SIZE_FORMAT ") must be "
+ "greater than or equal to MetaspaceSize (" SIZE_FORMAT ")\n",
+ value, MaxMetaspaceSize);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error SurvivorAlignmentInBytesConstraintFunc(intx value, bool verbose) {
+ if (value != 0) {
+ if (!is_power_of_2(value)) {
+ CommandLineError::print(verbose,
+ "SurvivorAlignmentInBytes (" INTX_FORMAT ") must be "
+ "power of 2\n",
+ value);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ if (value < ObjectAlignmentInBytes) {
+ CommandLineError::print(verbose,
+ "SurvivorAlignmentInBytes (" INTX_FORMAT ") must be "
+ "greater than or equal to ObjectAlignmentInBytes (" INTX_FORMAT ")\n",
+ value, ObjectAlignmentInBytes);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ }
+ return JVMFlag::SUCCESS;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_COMMANDLINEFLAGCONSTRAINTSGC_HPP
+#define SHARE_GC_SHARED_COMMANDLINEFLAGCONSTRAINTSGC_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/cms/jvmFlagConstraintsCMS.hpp"
+#include "gc/g1/jvmFlagConstraintsG1.hpp"
+#include "gc/parallel/jvmFlagConstraintsParallel.hpp"
+#endif
+
+/*
+ * Here we have GC arguments constraints functions, which are called automatically
+ * whenever flag's value changes. If the constraint fails the function should return
+ * an appropriate error value.
+ */
+
+JVMFlag::Error ParallelGCThreadsConstraintFunc(uint value, bool verbose);
+JVMFlag::Error ConcGCThreadsConstraintFunc(uint value, bool verbose);
+JVMFlag::Error YoungPLABSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error OldPLABSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error MinHeapFreeRatioConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error MaxHeapFreeRatioConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error SoftRefLRUPolicyMSPerMBConstraintFunc(intx value, bool verbose);
+JVMFlag::Error MarkStackSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error MinMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error MaxMetaspaceFreeRatioConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error InitialTenuringThresholdConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error MaxTenuringThresholdConstraintFunc(uintx value, bool verbose);
+
+JVMFlag::Error MaxGCPauseMillisConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error GCPauseIntervalMillisConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error InitialBootClassLoaderMetaspaceSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error InitialHeapSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error MaxHeapSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error HeapBaseMinAddressConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error NewSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error MinTLABSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error TLABSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error TLABWasteIncrementConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error SurvivorRatioConstraintFunc(uintx value, bool verbose);
+JVMFlag::Error MetaspaceSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error MaxMetaspaceSizeConstraintFunc(size_t value, bool verbose);
+JVMFlag::Error SurvivorAlignmentInBytesConstraintFunc(intx value, bool verbose);
+
+// Internal
+JVMFlag::Error MaxPLABSizeBounds(const char* name, size_t value, bool verbose);
+
+#endif // SHARE_GC_SHARED_COMMANDLINEFLAGCONSTRAINTSGC_HPP
--- a/src/hotspot/share/gc/shared/modRefBarrierSet.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/shared/modRefBarrierSet.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -33,8 +33,10 @@
class ModRefBarrierSet: public BarrierSet {
protected:
ModRefBarrierSet(BarrierSetAssembler* barrier_set_assembler,
+ BarrierSetC1* barrier_set_c1,
const BarrierSet::FakeRtti& fake_rtti)
: BarrierSet(barrier_set_assembler,
+ barrier_set_c1,
fake_rtti.add_tag(BarrierSet::ModRef)) { }
~ModRefBarrierSet() { }
--- a/src/hotspot/share/gc/shared/taskqueue.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/shared/taskqueue.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -285,9 +285,10 @@
inline bool push(E t);
// Attempts to claim a task from the "local" end of the queue (the most
- // recently pushed). If successful, returns true and sets t to the task;
- // otherwise, returns false (the queue is empty).
- inline bool pop_local(volatile E& t);
+ // recently pushed) as long as the number of entries exceeds the threshold.
+ // If successful, returns true and sets t to the task; otherwise, returns false
+ // (the queue is empty or the number of elements below the threshold).
+ inline bool pop_local(volatile E& t, uint threshold = 0);
// Like pop_local(), but uses the "global" end of the queue (the least
// recently pushed).
--- a/src/hotspot/share/gc/shared/taskqueue.inline.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/gc/shared/taskqueue.inline.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -152,7 +152,7 @@
}
template<class E, MEMFLAGS F, unsigned int N> inline bool
-GenericTaskQueue<E, F, N>::pop_local(volatile E& t) {
+GenericTaskQueue<E, F, N>::pop_local(volatile E& t, uint threshold) {
uint localBot = _bottom;
// This value cannot be N-1. That can only occur as a result of
// the assignment to bottom in this method. If it does, this method
@@ -160,7 +160,7 @@
// since this is pop_local.)
uint dirty_n_elems = dirty_size(localBot, _age.top());
assert(dirty_n_elems != N - 1, "Shouldn't be possible...");
- if (dirty_n_elems == 0) return false;
+ if (dirty_n_elems <= threshold) return false;
localBot = decrement_index(localBot);
_bottom = localBot;
// This is necessary to prevent any read below from being reordered
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -37,6 +37,7 @@
#include "jvmci/jvmciCompilerToVM.hpp"
#include "jvmci/jvmciCodeInstaller.hpp"
#include "jvmci/jvmciRuntime.hpp"
+#include "runtime/flags/jvmFlag.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
@@ -125,7 +126,7 @@
}
ResourceMark rm;
const char* cstring = java_lang_String::as_utf8_string(name());
- Flag* flag = Flag::find_flag(cstring, strlen(cstring), /* allow_locked */ true, /* return_flag */ true);
+ JVMFlag* flag = JVMFlag::find_flag(cstring, strlen(cstring), /* allow_locked */ true, /* return_flag */ true);
if (flag == NULL) {
return c2vm;
}
--- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -29,6 +29,7 @@
#include "jvmci/jvmciRuntime.hpp"
#include "jvmci/jvmciCompilerToVM.hpp"
#include "jvmci/vmStructs_jvmci.hpp"
+#include "runtime/flags/jvmFlag.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/resourceHash.hpp"
@@ -378,9 +379,9 @@
#define COUNT_FLAG(ignore) +1
#ifdef ASSERT
#define CHECK_FLAG(type, name) { \
- Flag* flag = Flag::find_flag(#name, strlen(#name), /*allow_locked*/ true, /* return_flag */ true); \
+ JVMFlag* flag = JVMFlag::find_flag(#name, strlen(#name), /*allow_locked*/ true, /* return_flag */ true); \
assert(flag != NULL, "No such flag named " #name); \
- assert(flag->is_##type(), "Flag " #name " is not of type " #type); \
+ assert(flag->is_##type(), "JVMFlag " #name " is not of type " #type); \
}
#else
#define CHECK_FLAG(type, name)
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -35,6 +35,7 @@
#include "oops/oop.hpp"
#include "oops/oopHandle.hpp"
#include "oops/objArrayKlass.hpp"
+#include "runtime/flags/jvmFlag.hpp"
#include "runtime/globals.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
@@ -146,16 +147,16 @@
nonstatic_field(Deoptimization::UnrollBlock, _initial_info, intptr_t) \
nonstatic_field(Deoptimization::UnrollBlock, _unpack_kind, int) \
\
- nonstatic_field(ExceptionTableElement, start_pc, u2) \
- nonstatic_field(ExceptionTableElement, end_pc, u2) \
- nonstatic_field(ExceptionTableElement, handler_pc, u2) \
- nonstatic_field(ExceptionTableElement, catch_type_index, u2) \
+ nonstatic_field(ExceptionTableElement, start_pc, u2) \
+ nonstatic_field(ExceptionTableElement, end_pc, u2) \
+ nonstatic_field(ExceptionTableElement, handler_pc, u2) \
+ nonstatic_field(ExceptionTableElement, catch_type_index, u2) \
\
- nonstatic_field(Flag, _type, const char*) \
- nonstatic_field(Flag, _name, const char*) \
- unchecked_nonstatic_field(Flag, _addr, sizeof(void*)) \
- nonstatic_field(Flag, _flags, Flag::Flags) \
- static_field(Flag, flags, Flag*) \
+ nonstatic_field(JVMFlag, _type, const char*) \
+ nonstatic_field(JVMFlag, _name, const char*) \
+ unchecked_nonstatic_field(JVMFlag, _addr, sizeof(void*)) \
+ nonstatic_field(JVMFlag, _flags, JVMFlag::Flags) \
+ static_field(JVMFlag, flags, JVMFlag*) \
\
nonstatic_field(InstanceKlass, _fields, Array<u2>*) \
nonstatic_field(InstanceKlass, _constants, ConstantPool*) \
@@ -345,8 +346,8 @@
declare_toplevel_type(BasicLock) \
declare_toplevel_type(CompilerToVM) \
declare_toplevel_type(ExceptionTableElement) \
- declare_toplevel_type(Flag) \
- declare_toplevel_type(Flag*) \
+ declare_toplevel_type(JVMFlag) \
+ declare_toplevel_type(JVMFlag*) \
declare_toplevel_type(InvocationCounter) \
declare_toplevel_type(JVMCIEnv) \
declare_toplevel_type(LocalVariableTableElement) \
--- a/src/hotspot/share/memory/universe.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/memory/universe.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -63,7 +63,8 @@
#include "prims/resolvedMethodTable.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/commandLineFlagConstraintList.hpp"
+#include "runtime/flags/flagSetting.hpp"
+#include "runtime/flags/jvmFlagConstraintList.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
@@ -701,7 +702,7 @@
AOTLoader::universe_init();
// Checks 'AfterMemoryInit' constraints.
- if (!CommandLineFlagConstraintList::check_constraints(CommandLineFlagConstraint::AfterMemoryInit)) {
+ if (!JVMFlagConstraintList::check_constraints(JVMFlagConstraint::AfterMemoryInit)) {
return JNI_EINVAL;
}
--- a/src/hotspot/share/oops/accessBackend.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/oops/accessBackend.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -980,31 +980,6 @@
}
};
- // This class adds implied decorators that follow according to decorator rules.
- // For example adding default reference strength and default memory ordering
- // semantics.
- template <DecoratorSet input_decorators>
- struct DecoratorFixup: AllStatic {
- // If no reference strength has been picked, then strong will be picked
- static const DecoratorSet ref_strength_default = input_decorators |
- (((ON_DECORATOR_MASK & input_decorators) == 0 && (INTERNAL_VALUE_IS_OOP & input_decorators) != 0) ?
- ON_STRONG_OOP_REF : INTERNAL_EMPTY);
- // If no memory ordering has been picked, unordered will be picked
- static const DecoratorSet memory_ordering_default = ref_strength_default |
- ((MO_DECORATOR_MASK & ref_strength_default) == 0 ? MO_UNORDERED : INTERNAL_EMPTY);
- // If no barrier strength has been picked, normal will be used
- static const DecoratorSet barrier_strength_default = memory_ordering_default |
- ((AS_DECORATOR_MASK & memory_ordering_default) == 0 ? AS_NORMAL : INTERNAL_EMPTY);
- // Heap array accesses imply it is a heap access
- static const DecoratorSet heap_array_is_in_heap = barrier_strength_default |
- ((IN_HEAP_ARRAY & barrier_strength_default) != 0 ? IN_HEAP : INTERNAL_EMPTY);
- static const DecoratorSet conc_root_is_root = heap_array_is_in_heap |
- ((IN_CONCURRENT_ROOT & heap_array_is_in_heap) != 0 ? IN_ROOT : INTERNAL_EMPTY);
- static const DecoratorSet archive_root_is_root = conc_root_is_root |
- ((IN_ARCHIVE_ROOT & conc_root_is_root) != 0 ? IN_ROOT : INTERNAL_EMPTY);
- static const DecoratorSet value = archive_root_is_root | BT_BUILDTIME_DECORATORS;
- };
-
// Step 2: Reduce types.
// Enforce that for non-oop types, T and P have to be strictly the same.
// P is the type of the address and T is the type of the values.
--- a/src/hotspot/share/oops/accessDecorators.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/oops/accessDecorators.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -25,6 +25,11 @@
#ifndef SHARE_OOPS_ACCESSDECORATORS_HPP
#define SHARE_OOPS_ACCESSDECORATORS_HPP
+#include "gc/shared/barrierSetConfig.hpp"
+#include "memory/allocation.hpp"
+#include "metaprogramming/integralConstant.hpp"
+#include "utilities/globalDefinitions.hpp"
+
// A decorator is an attribute or property that affects the way a memory access is performed in some way.
// There are different groups of decorators. Some have to do with memory ordering, others to do with,
// e.g. strength of references, strength of GC barriers, or whether compression should be applied or not.
@@ -216,4 +221,58 @@
ARRAYCOPY_DISJOINT | ARRAYCOPY_ARRAYOF |
ARRAYCOPY_ATOMIC | ARRAYCOPY_ALIGNED;
+// Keep track of the last decorator.
+const DecoratorSet DECORATOR_LAST = UCONST64(1) << 30;
+
+namespace AccessInternal {
+ // This class adds implied decorators that follow according to decorator rules.
+ // For example adding default reference strength and default memory ordering
+ // semantics.
+ template <DecoratorSet input_decorators>
+ struct DecoratorFixup: AllStatic {
+ // If no reference strength has been picked, then strong will be picked
+ static const DecoratorSet ref_strength_default = input_decorators |
+ (((ON_DECORATOR_MASK & input_decorators) == 0 && (INTERNAL_VALUE_IS_OOP & input_decorators) != 0) ?
+ ON_STRONG_OOP_REF : INTERNAL_EMPTY);
+ // If no memory ordering has been picked, unordered will be picked
+ static const DecoratorSet memory_ordering_default = ref_strength_default |
+ ((MO_DECORATOR_MASK & ref_strength_default) == 0 ? MO_UNORDERED : INTERNAL_EMPTY);
+ // If no barrier strength has been picked, normal will be used
+ static const DecoratorSet barrier_strength_default = memory_ordering_default |
+ ((AS_DECORATOR_MASK & memory_ordering_default) == 0 ? AS_NORMAL : INTERNAL_EMPTY);
+ // Heap array accesses imply it is a heap access
+ static const DecoratorSet heap_array_is_in_heap = barrier_strength_default |
+ ((IN_HEAP_ARRAY & barrier_strength_default) != 0 ? IN_HEAP : INTERNAL_EMPTY);
+ static const DecoratorSet conc_root_is_root = heap_array_is_in_heap |
+ ((IN_CONCURRENT_ROOT & heap_array_is_in_heap) != 0 ? IN_ROOT : INTERNAL_EMPTY);
+ static const DecoratorSet archive_root_is_root = conc_root_is_root |
+ ((IN_ARCHIVE_ROOT & conc_root_is_root) != 0 ? IN_ROOT : INTERNAL_EMPTY);
+ static const DecoratorSet value = archive_root_is_root | BT_BUILDTIME_DECORATORS;
+ };
+
+ // This function implements the above DecoratorFixup rules, but without meta
+ // programming for code generation that does not use templates.
+ inline DecoratorSet decorator_fixup(DecoratorSet input_decorators) {
+ // If no reference strength has been picked, then strong will be picked
+ DecoratorSet ref_strength_default = input_decorators |
+ (((ON_DECORATOR_MASK & input_decorators) == 0 && (INTERNAL_VALUE_IS_OOP & input_decorators) != 0) ?
+ ON_STRONG_OOP_REF : INTERNAL_EMPTY);
+ // If no memory ordering has been picked, unordered will be picked
+ DecoratorSet memory_ordering_default = ref_strength_default |
+ ((MO_DECORATOR_MASK & ref_strength_default) == 0 ? MO_UNORDERED : INTERNAL_EMPTY);
+ // If no barrier strength has been picked, normal will be used
+ DecoratorSet barrier_strength_default = memory_ordering_default |
+ ((AS_DECORATOR_MASK & memory_ordering_default) == 0 ? AS_NORMAL : INTERNAL_EMPTY);
+ // Heap array accesses imply it is a heap access
+ DecoratorSet heap_array_is_in_heap = barrier_strength_default |
+ ((IN_HEAP_ARRAY & barrier_strength_default) != 0 ? IN_HEAP : INTERNAL_EMPTY);
+ DecoratorSet conc_root_is_root = heap_array_is_in_heap |
+ ((IN_CONCURRENT_ROOT & heap_array_is_in_heap) != 0 ? IN_ROOT : INTERNAL_EMPTY);
+ DecoratorSet archive_root_is_root = conc_root_is_root |
+ ((IN_ARCHIVE_ROOT & conc_root_is_root) != 0 ? IN_ROOT : INTERNAL_EMPTY);
+ DecoratorSet value = archive_root_is_root | BT_BUILDTIME_DECORATORS;
+ return value;
+ }
+}
+
#endif // SHARE_OOPS_ACCESSDECORATORS_HPP
--- a/src/hotspot/share/oops/klassVtable.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/oops/klassVtable.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -38,6 +38,7 @@
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/copy.hpp"
--- a/src/hotspot/share/opto/node.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/opto/node.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -700,7 +700,7 @@
//------------------------------is_unreachable---------------------------------
bool Node::is_unreachable(PhaseIterGVN &igvn) const {
assert(!is_Mach(), "doesn't work with MachNodes");
- return outcnt() == 0 || igvn.type(this) == Type::TOP || in(0)->is_top();
+ return outcnt() == 0 || igvn.type(this) == Type::TOP || (in(0) != NULL && in(0)->is_top());
}
//------------------------------add_req----------------------------------------
--- a/src/hotspot/share/opto/subnode.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/opto/subnode.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -448,7 +448,12 @@
public:
SqrtFNode(Compile* C, Node *c, Node *in1) : Node(c, in1) {
init_flags(Flag_is_expensive);
- C->add_expensive_node(this);
+ if (c != NULL) {
+ // Treat node only as expensive if a control input is set because it might
+ // be created from a SqrtDNode in ConvD2FNode::Ideal() that was found to
+ // be unique and therefore has no control input.
+ C->add_expensive_node(this);
+ }
}
virtual int Opcode() const;
const Type *bottom_type() const { return Type::FLOAT; }
--- a/src/hotspot/share/opto/superword.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/opto/superword.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -2144,8 +2144,21 @@
// we use the memory state of the last load. However, if any load could
// not be moved down due to the dependence constraint, we use the memory
// state of the first load.
- Node* last_mem = executed_last(pk)->in(MemNode::Memory);
- Node* first_mem = executed_first(pk)->in(MemNode::Memory);
+ Node* first_mem = pk->at(0)->in(MemNode::Memory);
+ Node* last_mem = first_mem;
+ for (uint i = 1; i < pk->size(); i++) {
+ Node* ld = pk->at(i);
+ Node* mem = ld->in(MemNode::Memory);
+ assert(in_bb(first_mem) || in_bb(mem) || mem == first_mem, "2 different memory state from outside the loop?");
+ if (in_bb(mem)) {
+ if (in_bb(first_mem) && bb_idx(mem) < bb_idx(first_mem)) {
+ first_mem = mem;
+ }
+ if (!in_bb(last_mem) || bb_idx(mem) > bb_idx(last_mem)) {
+ last_mem = mem;
+ }
+ }
+ }
bool schedule_last = true;
for (uint i = 0; i < pk->size(); i++) {
Node* ld = pk->at(i);
--- a/src/hotspot/share/precompiled/precompiled.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/precompiled/precompiled.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -101,6 +101,7 @@
# include "gc/shared/genCollectedHeap.hpp"
# include "gc/shared/generation.hpp"
# include "gc/shared/generationCounters.hpp"
+# include "gc/shared/jvmFlagConstraintsGC.hpp"
# include "gc/shared/modRefBarrierSet.hpp"
# include "gc/shared/referencePolicy.hpp"
# include "gc/shared/referenceProcessor.hpp"
@@ -163,6 +164,13 @@
# include "runtime/extendedPC.hpp"
# include "runtime/fieldDescriptor.hpp"
# include "runtime/fieldType.hpp"
+# include "runtime/flags/flagSetting.hpp"
+# include "runtime/flags/jvmFlag.hpp"
+# include "runtime/flags/jvmFlagConstraintList.hpp"
+# include "runtime/flags/jvmFlagConstraintsCompiler.hpp"
+# include "runtime/flags/jvmFlagConstraintsRuntime.hpp"
+# include "runtime/flags/jvmFlagRangeList.hpp"
+# include "runtime/flags/jvmFlagWriteableList.hpp"
# include "runtime/frame.hpp"
# include "runtime/frame.inline.hpp"
# include "runtime/globals.hpp"
@@ -292,6 +300,7 @@
# include "gc/cms/concurrentMarkSweepGeneration.hpp"
# include "gc/cms/freeChunk.hpp"
# include "gc/cms/gSpaceCounters.hpp"
+# include "gc/cms/jvmFlagConstraintsCMS.hpp"
# include "gc/cms/parOopClosures.hpp"
# include "gc/cms/promotionInfo.hpp"
# include "gc/cms/yieldingWorkgroup.hpp"
@@ -299,10 +308,12 @@
# include "gc/g1/g1BlockOffsetTable.hpp"
# include "gc/g1/g1OopClosures.hpp"
# include "gc/g1/g1_globals.hpp"
+# include "gc/g1/jvmFlagConstraintsG1.hpp"
# include "gc/g1/ptrQueue.hpp"
# include "gc/g1/satbMarkQueue.hpp"
# include "gc/parallel/gcAdaptivePolicyCounters.hpp"
# include "gc/parallel/immutableSpace.hpp"
+# include "gc/parallel/jvmFlagConstraintsParallel.hpp"
# include "gc/parallel/mutableSpace.hpp"
# include "gc/parallel/objectStartArray.hpp"
# include "gc/parallel/parMarkBitMap.hpp"
--- a/src/hotspot/share/prims/whitebox.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/prims/whitebox.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -53,6 +53,7 @@
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
+#include "runtime/flags/jvmFlag.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@@ -971,29 +972,29 @@
WB_END
template <typename T>
-static bool GetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, Flag::Error (*TAt)(const char*, T*, bool, bool)) {
+static bool GetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, JVMFlag::Error (*TAt)(const char*, T*, bool, bool)) {
if (name == NULL) {
return false;
}
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
const char* flag_name = env->GetStringUTFChars(name, NULL);
CHECK_JNI_EXCEPTION_(env, false);
- Flag::Error result = (*TAt)(flag_name, value, true, true);
+ JVMFlag::Error result = (*TAt)(flag_name, value, true, true);
env->ReleaseStringUTFChars(name, flag_name);
- return (result == Flag::SUCCESS);
+ return (result == JVMFlag::SUCCESS);
}
template <typename T>
-static bool SetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, Flag::Error (*TAtPut)(const char*, T*, Flag::Flags)) {
+static bool SetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, JVMFlag::Error (*TAtPut)(const char*, T*, JVMFlag::Flags)) {
if (name == NULL) {
return false;
}
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
const char* flag_name = env->GetStringUTFChars(name, NULL);
CHECK_JNI_EXCEPTION_(env, false);
- Flag::Error result = (*TAtPut)(flag_name, value, Flag::INTERNAL);
+ JVMFlag::Error result = (*TAtPut)(flag_name, value, JVMFlag::INTERNAL);
env->ReleaseStringUTFChars(name, flag_name);
- return (result == Flag::SUCCESS);
+ return (result == JVMFlag::SUCCESS);
}
template <typename T>
@@ -1026,28 +1027,28 @@
return box(thread, env, vmSymbols::java_lang_Double(), vmSymbols::Double_valueOf_signature(), value);
}
-static Flag* getVMFlag(JavaThread* thread, JNIEnv* env, jstring name) {
+static JVMFlag* getVMFlag(JavaThread* thread, JNIEnv* env, jstring name) {
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
const char* flag_name = env->GetStringUTFChars(name, NULL);
CHECK_JNI_EXCEPTION_(env, NULL);
- Flag* result = Flag::find_flag(flag_name, strlen(flag_name), true, true);
+ JVMFlag* result = JVMFlag::find_flag(flag_name, strlen(flag_name), true, true);
env->ReleaseStringUTFChars(name, flag_name);
return result;
}
WB_ENTRY(jboolean, WB_IsConstantVMFlag(JNIEnv* env, jobject o, jstring name))
- Flag* flag = getVMFlag(thread, env, name);
+ JVMFlag* flag = getVMFlag(thread, env, name);
return (flag != NULL) && flag->is_constant_in_binary();
WB_END
WB_ENTRY(jboolean, WB_IsLockedVMFlag(JNIEnv* env, jobject o, jstring name))
- Flag* flag = getVMFlag(thread, env, name);
+ JVMFlag* flag = getVMFlag(thread, env, name);
return (flag != NULL) && !(flag->is_unlocked() || flag->is_unlocker());
WB_END
WB_ENTRY(jobject, WB_GetBooleanVMFlag(JNIEnv* env, jobject o, jstring name))
bool result;
- if (GetVMFlag <bool> (thread, env, name, &result, &CommandLineFlags::boolAt)) {
+ if (GetVMFlag <bool> (thread, env, name, &result, &JVMFlag::boolAt)) {
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
return booleanBox(thread, env, result);
}
@@ -1056,7 +1057,7 @@
WB_ENTRY(jobject, WB_GetIntVMFlag(JNIEnv* env, jobject o, jstring name))
int result;
- if (GetVMFlag <int> (thread, env, name, &result, &CommandLineFlags::intAt)) {
+ if (GetVMFlag <int> (thread, env, name, &result, &JVMFlag::intAt)) {
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
return longBox(thread, env, result);
}
@@ -1065,7 +1066,7 @@
WB_ENTRY(jobject, WB_GetUintVMFlag(JNIEnv* env, jobject o, jstring name))
uint result;
- if (GetVMFlag <uint> (thread, env, name, &result, &CommandLineFlags::uintAt)) {
+ if (GetVMFlag <uint> (thread, env, name, &result, &JVMFlag::uintAt)) {
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
return longBox(thread, env, result);
}
@@ -1074,7 +1075,7 @@
WB_ENTRY(jobject, WB_GetIntxVMFlag(JNIEnv* env, jobject o, jstring name))
intx result;
- if (GetVMFlag <intx> (thread, env, name, &result, &CommandLineFlags::intxAt)) {
+ if (GetVMFlag <intx> (thread, env, name, &result, &JVMFlag::intxAt)) {
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
return longBox(thread, env, result);
}
@@ -1083,7 +1084,7 @@
WB_ENTRY(jobject, WB_GetUintxVMFlag(JNIEnv* env, jobject o, jstring name))
uintx result;
- if (GetVMFlag <uintx> (thread, env, name, &result, &CommandLineFlags::uintxAt)) {
+ if (GetVMFlag <uintx> (thread, env, name, &result, &JVMFlag::uintxAt)) {
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
return longBox(thread, env, result);
}
@@ -1092,7 +1093,7 @@
WB_ENTRY(jobject, WB_GetUint64VMFlag(JNIEnv* env, jobject o, jstring name))
uint64_t result;
- if (GetVMFlag <uint64_t> (thread, env, name, &result, &CommandLineFlags::uint64_tAt)) {
+ if (GetVMFlag <uint64_t> (thread, env, name, &result, &JVMFlag::uint64_tAt)) {
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
return longBox(thread, env, result);
}
@@ -1101,7 +1102,7 @@
WB_ENTRY(jobject, WB_GetSizeTVMFlag(JNIEnv* env, jobject o, jstring name))
uintx result;
- if (GetVMFlag <size_t> (thread, env, name, &result, &CommandLineFlags::size_tAt)) {
+ if (GetVMFlag <size_t> (thread, env, name, &result, &JVMFlag::size_tAt)) {
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
return longBox(thread, env, result);
}
@@ -1110,7 +1111,7 @@
WB_ENTRY(jobject, WB_GetDoubleVMFlag(JNIEnv* env, jobject o, jstring name))
double result;
- if (GetVMFlag <double> (thread, env, name, &result, &CommandLineFlags::doubleAt)) {
+ if (GetVMFlag <double> (thread, env, name, &result, &JVMFlag::doubleAt)) {
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
return doubleBox(thread, env, result);
}
@@ -1119,7 +1120,7 @@
WB_ENTRY(jstring, WB_GetStringVMFlag(JNIEnv* env, jobject o, jstring name))
ccstr ccstrResult;
- if (GetVMFlag <ccstr> (thread, env, name, &ccstrResult, &CommandLineFlags::ccstrAt)) {
+ if (GetVMFlag <ccstr> (thread, env, name, &ccstrResult, &JVMFlag::ccstrAt)) {
ThreadToNativeFromVM ttnfv(thread); // can't be in VM when we call JNI
jstring result = env->NewStringUTF(ccstrResult);
CHECK_JNI_EXCEPTION_(env, NULL);
@@ -1130,42 +1131,42 @@
WB_ENTRY(void, WB_SetBooleanVMFlag(JNIEnv* env, jobject o, jstring name, jboolean value))
bool result = value == JNI_TRUE ? true : false;
- SetVMFlag <bool> (thread, env, name, &result, &CommandLineFlags::boolAtPut);
+ SetVMFlag <bool> (thread, env, name, &result, &JVMFlag::boolAtPut);
WB_END
WB_ENTRY(void, WB_SetIntVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
int result = value;
- SetVMFlag <int> (thread, env, name, &result, &CommandLineFlags::intAtPut);
+ SetVMFlag <int> (thread, env, name, &result, &JVMFlag::intAtPut);
WB_END
WB_ENTRY(void, WB_SetUintVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
uint result = value;
- SetVMFlag <uint> (thread, env, name, &result, &CommandLineFlags::uintAtPut);
+ SetVMFlag <uint> (thread, env, name, &result, &JVMFlag::uintAtPut);
WB_END
WB_ENTRY(void, WB_SetIntxVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
intx result = value;
- SetVMFlag <intx> (thread, env, name, &result, &CommandLineFlags::intxAtPut);
+ SetVMFlag <intx> (thread, env, name, &result, &JVMFlag::intxAtPut);
WB_END
WB_ENTRY(void, WB_SetUintxVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
uintx result = value;
- SetVMFlag <uintx> (thread, env, name, &result, &CommandLineFlags::uintxAtPut);
+ SetVMFlag <uintx> (thread, env, name, &result, &JVMFlag::uintxAtPut);
WB_END
WB_ENTRY(void, WB_SetUint64VMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
uint64_t result = value;
- SetVMFlag <uint64_t> (thread, env, name, &result, &CommandLineFlags::uint64_tAtPut);
+ SetVMFlag <uint64_t> (thread, env, name, &result, &JVMFlag::uint64_tAtPut);
WB_END
WB_ENTRY(void, WB_SetSizeTVMFlag(JNIEnv* env, jobject o, jstring name, jlong value))
size_t result = value;
- SetVMFlag <size_t> (thread, env, name, &result, &CommandLineFlags::size_tAtPut);
+ SetVMFlag <size_t> (thread, env, name, &result, &JVMFlag::size_tAtPut);
WB_END
WB_ENTRY(void, WB_SetDoubleVMFlag(JNIEnv* env, jobject o, jstring name, jdouble value))
double result = value;
- SetVMFlag <double> (thread, env, name, &result, &CommandLineFlags::doubleAtPut);
+ SetVMFlag <double> (thread, env, name, &result, &JVMFlag::doubleAtPut);
WB_END
WB_ENTRY(void, WB_SetStringVMFlag(JNIEnv* env, jobject o, jstring name, jstring value))
@@ -1182,7 +1183,7 @@
bool needFree;
{
ThreadInVMfromNative ttvfn(thread); // back to VM
- needFree = SetVMFlag <ccstr> (thread, env, name, &ccstrResult, &CommandLineFlags::ccstrAtPut);
+ needFree = SetVMFlag <ccstr> (thread, env, name, &ccstrResult, &JVMFlag::ccstrAtPut);
}
if (value != NULL) {
env->ReleaseStringUTFChars(value, ccstrValue);
--- a/src/hotspot/share/runtime/arguments.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/runtime/arguments.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -42,10 +42,10 @@
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/arguments_ext.hpp"
-#include "runtime/commandLineFlagConstraintList.hpp"
-#include "runtime/commandLineFlagWriteableList.hpp"
-#include "runtime/commandLineFlagRangeList.hpp"
-#include "runtime/globals.hpp"
+#include "runtime/flags/jvmFlag.hpp"
+#include "runtime/flags/jvmFlagConstraintList.hpp"
+#include "runtime/flags/jvmFlagWriteableList.hpp"
+#include "runtime/flags/jvmFlagRangeList.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
@@ -739,7 +739,7 @@
// if flag has become obsolete it should not have a "globals" flag defined anymore.
if (!version_less_than(JDK_Version::current(), flag.obsolete_in)) {
- if (Flag::find_flag(flag.name) != NULL) {
+ if (JVMFlag::find_flag(flag.name) != NULL) {
// Temporarily disable the warning: 8196739
// warning("Global variable for obsolete special flag entry \"%s\" should be removed", flag.name);
}
@@ -749,7 +749,7 @@
if (!flag.expired_in.is_undefined()) {
// if flag has become expired it should not have a "globals" flag defined anymore.
if (!version_less_than(JDK_Version::current(), flag.expired_in)) {
- if (Flag::find_flag(flag.name) != NULL) {
+ if (JVMFlag::find_flag(flag.name) != NULL) {
// Temporarily disable the warning: 8196739
// warning("Global variable for expired flag entry \"%s\" should be removed", flag.name);
}
@@ -833,15 +833,15 @@
}
}
-static bool set_bool_flag(const char* name, bool value, Flag::Flags origin) {
- if (CommandLineFlags::boolAtPut(name, &value, origin) == Flag::SUCCESS) {
+static bool set_bool_flag(const char* name, bool value, JVMFlag::Flags origin) {
+ if (JVMFlag::boolAtPut(name, &value, origin) == JVMFlag::SUCCESS) {
return true;
} else {
return false;
}
}
-static bool set_fp_numeric_flag(const char* name, char* value, Flag::Flags origin) {
+static bool set_fp_numeric_flag(const char* name, char* value, JVMFlag::Flags origin) {
char* end;
errno = 0;
double v = strtod(value, &end);
@@ -849,18 +849,18 @@
return false;
}
- if (CommandLineFlags::doubleAtPut(name, &v, origin) == Flag::SUCCESS) {
+ if (JVMFlag::doubleAtPut(name, &v, origin) == JVMFlag::SUCCESS) {
return true;
}
return false;
}
-static bool set_numeric_flag(const char* name, char* value, Flag::Flags origin) {
+static bool set_numeric_flag(const char* name, char* value, JVMFlag::Flags origin) {
julong v;
int int_v;
intx intx_v;
bool is_neg = false;
- Flag* result = Flag::find_flag(name, strlen(name));
+ JVMFlag* result = JVMFlag::find_flag(name, strlen(name));
if (result == NULL) {
return false;
@@ -882,43 +882,43 @@
if (is_neg) {
int_v = -int_v;
}
- return CommandLineFlags::intAtPut(result, &int_v, origin) == Flag::SUCCESS;
+ return JVMFlag::intAtPut(result, &int_v, origin) == JVMFlag::SUCCESS;
} else if (result->is_uint()) {
uint uint_v = (uint) v;
- return CommandLineFlags::uintAtPut(result, &uint_v, origin) == Flag::SUCCESS;
+ return JVMFlag::uintAtPut(result, &uint_v, origin) == JVMFlag::SUCCESS;
} else if (result->is_intx()) {
intx_v = (intx) v;
if (is_neg) {
intx_v = -intx_v;
}
- return CommandLineFlags::intxAtPut(result, &intx_v, origin) == Flag::SUCCESS;
+ return JVMFlag::intxAtPut(result, &intx_v, origin) == JVMFlag::SUCCESS;
} else if (result->is_uintx()) {
uintx uintx_v = (uintx) v;
- return CommandLineFlags::uintxAtPut(result, &uintx_v, origin) == Flag::SUCCESS;
+ return JVMFlag::uintxAtPut(result, &uintx_v, origin) == JVMFlag::SUCCESS;
} else if (result->is_uint64_t()) {
uint64_t uint64_t_v = (uint64_t) v;
- return CommandLineFlags::uint64_tAtPut(result, &uint64_t_v, origin) == Flag::SUCCESS;
+ return JVMFlag::uint64_tAtPut(result, &uint64_t_v, origin) == JVMFlag::SUCCESS;
} else if (result->is_size_t()) {
size_t size_t_v = (size_t) v;
- return CommandLineFlags::size_tAtPut(result, &size_t_v, origin) == Flag::SUCCESS;
+ return JVMFlag::size_tAtPut(result, &size_t_v, origin) == JVMFlag::SUCCESS;
} else if (result->is_double()) {
double double_v = (double) v;
- return CommandLineFlags::doubleAtPut(result, &double_v, origin) == Flag::SUCCESS;
+ return JVMFlag::doubleAtPut(result, &double_v, origin) == JVMFlag::SUCCESS;
} else {
return false;
}
}
-static bool set_string_flag(const char* name, const char* value, Flag::Flags origin) {
- if (CommandLineFlags::ccstrAtPut(name, &value, origin) != Flag::SUCCESS) return false;
- // Contract: CommandLineFlags always returns a pointer that needs freeing.
+static bool set_string_flag(const char* name, const char* value, JVMFlag::Flags origin) {
+ if (JVMFlag::ccstrAtPut(name, &value, origin) != JVMFlag::SUCCESS) return false;
+ // Contract: JVMFlag always returns a pointer that needs freeing.
FREE_C_HEAP_ARRAY(char, value);
return true;
}
-static bool append_to_string_flag(const char* name, const char* new_value, Flag::Flags origin) {
+static bool append_to_string_flag(const char* name, const char* new_value, JVMFlag::Flags origin) {
const char* old_value = "";
- if (CommandLineFlags::ccstrAt(name, &old_value) != Flag::SUCCESS) return false;
+ if (JVMFlag::ccstrAt(name, &old_value) != JVMFlag::SUCCESS) return false;
size_t old_len = old_value != NULL ? strlen(old_value) : 0;
size_t new_len = strlen(new_value);
const char* value;
@@ -935,11 +935,11 @@
value = buf;
free_this_too = buf;
}
- (void) CommandLineFlags::ccstrAtPut(name, &value, origin);
- // CommandLineFlags always returns a pointer that needs freeing.
+ (void) JVMFlag::ccstrAtPut(name, &value, origin);
+ // JVMFlag always returns a pointer that needs freeing.
FREE_C_HEAP_ARRAY(char, value);
if (free_this_too != NULL) {
- // CommandLineFlags made its own copy, so I must delete my own temp. buffer.
+ // JVMFlag made its own copy, so I must delete my own temp. buffer.
FREE_C_HEAP_ARRAY(char, free_this_too);
}
return true;
@@ -1010,7 +1010,7 @@
return a;
}
-bool Arguments::parse_argument(const char* arg, Flag::Flags origin) {
+bool Arguments::parse_argument(const char* arg, JVMFlag::Flags origin) {
// range of acceptable characters spelled out for portability reasons
#define NAME_RANGE "[abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_]"
@@ -1048,7 +1048,7 @@
char punct;
if (sscanf(arg, "%" XSTR(BUFLEN) NAME_RANGE "%c", name, &punct) == 2 && punct == '=') {
const char* value = strchr(arg, '=') + 1;
- Flag* flag;
+ JVMFlag* flag;
// this scanf pattern matches both strings (handled here) and numbers (handled later))
AliasedLoggingFlag alf = catch_logging_aliases(name, true);
@@ -1060,7 +1060,7 @@
if (real_name == NULL) {
return false;
}
- flag = Flag::find_flag(real_name);
+ flag = JVMFlag::find_flag(real_name);
if (flag != NULL && flag->is_ccstr()) {
if (flag->ccstr_accumulates()) {
return append_to_string_flag(real_name, value, origin);
@@ -1221,7 +1221,7 @@
bool Arguments::process_argument(const char* arg,
jboolean ignore_unrecognized,
- Flag::Flags origin) {
+ JVMFlag::Flags origin) {
JDK_Version since = JDK_Version();
if (parse_argument(arg, origin)) {
@@ -1266,10 +1266,10 @@
// For locked flags, report a custom error message if available.
// Otherwise, report the standard unrecognized VM option.
- Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true, true);
+ JVMFlag* found_flag = JVMFlag::find_flag((const char*)argname, arg_len, true, true);
if (found_flag != NULL) {
char locked_message_buf[BUFLEN];
- Flag::MsgType msg_type = found_flag->get_locked_message(locked_message_buf, BUFLEN);
+ JVMFlag::MsgType msg_type = found_flag->get_locked_message(locked_message_buf, BUFLEN);
if (strlen(locked_message_buf) == 0) {
if (found_flag->is_bool() && !has_plus_minus) {
jio_fprintf(defaultStream::error_stream(),
@@ -1283,8 +1283,8 @@
}
} else {
#ifdef PRODUCT
- bool mismatched = ((msg_type == Flag::NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD) ||
- (msg_type == Flag::DEVELOPER_FLAG_BUT_PRODUCT_BUILD));
+ bool mismatched = ((msg_type == JVMFlag::NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD) ||
+ (msg_type == JVMFlag::DEVELOPER_FLAG_BUT_PRODUCT_BUILD));
if (ignore_unrecognized && mismatched) {
return true;
}
@@ -1297,7 +1297,7 @@
}
jio_fprintf(defaultStream::error_stream(),
"Unrecognized VM option '%s'\n", argname);
- Flag* fuzzy_matched = Flag::fuzzy_match((const char*)argname, arg_len, true);
+ JVMFlag* fuzzy_matched = JVMFlag::fuzzy_match((const char*)argname, arg_len, true);
if (fuzzy_matched != NULL) {
jio_fprintf(defaultStream::error_stream(),
"Did you mean '%s%s%s'? ",
@@ -1350,7 +1350,7 @@
// this allows a way to include spaces in string-valued options
token[pos] = '\0';
logOption(token);
- result &= process_argument(token, ignore_unrecognized, Flag::CONFIG_FILE);
+ result &= process_argument(token, ignore_unrecognized, JVMFlag::CONFIG_FILE);
build_jvm_flags(token);
pos = 0;
in_white_space = true;
@@ -1368,7 +1368,7 @@
}
if (pos > 0) {
token[pos] = '\0';
- result &= process_argument(token, ignore_unrecognized, Flag::CONFIG_FILE);
+ result &= process_argument(token, ignore_unrecognized, JVMFlag::CONFIG_FILE);
build_jvm_flags(token);
}
fclose(stream);
@@ -1991,10 +1991,10 @@
initHeapSize = limit_by_allocatable_memory(initHeapSize);
if (FLAG_IS_DEFAULT(MaxHeapSize)) {
- if (FLAG_SET_CMDLINE(size_t, MaxHeapSize, initHeapSize) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(size_t, MaxHeapSize, initHeapSize) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(size_t, InitialHeapSize, initHeapSize) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(size_t, InitialHeapSize, initHeapSize) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
// Currently the minimum size and the initial heap sizes are the same.
@@ -2003,10 +2003,10 @@
if (FLAG_IS_DEFAULT(NewSize)) {
// Make the young generation 3/8ths of the total heap.
if (FLAG_SET_CMDLINE(size_t, NewSize,
- ((julong) MaxHeapSize / (julong) 8) * (julong) 3) != Flag::SUCCESS) {
+ ((julong) MaxHeapSize / (julong) 8) * (julong) 3) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(size_t, MaxNewSize, NewSize) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(size_t, MaxNewSize, NewSize) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
}
@@ -2016,20 +2016,20 @@
#endif
// Increase some data structure sizes for efficiency
- if (FLAG_SET_CMDLINE(size_t, BaseFootPrintEstimate, MaxHeapSize) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(size_t, BaseFootPrintEstimate, MaxHeapSize) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, ResizeTLAB, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, ResizeTLAB, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(size_t, TLABSize, 256 * K) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(size_t, TLABSize, 256 * K) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
// See the OldPLABSize comment below, but replace 'after promotion'
// with 'after copying'. YoungPLABSize is the size of the survivor
// space per-gc-thread buffers. The default is 4kw.
- if (FLAG_SET_CMDLINE(size_t, YoungPLABSize, 256 * K) != Flag::SUCCESS) { // Note: this is in words
+ if (FLAG_SET_CMDLINE(size_t, YoungPLABSize, 256 * K) != JVMFlag::SUCCESS) { // Note: this is in words
return JNI_EINVAL;
}
@@ -2046,29 +2046,29 @@
// locality. A minor effect may be that larger PLABs reduce the
// number of PLAB allocation events during gc. The value of 8kw
// was arrived at by experimenting with specjbb.
- if (FLAG_SET_CMDLINE(size_t, OldPLABSize, 8 * K) != Flag::SUCCESS) { // Note: this is in words
+ if (FLAG_SET_CMDLINE(size_t, OldPLABSize, 8 * K) != JVMFlag::SUCCESS) { // Note: this is in words
return JNI_EINVAL;
}
// Enable parallel GC and adaptive generation sizing
- if (FLAG_SET_CMDLINE(bool, UseParallelGC, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, UseParallelGC, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
// Encourage steady state memory management
- if (FLAG_SET_CMDLINE(uintx, ThresholdTolerance, 100) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(uintx, ThresholdTolerance, 100) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
// This appears to improve mutator locality
- if (FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
// Get around early Solaris scheduling bug
// (affinity vs other jobs on system)
// but disallow DR and offlining (5008695).
- if (FLAG_SET_CMDLINE(bool, BindGCTaskThreadsToCPUs, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, BindGCTaskThreadsToCPUs, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
@@ -2409,20 +2409,20 @@
// Parse args structure generated from JAVA_TOOL_OPTIONS environment
// variable (if present).
- jint result = parse_each_vm_init_arg(java_tool_options_args, &patch_mod_javabase, Flag::ENVIRON_VAR);
+ jint result = parse_each_vm_init_arg(java_tool_options_args, &patch_mod_javabase, JVMFlag::ENVIRON_VAR);
if (result != JNI_OK) {
return result;
}
// Parse args structure generated from the command line flags.
- result = parse_each_vm_init_arg(cmd_line_args, &patch_mod_javabase, Flag::COMMAND_LINE);
+ result = parse_each_vm_init_arg(cmd_line_args, &patch_mod_javabase, JVMFlag::COMMAND_LINE);
if (result != JNI_OK) {
return result;
}
// Parse args structure generated from the _JAVA_OPTIONS environment
// variable (if present) (mimics classic VM)
- result = parse_each_vm_init_arg(java_options_args, &patch_mod_javabase, Flag::ENVIRON_VAR);
+ result = parse_each_vm_init_arg(java_options_args, &patch_mod_javabase, JVMFlag::ENVIRON_VAR);
if (result != JNI_OK) {
return result;
}
@@ -2566,7 +2566,7 @@
return JNI_OK;
}
-jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_mod_javabase, Flag::Flags origin) {
+jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_mod_javabase, JVMFlag::Flags origin) {
// For match_option to return remaining or value part of option string
const char* tail;
@@ -2599,7 +2599,7 @@
} else if (!strcmp(tail, ":gc")) {
LogConfiguration::configure_stdout(LogLevel::Info, true, LOG_TAGS(gc));
} else if (!strcmp(tail, ":jni")) {
- if (FLAG_SET_CMDLINE(bool, PrintJNIResolving, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, PrintJNIResolving, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
}
@@ -2736,24 +2736,24 @@
set_enable_preview();
// -Xnoclassgc
} else if (match_option(option, "-Xnoclassgc")) {
- if (FLAG_SET_CMDLINE(bool, ClassUnloading, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, ClassUnloading, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
// -Xconcgc
} else if (match_option(option, "-Xconcgc")) {
- if (FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
handle_extra_cms_flags("-Xconcgc uses UseConcMarkSweepGC");
// -Xnoconcgc
} else if (match_option(option, "-Xnoconcgc")) {
- if (FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
handle_extra_cms_flags("-Xnoconcgc uses UseConcMarkSweepGC");
// -Xbatch
} else if (match_option(option, "-Xbatch")) {
- if (FLAG_SET_CMDLINE(bool, BackgroundCompilation, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, BackgroundCompilation, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
// -Xmn for compatibility with other JVM vendors
@@ -2766,10 +2766,10 @@
describe_range_error(errcode);
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(size_t, MaxNewSize, (size_t)long_initial_young_size) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(size_t, MaxNewSize, (size_t)long_initial_young_size) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(size_t, NewSize, (size_t)long_initial_young_size) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(size_t, NewSize, (size_t)long_initial_young_size) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
// -Xms
@@ -2786,7 +2786,7 @@
set_min_heap_size((size_t)long_initial_heap_size);
// Currently the minimum size and the initial heap sizes are the same.
// Can be overridden with -XX:InitialHeapSize.
- if (FLAG_SET_CMDLINE(size_t, InitialHeapSize, (size_t)long_initial_heap_size) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(size_t, InitialHeapSize, (size_t)long_initial_heap_size) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
// -Xmx
@@ -2799,7 +2799,7 @@
describe_range_error(errcode);
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(size_t, MaxHeapSize, (size_t)long_max_heap_size) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(size_t, MaxHeapSize, (size_t)long_max_heap_size) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
// Xmaxf
@@ -2812,7 +2812,7 @@
option->optionString);
return JNI_EINVAL;
} else {
- if (FLAG_SET_CMDLINE(uintx, MaxHeapFreeRatio, maxf) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(uintx, MaxHeapFreeRatio, maxf) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
}
@@ -2826,7 +2826,7 @@
option->optionString);
return JNI_EINVAL;
} else {
- if (FLAG_SET_CMDLINE(uintx, MinHeapFreeRatio, minf) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(uintx, MinHeapFreeRatio, minf) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
}
@@ -2837,7 +2837,7 @@
if (err != JNI_OK) {
return err;
}
- if (FLAG_SET_CMDLINE(intx, ThreadStackSize, value) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(intx, ThreadStackSize, value) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
} else if (match_option(option, "-Xmaxjitcodesize", &tail) ||
@@ -2850,7 +2850,7 @@
"Invalid maximum code cache size: %s.\n", option->optionString);
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(uintx, ReservedCodeCacheSize, (uintx)long_ReservedCodeCacheSize) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(uintx, ReservedCodeCacheSize, (uintx)long_ReservedCodeCacheSize) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
// -green
@@ -2864,7 +2864,7 @@
// -Xrs
} else if (match_option(option, "-Xrs")) {
// Classic/EVM option, new functionality
- if (FLAG_SET_CMDLINE(bool, ReduceSignalUsage, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, ReduceSignalUsage, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
// -Xprof
@@ -2875,17 +2875,17 @@
warning("Ignoring option %s; support was removed in %s", option->optionString, version);
// -Xconcurrentio
} else if (match_option(option, "-Xconcurrentio")) {
- if (FLAG_SET_CMDLINE(bool, UseLWPSynchronization, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, UseLWPSynchronization, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, BackgroundCompilation, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, BackgroundCompilation, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
SafepointSynchronize::set_defer_thr_suspend_loop_count();
- if (FLAG_SET_CMDLINE(bool, UseTLAB, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, UseTLAB, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(size_t, NewSizeThreadIncrease, 16 * K) != Flag::SUCCESS) { // 20Kb per thread added to new generation
+ if (FLAG_SET_CMDLINE(size_t, NewSizeThreadIncrease, 16 * K) != JVMFlag::SUCCESS) { // 20Kb per thread added to new generation
return JNI_EINVAL;
}
@@ -2897,7 +2897,7 @@
#ifndef PRODUCT
// -Xprintflags
} else if (match_option(option, "-Xprintflags")) {
- CommandLineFlags::printFlags(tty, false);
+ JVMFlag::printFlags(tty, false);
vm_exit(0);
#endif
// -D
@@ -2932,7 +2932,7 @@
// Out of the box management support
if (match_option(option, "-Dcom.sun.management", &tail)) {
#if INCLUDE_MANAGEMENT
- if (FLAG_SET_CMDLINE(bool, ManagementServer, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, ManagementServer, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
// management agent in module jdk.management.agent
@@ -2957,55 +2957,55 @@
set_mode_flags(_comp);
// -Xshare:dump
} else if (match_option(option, "-Xshare:dump")) {
- if (FLAG_SET_CMDLINE(bool, DumpSharedSpaces, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, DumpSharedSpaces, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
set_mode_flags(_int); // Prevent compilation, which creates objects
// -Xshare:on
} else if (match_option(option, "-Xshare:on")) {
- if (FLAG_SET_CMDLINE(bool, UseSharedSpaces, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, UseSharedSpaces, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
// -Xshare:auto
} else if (match_option(option, "-Xshare:auto")) {
- if (FLAG_SET_CMDLINE(bool, UseSharedSpaces, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, UseSharedSpaces, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, RequireSharedSpaces, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, RequireSharedSpaces, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
// -Xshare:off
} else if (match_option(option, "-Xshare:off")) {
- if (FLAG_SET_CMDLINE(bool, UseSharedSpaces, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, UseSharedSpaces, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, RequireSharedSpaces, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, RequireSharedSpaces, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
// -Xverify
} else if (match_option(option, "-Xverify", &tail)) {
if (strcmp(tail, ":all") == 0 || strcmp(tail, "") == 0) {
- if (FLAG_SET_CMDLINE(bool, BytecodeVerificationLocal, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, BytecodeVerificationLocal, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, BytecodeVerificationRemote, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, BytecodeVerificationRemote, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
} else if (strcmp(tail, ":remote") == 0) {
- if (FLAG_SET_CMDLINE(bool, BytecodeVerificationLocal, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, BytecodeVerificationLocal, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, BytecodeVerificationRemote, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, BytecodeVerificationRemote, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
} else if (strcmp(tail, ":none") == 0) {
- if (FLAG_SET_CMDLINE(bool, BytecodeVerificationLocal, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, BytecodeVerificationLocal, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, BytecodeVerificationRemote, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, BytecodeVerificationRemote, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
} else if (is_bad_option(option, args->ignoreUnrecognized, "verification")) {
@@ -3064,23 +3064,23 @@
// Need to keep consistency of MaxTenuringThreshold and AlwaysTenure/NeverTenure;
// and the last option wins.
} else if (match_option(option, "-XX:+NeverTenure")) {
- if (FLAG_SET_CMDLINE(bool, NeverTenure, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, NeverTenure, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, AlwaysTenure, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, AlwaysTenure, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, markOopDesc::max_age + 1) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, markOopDesc::max_age + 1) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
} else if (match_option(option, "-XX:+AlwaysTenure")) {
- if (FLAG_SET_CMDLINE(bool, NeverTenure, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, NeverTenure, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, AlwaysTenure, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, AlwaysTenure, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, 0) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, 0) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
} else if (match_option(option, "-XX:MaxTenuringThreshold=", &tail)) {
@@ -3091,51 +3091,51 @@
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, max_tenuring_thresh) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, max_tenuring_thresh) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
if (MaxTenuringThreshold == 0) {
- if (FLAG_SET_CMDLINE(bool, NeverTenure, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, NeverTenure, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, AlwaysTenure, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, AlwaysTenure, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
} else {
- if (FLAG_SET_CMDLINE(bool, NeverTenure, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, NeverTenure, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, AlwaysTenure, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, AlwaysTenure, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
}
} else if (match_option(option, "-XX:+DisplayVMOutputToStderr")) {
- if (FLAG_SET_CMDLINE(bool, DisplayVMOutputToStdout, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, DisplayVMOutputToStdout, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, DisplayVMOutputToStderr, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, DisplayVMOutputToStderr, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
} else if (match_option(option, "-XX:+DisplayVMOutputToStdout")) {
- if (FLAG_SET_CMDLINE(bool, DisplayVMOutputToStderr, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, DisplayVMOutputToStderr, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, DisplayVMOutputToStdout, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, DisplayVMOutputToStdout, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
} else if (match_option(option, "-XX:+ExtendedDTraceProbes")) {
#if defined(DTRACE_ENABLED)
- if (FLAG_SET_CMDLINE(bool, ExtendedDTraceProbes, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, ExtendedDTraceProbes, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, DTraceMethodProbes, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, DTraceMethodProbes, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, DTraceAllocProbes, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, DTraceAllocProbes, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, DTraceMonitorProbes, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, DTraceMonitorProbes, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
#else // defined(DTRACE_ENABLED)
@@ -3145,11 +3145,11 @@
#endif // defined(DTRACE_ENABLED)
#ifdef ASSERT
} else if (match_option(option, "-XX:+FullGCALot")) {
- if (FLAG_SET_CMDLINE(bool, FullGCALot, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, FullGCALot, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
// disable scavenge before parallel mark-compact
- if (FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
#endif
@@ -3178,10 +3178,10 @@
// -Xshare:on
// -Xlog:class+path=info
if (PrintSharedArchiveAndExit) {
- if (FLAG_SET_CMDLINE(bool, UseSharedSpaces, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, UseSharedSpaces, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
- if (FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true) != Flag::SUCCESS) {
+ if (FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true) != JVMFlag::SUCCESS) {
return JNI_EINVAL;
}
LogConfiguration::configure_stdout(LogLevel::Info, true, LOG_TAGS(class, path));
@@ -3856,7 +3856,7 @@
continue;
}
if (match_option(option, "-XX:+PrintFlagsInitial")) {
- CommandLineFlags::printFlags(tty, false);
+ JVMFlag::printFlags(tty, false);
vm_exit(0);
}
if (match_option(option, "-XX:NativeMemoryTracking", &tail)) {
@@ -3885,13 +3885,13 @@
#ifndef PRODUCT
if (match_option(option, "-XX:+PrintFlagsWithComments")) {
- CommandLineFlags::printFlags(tty, true);
+ JVMFlag::printFlags(tty, true);
vm_exit(0);
}
#endif
if (match_option(option, "-XX:+UseAppCDS")) {
- Flag* flag = Flag::find_flag("SharedArchiveFile", 17, true, true);
+ JVMFlag* flag = JVMFlag::find_flag("SharedArchiveFile", 17, true, true);
if (flag->is_diagnostic()) {
flag->clear_diagnostic();
}
@@ -3947,9 +3947,9 @@
assert(verify_special_jvm_flags(), "deprecated and obsolete flag table inconsistent");
// Initialize ranges, constraints and writeables
- CommandLineFlagRangeList::init();
- CommandLineFlagConstraintList::init();
- CommandLineFlagWriteableList::init();
+ JVMFlagRangeList::init();
+ JVMFlagConstraintList::init();
+ JVMFlagWriteableList::init();
// If flag "-XX:Flags=flags-file" is used it will be the first option to be processed.
const char* hotspotrc = ".hotspotrc";
@@ -4250,7 +4250,7 @@
#endif // PRODUCT
if (PrintCommandLineFlags) {
- CommandLineFlags::printSetFlags(tty);
+ JVMFlag::printSetFlags(tty);
}
// Apply CPU specific policy for the BiasedLocking
--- a/src/hotspot/share/runtime/arguments.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/runtime/arguments.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -28,6 +28,7 @@
#include "logging/logLevel.hpp"
#include "logging/logTag.hpp"
#include "memory/allocation.hpp"
+#include "runtime/flags/jvmFlag.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/perfData.hpp"
@@ -413,8 +414,8 @@
// Argument parsing
static void do_pd_flag_adjustments();
- static bool parse_argument(const char* arg, Flag::Flags origin);
- static bool process_argument(const char* arg, jboolean ignore_unrecognized, Flag::Flags origin);
+ static bool parse_argument(const char* arg, JVMFlag::Flags origin);
+ static bool process_argument(const char* arg, jboolean ignore_unrecognized, JVMFlag::Flags origin);
static void process_java_launcher_argument(const char*, void*);
static void process_java_compiler_argument(const char* arg);
static jint parse_options_environment_variable(const char* name, ScopedVMInitArgs* vm_args);
@@ -442,7 +443,7 @@
static jint parse_vm_init_args(const JavaVMInitArgs *java_tool_options_args,
const JavaVMInitArgs *java_options_args,
const JavaVMInitArgs *cmd_line_args);
- static jint parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_mod_javabase, Flag::Flags origin);
+ static jint parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_mod_javabase, JVMFlag::Flags origin);
static jint finalize_vm_init_args(bool patch_mod_javabase);
static bool is_bad_option(const JavaVMOption* option, jboolean ignore, const char* option_type);
--- a/src/hotspot/share/runtime/commandLineFlagConstraintList.cpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,368 +0,0 @@
-/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/stringTable.hpp"
-#include "classfile/symbolTable.hpp"
-#include "gc/shared/commandLineFlagConstraintsGC.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/commandLineFlagConstraintList.hpp"
-#include "runtime/commandLineFlagConstraintsCompiler.hpp"
-#include "runtime/commandLineFlagConstraintsRuntime.hpp"
-#include "runtime/os.hpp"
-#include "utilities/macros.hpp"
-#ifdef COMPILER1
-#include "c1/c1_globals.hpp"
-#endif
-#ifdef COMPILER2
-#include "opto/c2_globals.hpp"
-#endif
-
-
-class CommandLineFlagConstraint_bool : public CommandLineFlagConstraint {
- CommandLineFlagConstraintFunc_bool _constraint;
- const bool* _ptr;
-
-public:
- // the "name" argument must be a string literal
- CommandLineFlagConstraint_bool(const char* name, const bool* ptr,
- CommandLineFlagConstraintFunc_bool func,
- ConstraintType type) : CommandLineFlagConstraint(name, type), _constraint(func), _ptr(ptr) {}
-
- Flag::Error apply(bool verbose) {
- bool value = *_ptr;
- return _constraint(value, verbose);
- }
-
- Flag::Error apply_bool(bool value, bool verbose) {
- return _constraint(value, verbose);
- }
-};
-
-class CommandLineFlagConstraint_int : public CommandLineFlagConstraint {
- CommandLineFlagConstraintFunc_int _constraint;
- const int* _ptr;
-
-public:
- // the "name" argument must be a string literal
- CommandLineFlagConstraint_int(const char* name, const int* ptr,
- CommandLineFlagConstraintFunc_int func,
- ConstraintType type) : CommandLineFlagConstraint(name, type), _constraint(func), _ptr(ptr) {}
-
- Flag::Error apply(bool verbose) {
- int value = *_ptr;
- return _constraint(value, verbose);
- }
-
- Flag::Error apply_int(int value, bool verbose) {
- return _constraint(value, verbose);
- }
-};
-
-class CommandLineFlagConstraint_intx : public CommandLineFlagConstraint {
- CommandLineFlagConstraintFunc_intx _constraint;
- const intx* _ptr;
-
-public:
- // the "name" argument must be a string literal
- CommandLineFlagConstraint_intx(const char* name, const intx* ptr,
- CommandLineFlagConstraintFunc_intx func,
- ConstraintType type) : CommandLineFlagConstraint(name, type), _constraint(func), _ptr(ptr) {}
-
- Flag::Error apply(bool verbose) {
- intx value = *_ptr;
- return _constraint(value, verbose);
- }
-
- Flag::Error apply_intx(intx value, bool verbose) {
- return _constraint(value, verbose);
- }
-};
-
-class CommandLineFlagConstraint_uint : public CommandLineFlagConstraint {
- CommandLineFlagConstraintFunc_uint _constraint;
- const uint* _ptr;
-
-public:
- // the "name" argument must be a string literal
- CommandLineFlagConstraint_uint(const char* name, const uint* ptr,
- CommandLineFlagConstraintFunc_uint func,
- ConstraintType type) : CommandLineFlagConstraint(name, type), _constraint(func), _ptr(ptr) {}
-
- Flag::Error apply(bool verbose) {
- uint value = *_ptr;
- return _constraint(value, verbose);
- }
-
- Flag::Error apply_uint(uint value, bool verbose) {
- return _constraint(value, verbose);
- }
-};
-
-class CommandLineFlagConstraint_uintx : public CommandLineFlagConstraint {
- CommandLineFlagConstraintFunc_uintx _constraint;
- const uintx* _ptr;
-
-public:
- // the "name" argument must be a string literal
- CommandLineFlagConstraint_uintx(const char* name, const uintx* ptr,
- CommandLineFlagConstraintFunc_uintx func,
- ConstraintType type) : CommandLineFlagConstraint(name, type), _constraint(func), _ptr(ptr) {}
-
- Flag::Error apply(bool verbose) {
- uintx value = *_ptr;
- return _constraint(value, verbose);
- }
-
- Flag::Error apply_uintx(uintx value, bool verbose) {
- return _constraint(value, verbose);
- }
-};
-
-class CommandLineFlagConstraint_uint64_t : public CommandLineFlagConstraint {
- CommandLineFlagConstraintFunc_uint64_t _constraint;
- const uint64_t* _ptr;
-
-public:
- // the "name" argument must be a string literal
- CommandLineFlagConstraint_uint64_t(const char* name, const uint64_t* ptr,
- CommandLineFlagConstraintFunc_uint64_t func,
- ConstraintType type) : CommandLineFlagConstraint(name, type), _constraint(func), _ptr(ptr) {}
-
- Flag::Error apply(bool verbose) {
- uint64_t value = *_ptr;
- return _constraint(value, verbose);
- }
-
- Flag::Error apply_uint64_t(uint64_t value, bool verbose) {
- return _constraint(value, verbose);
- }
-};
-
-class CommandLineFlagConstraint_size_t : public CommandLineFlagConstraint {
- CommandLineFlagConstraintFunc_size_t _constraint;
- const size_t* _ptr;
-public:
- // the "name" argument must be a string literal
- CommandLineFlagConstraint_size_t(const char* name, const size_t* ptr,
- CommandLineFlagConstraintFunc_size_t func,
- ConstraintType type) : CommandLineFlagConstraint(name, type), _constraint(func), _ptr(ptr) {}
-
- Flag::Error apply(bool verbose) {
- size_t value = *_ptr;
- return _constraint(value, verbose);
- }
-
- Flag::Error apply_size_t(size_t value, bool verbose) {
- return _constraint(value, verbose);
- }
-};
-
-class CommandLineFlagConstraint_double : public CommandLineFlagConstraint {
- CommandLineFlagConstraintFunc_double _constraint;
- const double* _ptr;
-
-public:
- // the "name" argument must be a string literal
- CommandLineFlagConstraint_double(const char* name, const double* ptr,
- CommandLineFlagConstraintFunc_double func,
- ConstraintType type) : CommandLineFlagConstraint(name, type), _constraint(func), _ptr(ptr) {}
-
- Flag::Error apply(bool verbose) {
- double value = *_ptr;
- return _constraint(value, verbose);
- }
-
- Flag::Error apply_double(double value, bool verbose) {
- return _constraint(value, verbose);
- }
-};
-
-// No constraint emitting
-void emit_constraint_no(...) { /* NOP */ }
-
-// No constraint emitting if function argument is NOT provided
-void emit_constraint_bool(const char* /*name*/, const bool* /*value*/) { /* NOP */ }
-void emit_constraint_ccstr(const char* /*name*/, const ccstr* /*value*/) { /* NOP */ }
-void emit_constraint_ccstrlist(const char* /*name*/, const ccstrlist* /*value*/) { /* NOP */ }
-void emit_constraint_int(const char* /*name*/, const int* /*value*/) { /* NOP */ }
-void emit_constraint_intx(const char* /*name*/, const intx* /*value*/) { /* NOP */ }
-void emit_constraint_uint(const char* /*name*/, const uint* /*value*/) { /* NOP */ }
-void emit_constraint_uintx(const char* /*name*/, const uintx* /*value*/) { /* NOP */ }
-void emit_constraint_uint64_t(const char* /*name*/, const uint64_t* /*value*/) { /* NOP */ }
-void emit_constraint_size_t(const char* /*name*/, const size_t* /*value*/) { /* NOP */ }
-void emit_constraint_double(const char* /*name*/, const double* /*value*/) { /* NOP */ }
-
-// CommandLineFlagConstraint emitting code functions if function argument is provided
-void emit_constraint_bool(const char* name, const bool* ptr, CommandLineFlagConstraintFunc_bool func, CommandLineFlagConstraint::ConstraintType type) {
- CommandLineFlagConstraintList::add(new CommandLineFlagConstraint_bool(name, ptr, func, type));
-}
-void emit_constraint_int(const char* name, const int* ptr, CommandLineFlagConstraintFunc_int func, CommandLineFlagConstraint::ConstraintType type) {
- CommandLineFlagConstraintList::add(new CommandLineFlagConstraint_int(name, ptr, func, type));
-}
-void emit_constraint_intx(const char* name, const intx* ptr, CommandLineFlagConstraintFunc_intx func, CommandLineFlagConstraint::ConstraintType type) {
- CommandLineFlagConstraintList::add(new CommandLineFlagConstraint_intx(name, ptr, func, type));
-}
-void emit_constraint_uint(const char* name, const uint* ptr, CommandLineFlagConstraintFunc_uint func, CommandLineFlagConstraint::ConstraintType type) {
- CommandLineFlagConstraintList::add(new CommandLineFlagConstraint_uint(name, ptr, func, type));
-}
-void emit_constraint_uintx(const char* name, const uintx* ptr, CommandLineFlagConstraintFunc_uintx func, CommandLineFlagConstraint::ConstraintType type) {
- CommandLineFlagConstraintList::add(new CommandLineFlagConstraint_uintx(name, ptr, func, type));
-}
-void emit_constraint_uint64_t(const char* name, const uint64_t* ptr, CommandLineFlagConstraintFunc_uint64_t func, CommandLineFlagConstraint::ConstraintType type) {
- CommandLineFlagConstraintList::add(new CommandLineFlagConstraint_uint64_t(name, ptr, func, type));
-}
-void emit_constraint_size_t(const char* name, const size_t* ptr, CommandLineFlagConstraintFunc_size_t func, CommandLineFlagConstraint::ConstraintType type) {
- CommandLineFlagConstraintList::add(new CommandLineFlagConstraint_size_t(name, ptr, func, type));
-}
-void emit_constraint_double(const char* name, const double* ptr, CommandLineFlagConstraintFunc_double func, CommandLineFlagConstraint::ConstraintType type) {
- CommandLineFlagConstraintList::add(new CommandLineFlagConstraint_double(name, ptr, func, type));
-}
-
-// Generate code to call emit_constraint_xxx function
-#define EMIT_CONSTRAINT_PRODUCT_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
-#define EMIT_CONSTRAINT_COMMERCIAL_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
-#define EMIT_CONSTRAINT_DIAGNOSTIC_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
-#define EMIT_CONSTRAINT_EXPERIMENTAL_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
-#define EMIT_CONSTRAINT_MANAGEABLE_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
-#define EMIT_CONSTRAINT_PRODUCT_RW_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
-#define EMIT_CONSTRAINT_PD_PRODUCT_FLAG(type, name, doc) ); emit_constraint_##type(#name,&name
-#define EMIT_CONSTRAINT_PD_DIAGNOSTIC_FLAG(type, name, doc) ); emit_constraint_##type(#name,&name
-#ifndef PRODUCT
-#define EMIT_CONSTRAINT_DEVELOPER_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
-#define EMIT_CONSTRAINT_PD_DEVELOPER_FLAG(type, name, doc) ); emit_constraint_##type(#name,&name
-#define EMIT_CONSTRAINT_NOTPRODUCT_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
-#else
-#define EMIT_CONSTRAINT_DEVELOPER_FLAG(type, name, value, doc) ); emit_constraint_no(#name,&name
-#define EMIT_CONSTRAINT_PD_DEVELOPER_FLAG(type, name, doc) ); emit_constraint_no(#name,&name
-#define EMIT_CONSTRAINT_NOTPRODUCT_FLAG(type, name, value, doc) ); emit_constraint_no(#name,&name
-#endif
-#ifdef _LP64
-#define EMIT_CONSTRAINT_LP64_PRODUCT_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
-#else
-#define EMIT_CONSTRAINT_LP64_PRODUCT_FLAG(type, name, value, doc) ); emit_constraint_no(#name,&name
-#endif
-
-// Generate func argument to pass into emit_constraint_xxx functions
-#define EMIT_CONSTRAINT_CHECK(func, type) , func, CommandLineFlagConstraint::type
-
-// the "name" argument must be a string literal
-#define INITIAL_CONSTRAINTS_SIZE 72
-GrowableArray<CommandLineFlagConstraint*>* CommandLineFlagConstraintList::_constraints = NULL;
-CommandLineFlagConstraint::ConstraintType CommandLineFlagConstraintList::_validating_type = CommandLineFlagConstraint::AtParse;
-
-// Check the ranges of all flags that have them or print them out and exit if requested
-void CommandLineFlagConstraintList::init(void) {
- _constraints = new (ResourceObj::C_HEAP, mtArguments) GrowableArray<CommandLineFlagConstraint*>(INITIAL_CONSTRAINTS_SIZE, true);
-
- emit_constraint_no(NULL VM_FLAGS(EMIT_CONSTRAINT_DEVELOPER_FLAG,
- EMIT_CONSTRAINT_PD_DEVELOPER_FLAG,
- EMIT_CONSTRAINT_PRODUCT_FLAG,
- EMIT_CONSTRAINT_PD_PRODUCT_FLAG,
- EMIT_CONSTRAINT_DIAGNOSTIC_FLAG,
- EMIT_CONSTRAINT_PD_DIAGNOSTIC_FLAG,
- EMIT_CONSTRAINT_EXPERIMENTAL_FLAG,
- EMIT_CONSTRAINT_NOTPRODUCT_FLAG,
- EMIT_CONSTRAINT_MANAGEABLE_FLAG,
- EMIT_CONSTRAINT_PRODUCT_RW_FLAG,
- EMIT_CONSTRAINT_LP64_PRODUCT_FLAG,
- IGNORE_RANGE,
- EMIT_CONSTRAINT_CHECK,
- IGNORE_WRITEABLE));
-
- EMIT_CONSTRAINTS_FOR_GLOBALS_EXT
-
- emit_constraint_no(NULL ARCH_FLAGS(EMIT_CONSTRAINT_DEVELOPER_FLAG,
- EMIT_CONSTRAINT_PRODUCT_FLAG,
- EMIT_CONSTRAINT_DIAGNOSTIC_FLAG,
- EMIT_CONSTRAINT_EXPERIMENTAL_FLAG,
- EMIT_CONSTRAINT_NOTPRODUCT_FLAG,
- IGNORE_RANGE,
- EMIT_CONSTRAINT_CHECK,
- IGNORE_WRITEABLE));
-
-
-#ifdef COMPILER1
- emit_constraint_no(NULL C1_FLAGS(EMIT_CONSTRAINT_DEVELOPER_FLAG,
- EMIT_CONSTRAINT_PD_DEVELOPER_FLAG,
- EMIT_CONSTRAINT_PRODUCT_FLAG,
- EMIT_CONSTRAINT_PD_PRODUCT_FLAG,
- EMIT_CONSTRAINT_DIAGNOSTIC_FLAG,
- EMIT_CONSTRAINT_PD_DIAGNOSTIC_FLAG,
- EMIT_CONSTRAINT_NOTPRODUCT_FLAG,
- IGNORE_RANGE,
- EMIT_CONSTRAINT_CHECK,
- IGNORE_WRITEABLE));
-#endif // COMPILER1
-
-#ifdef COMPILER2
- emit_constraint_no(NULL C2_FLAGS(EMIT_CONSTRAINT_DEVELOPER_FLAG,
- EMIT_CONSTRAINT_PD_DEVELOPER_FLAG,
- EMIT_CONSTRAINT_PRODUCT_FLAG,
- EMIT_CONSTRAINT_PD_PRODUCT_FLAG,
- EMIT_CONSTRAINT_DIAGNOSTIC_FLAG,
- EMIT_CONSTRAINT_PD_DIAGNOSTIC_FLAG,
- EMIT_CONSTRAINT_EXPERIMENTAL_FLAG,
- EMIT_CONSTRAINT_NOTPRODUCT_FLAG,
- IGNORE_RANGE,
- EMIT_CONSTRAINT_CHECK,
- IGNORE_WRITEABLE));
-#endif // COMPILER2
-}
-
-CommandLineFlagConstraint* CommandLineFlagConstraintList::find(const char* name) {
- CommandLineFlagConstraint* found = NULL;
- for (int i=0; i<length(); i++) {
- CommandLineFlagConstraint* constraint = at(i);
- if (strcmp(constraint->name(), name) == 0) {
- found = constraint;
- break;
- }
- }
- return found;
-}
-
-// Find constraints by name and return only if found constraint's type is equal or lower than current validating type.
-CommandLineFlagConstraint* CommandLineFlagConstraintList::find_if_needs_check(const char* name) {
- CommandLineFlagConstraint* found = NULL;
- CommandLineFlagConstraint* constraint = find(name);
- if (constraint && (constraint->type() <= _validating_type)) {
- found = constraint;
- }
- return found;
-}
-
-// Check constraints for specific constraint type.
-bool CommandLineFlagConstraintList::check_constraints(CommandLineFlagConstraint::ConstraintType type) {
- guarantee(type > _validating_type, "Constraint check is out of order.");
- _validating_type = type;
-
- bool status = true;
- for (int i=0; i<length(); i++) {
- CommandLineFlagConstraint* constraint = at(i);
- if (type != constraint->type()) continue;
- if (constraint->apply(true) != Flag::SUCCESS) status = false;
- }
- return status;
-}
--- a/src/hotspot/share/runtime/commandLineFlagConstraintList.hpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,101 +0,0 @@
-/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_RUNTIME_COMMANDLINEFLAGCONSTRAINTLIST_HPP
-#define SHARE_VM_RUNTIME_COMMANDLINEFLAGCONSTRAINTLIST_HPP
-
-#include "runtime/globals.hpp"
-#include "utilities/growableArray.hpp"
-
-/*
- * Here we have a mechanism for extracting constraints (as custom functions) for flags,
- * which otherwise can not be expressed via simple range check, specified in flag macro tables.
- *
- * An example of a constraint is "flag1 < flag2" where both flag1 and flag2 can change.
- *
- * See runtime "runtime/commandLineFlagConstraintsCompiler.hpp",
- * "runtime/commandLineFlagConstraintsGC.hpp" and
- * "runtime/commandLineFlagConstraintsRuntime.hpp" for the functions themselves.
- */
-
-typedef Flag::Error (*CommandLineFlagConstraintFunc_bool)(bool value, bool verbose);
-typedef Flag::Error (*CommandLineFlagConstraintFunc_int)(int value, bool verbose);
-typedef Flag::Error (*CommandLineFlagConstraintFunc_intx)(intx value, bool verbose);
-typedef Flag::Error (*CommandLineFlagConstraintFunc_uint)(uint value, bool verbose);
-typedef Flag::Error (*CommandLineFlagConstraintFunc_uintx)(uintx value, bool verbose);
-typedef Flag::Error (*CommandLineFlagConstraintFunc_uint64_t)(uint64_t value, bool verbose);
-typedef Flag::Error (*CommandLineFlagConstraintFunc_size_t)(size_t value, bool verbose);
-typedef Flag::Error (*CommandLineFlagConstraintFunc_double)(double value, bool verbose);
-
-class CommandLineFlagConstraint : public CHeapObj<mtArguments> {
-public:
- // During VM initialization, constraint validation will be done order of ConstraintType.
- enum ConstraintType {
- // Will be validated during argument processing (Arguments::parse_argument).
- AtParse = 0,
- // Will be validated inside Threads::create_vm(), right after Arguments::apply_ergo().
- AfterErgo = 1,
- // Will be validated inside universe_init(), right after Metaspace::global_initialize().
- AfterMemoryInit = 2
- };
-
-private:
- const char* _name;
- ConstraintType _validate_type;
-
-public:
- // the "name" argument must be a string literal
- CommandLineFlagConstraint(const char* name, ConstraintType type) { _name=name; _validate_type=type; };
- ~CommandLineFlagConstraint() {};
- const char* name() const { return _name; }
- ConstraintType type() const { return _validate_type; }
- virtual Flag::Error apply(bool verbose = true) { ShouldNotReachHere(); return Flag::ERR_OTHER; };
- virtual Flag::Error apply_bool(bool value, bool verbose = true) { ShouldNotReachHere(); return Flag::ERR_OTHER; };
- virtual Flag::Error apply_int(int value, bool verbose = true) { ShouldNotReachHere(); return Flag::ERR_OTHER; };
- virtual Flag::Error apply_intx(intx value, bool verbose = true) { ShouldNotReachHere(); return Flag::ERR_OTHER; };
- virtual Flag::Error apply_uint(uint value, bool verbose = true) { ShouldNotReachHere(); return Flag::ERR_OTHER; };
- virtual Flag::Error apply_uintx(uintx value, bool verbose = true) { ShouldNotReachHere(); return Flag::ERR_OTHER; };
- virtual Flag::Error apply_uint64_t(uint64_t value, bool verbose = true) { ShouldNotReachHere(); return Flag::ERR_OTHER; };
- virtual Flag::Error apply_size_t(size_t value, bool verbose = true) { ShouldNotReachHere(); return Flag::ERR_OTHER; };
- virtual Flag::Error apply_double(double value, bool verbose = true) { ShouldNotReachHere(); return Flag::ERR_OTHER; };
-};
-
-class CommandLineFlagConstraintList : public AllStatic {
-private:
- static GrowableArray<CommandLineFlagConstraint*>* _constraints;
- // Latest constraint validation type.
- static CommandLineFlagConstraint::ConstraintType _validating_type;
-public:
- static void init();
- static int length() { return (_constraints != NULL) ? _constraints->length() : 0; }
- static CommandLineFlagConstraint* at(int i) { return (_constraints != NULL) ? _constraints->at(i) : NULL; }
- static CommandLineFlagConstraint* find(const char* name);
- static CommandLineFlagConstraint* find_if_needs_check(const char* name);
- static void add(CommandLineFlagConstraint* constraint) { _constraints->append(constraint); }
- // True if 'AfterErgo' or later constraint functions are validated.
- static bool validated_after_ergo() { return _validating_type >= CommandLineFlagConstraint::AfterErgo; };
- static bool check_constraints(CommandLineFlagConstraint::ConstraintType type);
-};
-
-#endif /* SHARE_VM_RUNTIME_COMMANDLINEFLAGCONSTRAINTLIST_HPP */
--- a/src/hotspot/share/runtime/commandLineFlagConstraintsCompiler.cpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,407 +0,0 @@
-/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "code/relocInfo.hpp"
-#include "compiler/compilerDefinitions.hpp"
-#include "oops/metadata.hpp"
-#include "runtime/os.hpp"
-#include "interpreter/invocationCounter.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/commandLineFlagConstraintsCompiler.hpp"
-#include "runtime/commandLineFlagRangeList.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/globals_extension.hpp"
-#include "utilities/defaultStream.hpp"
-
-Flag::Error AliasLevelConstraintFunc(intx value, bool verbose) {
- if ((value <= 1) && (Arguments::mode() == Arguments::_comp || Arguments::mode() == Arguments::_mixed)) {
- CommandLineError::print(verbose,
- "AliasLevel (" INTX_FORMAT ") is not "
- "compatible with -Xcomp or -Xmixed\n",
- value);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-/**
- * Validate the minimum number of compiler threads needed to run the
- * JVM. The following configurations are possible.
- *
- * 1) The JVM is build using an interpreter only. As a result, the minimum number of
- * compiler threads is 0.
- * 2) The JVM is build using the compiler(s) and tiered compilation is disabled. As
- * a result, either C1 or C2 is used, so the minimum number of compiler threads is 1.
- * 3) The JVM is build using the compiler(s) and tiered compilation is enabled. However,
- * the option "TieredStopAtLevel < CompLevel_full_optimization". As a result, only
- * C1 can be used, so the minimum number of compiler threads is 1.
- * 4) The JVM is build using the compilers and tiered compilation is enabled. The option
- * 'TieredStopAtLevel = CompLevel_full_optimization' (the default value). As a result,
- * the minimum number of compiler threads is 2.
- */
-Flag::Error CICompilerCountConstraintFunc(intx value, bool verbose) {
- int min_number_of_compiler_threads = 0;
-#if !defined(COMPILER1) && !defined(COMPILER2) && !INCLUDE_JVMCI
- // case 1
-#else
- if (!TieredCompilation || (TieredStopAtLevel < CompLevel_full_optimization)) {
- min_number_of_compiler_threads = 1; // case 2 or case 3
- } else {
- min_number_of_compiler_threads = 2; // case 4 (tiered)
- }
-#endif
-
- // The default CICompilerCount's value is CI_COMPILER_COUNT.
- // With a client VM, -XX:+TieredCompilation causes TieredCompilation
- // to be true here (the option is validated later) and
- // min_number_of_compiler_threads to exceed CI_COMPILER_COUNT.
- min_number_of_compiler_threads = MIN2(min_number_of_compiler_threads, CI_COMPILER_COUNT);
-
- if (value < (intx)min_number_of_compiler_threads) {
- CommandLineError::print(verbose,
- "CICompilerCount (" INTX_FORMAT ") must be "
- "at least %d \n",
- value, min_number_of_compiler_threads);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error AllocatePrefetchDistanceConstraintFunc(intx value, bool verbose) {
- if (value < 0 || value > 512) {
- CommandLineError::print(verbose,
- "AllocatePrefetchDistance (" INTX_FORMAT ") must be "
- "between 0 and " INTX_FORMAT "\n",
- AllocatePrefetchDistance, 512);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- return Flag::SUCCESS;
-}
-
-Flag::Error AllocatePrefetchStepSizeConstraintFunc(intx value, bool verbose) {
- if (AllocatePrefetchStyle == 3) {
- if (value % wordSize != 0) {
- CommandLineError::print(verbose,
- "AllocatePrefetchStepSize (" INTX_FORMAT ") must be multiple of %d\n",
- value, wordSize);
- return Flag::VIOLATES_CONSTRAINT;
- }
- }
- return Flag::SUCCESS;
-}
-
-Flag::Error AllocatePrefetchInstrConstraintFunc(intx value, bool verbose) {
- intx max_value = max_intx;
-#if defined(SPARC)
- max_value = 1;
-#elif defined(X86)
- max_value = 3;
-#endif
- if (value < 0 || value > max_value) {
- CommandLineError::print(verbose,
- "AllocatePrefetchInstr (" INTX_FORMAT ") must be "
- "between 0 and " INTX_FORMAT "\n", value, max_value);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- return Flag::SUCCESS;
-}
-
-Flag::Error CompileThresholdConstraintFunc(intx value, bool verbose) {
- if (value < 0 || value > INT_MAX >> InvocationCounter::count_shift) {
- CommandLineError::print(verbose,
- "CompileThreshold (" INTX_FORMAT ") "
- "must be between 0 and %d\n",
- value,
- INT_MAX >> InvocationCounter::count_shift);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- return Flag::SUCCESS;
-}
-
-Flag::Error OnStackReplacePercentageConstraintFunc(intx value, bool verbose) {
- int backward_branch_limit;
- if (ProfileInterpreter) {
- if (OnStackReplacePercentage < InterpreterProfilePercentage) {
- CommandLineError::print(verbose,
- "OnStackReplacePercentage (" INTX_FORMAT ") must be "
- "larger than InterpreterProfilePercentage (" INTX_FORMAT ")\n",
- OnStackReplacePercentage, InterpreterProfilePercentage);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- backward_branch_limit = ((CompileThreshold * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100)
- << InvocationCounter::count_shift;
-
- if (backward_branch_limit < 0) {
- CommandLineError::print(verbose,
- "CompileThreshold * (InterpreterProfilePercentage - OnStackReplacePercentage) / 100 = "
- INTX_FORMAT " "
- "must be between 0 and " INTX_FORMAT ", try changing "
- "CompileThreshold, InterpreterProfilePercentage, and/or OnStackReplacePercentage\n",
- (CompileThreshold * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100,
- INT_MAX >> InvocationCounter::count_shift);
- return Flag::VIOLATES_CONSTRAINT;
- }
- } else {
- if (OnStackReplacePercentage < 0 ) {
- CommandLineError::print(verbose,
- "OnStackReplacePercentage (" INTX_FORMAT ") must be "
- "non-negative\n", OnStackReplacePercentage);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- backward_branch_limit = ((CompileThreshold * OnStackReplacePercentage) / 100)
- << InvocationCounter::count_shift;
-
- if (backward_branch_limit < 0) {
- CommandLineError::print(verbose,
- "CompileThreshold * OnStackReplacePercentage / 100 = " INTX_FORMAT " "
- "must be between 0 and " INTX_FORMAT ", try changing "
- "CompileThreshold and/or OnStackReplacePercentage\n",
- (CompileThreshold * OnStackReplacePercentage) / 100,
- INT_MAX >> InvocationCounter::count_shift);
- return Flag::VIOLATES_CONSTRAINT;
- }
- }
- return Flag::SUCCESS;
-}
-
-Flag::Error CodeCacheSegmentSizeConstraintFunc(uintx value, bool verbose) {
- if (CodeCacheSegmentSize < (uintx)CodeEntryAlignment) {
- CommandLineError::print(verbose,
- "CodeCacheSegmentSize (" UINTX_FORMAT ") must be "
- "larger than or equal to CodeEntryAlignment (" INTX_FORMAT ") "
- "to align entry points\n",
- CodeCacheSegmentSize, CodeEntryAlignment);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- if (CodeCacheSegmentSize < sizeof(jdouble)) {
- CommandLineError::print(verbose,
- "CodeCacheSegmentSize (" UINTX_FORMAT ") must be "
- "at least " SIZE_FORMAT " to align constants\n",
- CodeCacheSegmentSize, sizeof(jdouble));
- return Flag::VIOLATES_CONSTRAINT;
- }
-
-#ifdef COMPILER2
- if (CodeCacheSegmentSize < (uintx)OptoLoopAlignment) {
- CommandLineError::print(verbose,
- "CodeCacheSegmentSize (" UINTX_FORMAT ") must be "
- "larger than or equal to OptoLoopAlignment (" INTX_FORMAT ") "
- "to align inner loops\n",
- CodeCacheSegmentSize, OptoLoopAlignment);
- return Flag::VIOLATES_CONSTRAINT;
- }
-#endif
-
- return Flag::SUCCESS;
-}
-
-Flag::Error CompilerThreadPriorityConstraintFunc(intx value, bool verbose) {
-#ifdef SOLARIS
- if ((value < MinimumPriority || value > MaximumPriority) &&
- (value != -1) && (value != -FXCriticalPriority)) {
- CommandLineError::print(verbose,
- "CompileThreadPriority (" INTX_FORMAT ") must be "
- "between %d and %d inclusively or -1 (means no change) "
- "or %d (special value for critical thread class/priority)\n",
- value, MinimumPriority, MaximumPriority, -FXCriticalPriority);
- return Flag::VIOLATES_CONSTRAINT;
- }
-#endif
-
- return Flag::SUCCESS;
-}
-
-Flag::Error CodeEntryAlignmentConstraintFunc(intx value, bool verbose) {
-#ifdef SPARC
- if (CodeEntryAlignment % relocInfo::addr_unit() != 0) {
- CommandLineError::print(verbose,
- "CodeEntryAlignment (" INTX_FORMAT ") must be "
- "multiple of NOP size\n", CodeEntryAlignment);
- return Flag::VIOLATES_CONSTRAINT;
- }
-#endif
-
- if (!is_power_of_2(value)) {
- CommandLineError::print(verbose,
- "CodeEntryAlignment (" INTX_FORMAT ") must be "
- "a power of two\n", CodeEntryAlignment);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- if (CodeEntryAlignment < 16) {
- CommandLineError::print(verbose,
- "CodeEntryAlignment (" INTX_FORMAT ") must be "
- "greater than or equal to %d\n",
- CodeEntryAlignment, 16);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- return Flag::SUCCESS;
-}
-
-Flag::Error OptoLoopAlignmentConstraintFunc(intx value, bool verbose) {
- if (!is_power_of_2(value)) {
- CommandLineError::print(verbose,
- "OptoLoopAlignment (" INTX_FORMAT ") "
- "must be a power of two\n",
- value);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- // Relevant on ppc, s390, sparc. Will be optimized where
- // addr_unit() == 1.
- if (OptoLoopAlignment % relocInfo::addr_unit() != 0) {
- CommandLineError::print(verbose,
- "OptoLoopAlignment (" INTX_FORMAT ") must be "
- "multiple of NOP size (%d)\n",
- value, relocInfo::addr_unit());
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- return Flag::SUCCESS;
-}
-
-Flag::Error ArraycopyDstPrefetchDistanceConstraintFunc(uintx value, bool verbose) {
- if (value >= 4032) {
- CommandLineError::print(verbose,
- "ArraycopyDstPrefetchDistance (" UINTX_FORMAT ") must be"
- "between 0 and 4031\n", value);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- return Flag::SUCCESS;
-}
-
-Flag::Error ArraycopySrcPrefetchDistanceConstraintFunc(uintx value, bool verbose) {
- if (value >= 4032) {
- CommandLineError::print(verbose,
- "ArraycopySrcPrefetchDistance (" UINTX_FORMAT ") must be"
- "between 0 and 4031\n", value);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- return Flag::SUCCESS;
-}
-
-Flag::Error TypeProfileLevelConstraintFunc(uintx value, bool verbose) {
- for (int i = 0; i < 3; i++) {
- if (value % 10 > 2) {
- CommandLineError::print(verbose,
- "Invalid value (" UINTX_FORMAT ") "
- "in TypeProfileLevel at position %d\n", value, i);
- return Flag::VIOLATES_CONSTRAINT;
- }
- value = value / 10;
- }
-
- return Flag::SUCCESS;
-}
-
-Flag::Error InitArrayShortSizeConstraintFunc(intx value, bool verbose) {
- if (value % BytesPerLong != 0) {
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-#ifdef COMPILER2
-Flag::Error InteriorEntryAlignmentConstraintFunc(intx value, bool verbose) {
- if (InteriorEntryAlignment > CodeEntryAlignment) {
- CommandLineError::print(verbose,
- "InteriorEntryAlignment (" INTX_FORMAT ") must be "
- "less than or equal to CodeEntryAlignment (" INTX_FORMAT ")\n",
- InteriorEntryAlignment, CodeEntryAlignment);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
-#ifdef SPARC
- if (InteriorEntryAlignment % relocInfo::addr_unit() != 0) {
- CommandLineError::print(verbose,
- "InteriorEntryAlignment (" INTX_FORMAT ") must be "
- "multiple of NOP size\n");
- return Flag::VIOLATES_CONSTRAINT;
- }
-#endif
-
- if (!is_power_of_2(value)) {
- CommandLineError::print(verbose,
- "InteriorEntryAlignment (" INTX_FORMAT ") must be "
- "a power of two\n", InteriorEntryAlignment);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- int minimum_alignment = 16;
-#if defined(SPARC) || (defined(X86) && !defined(AMD64))
- minimum_alignment = 4;
-#elif defined(S390)
- minimum_alignment = 2;
-#endif
-
- if (InteriorEntryAlignment < minimum_alignment) {
- CommandLineError::print(verbose,
- "InteriorEntryAlignment (" INTX_FORMAT ") must be "
- "greater than or equal to %d\n",
- InteriorEntryAlignment, minimum_alignment);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- return Flag::SUCCESS;
-}
-
-Flag::Error NodeLimitFudgeFactorConstraintFunc(intx value, bool verbose) {
- if (value < MaxNodeLimit * 2 / 100 || value > MaxNodeLimit * 40 / 100) {
- CommandLineError::print(verbose,
- "NodeLimitFudgeFactor must be between 2%% and 40%% "
- "of MaxNodeLimit (" INTX_FORMAT ")\n",
- MaxNodeLimit);
- return Flag::VIOLATES_CONSTRAINT;
- }
-
- return Flag::SUCCESS;
-}
-#endif // COMPILER2
-
-Flag::Error RTMTotalCountIncrRateConstraintFunc(int value, bool verbose) {
-#if INCLUDE_RTM_OPT
- if (UseRTMLocking && !is_power_of_2(RTMTotalCountIncrRate)) {
- CommandLineError::print(verbose,
- "RTMTotalCountIncrRate (" INTX_FORMAT
- ") must be a power of 2, resetting it to 64\n",
- RTMTotalCountIncrRate);
- FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
- }
-#endif
-
- return Flag::SUCCESS;
-}
--- a/src/hotspot/share/runtime/commandLineFlagConstraintsCompiler.hpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_RUNTIME_COMMANDLINEFLAGCONSTRAINTSCOMPILER_HPP
-#define SHARE_VM_RUNTIME_COMMANDLINEFLAGCONSTRAINTSCOMPILER_HPP
-
-#include "runtime/globals.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-/*
- * Here we have compiler arguments constraints functions, which are called automatically
- * whenever flag's value changes. If the constraint fails the function should return
- * an appropriate error value.
- */
-
-Flag::Error AliasLevelConstraintFunc(intx value, bool verbose);
-
-Flag::Error CICompilerCountConstraintFunc(intx value, bool verbose);
-
-Flag::Error AllocatePrefetchDistanceConstraintFunc(intx value, bool verbose);
-
-Flag::Error AllocatePrefetchInstrConstraintFunc(intx value, bool verbose);
-
-Flag::Error AllocatePrefetchStepSizeConstraintFunc(intx value, bool verbose);
-
-Flag::Error CompileThresholdConstraintFunc(intx value, bool verbose);
-
-Flag::Error OnStackReplacePercentageConstraintFunc(intx value, bool verbose);
-
-Flag::Error CodeCacheSegmentSizeConstraintFunc(uintx value, bool verbose);
-
-Flag::Error CompilerThreadPriorityConstraintFunc(intx value, bool verbose);
-
-Flag::Error CodeEntryAlignmentConstraintFunc(intx value, bool verbose);
-
-Flag::Error OptoLoopAlignmentConstraintFunc(intx value, bool verbose);
-
-Flag::Error ArraycopyDstPrefetchDistanceConstraintFunc(uintx value, bool verbose);
-
-Flag::Error ArraycopySrcPrefetchDistanceConstraintFunc(uintx value, bool verbose);
-
-Flag::Error TypeProfileLevelConstraintFunc(uintx value, bool verbose);
-
-Flag::Error InitArrayShortSizeConstraintFunc(intx value, bool verbose);
-
-#ifdef COMPILER2
-Flag::Error InteriorEntryAlignmentConstraintFunc(intx value, bool verbose);
-
-Flag::Error NodeLimitFudgeFactorConstraintFunc(intx value, bool verbose);
-#endif
-
-Flag::Error RTMTotalCountIncrRateConstraintFunc(int value, bool verbose);
-
-#endif /* SHARE_VM_RUNTIME_COMMANDLINEFLAGCONSTRAINTSCOMPILER_HPP */
--- a/src/hotspot/share/runtime/commandLineFlagConstraintsRuntime.cpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,143 +0,0 @@
-/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/commandLineFlagConstraintsRuntime.hpp"
-#include "runtime/commandLineFlagRangeList.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/safepointMechanism.hpp"
-#include "runtime/task.hpp"
-#include "utilities/defaultStream.hpp"
-
-Flag::Error ObjectAlignmentInBytesConstraintFunc(intx value, bool verbose) {
- if (!is_power_of_2(value)) {
- CommandLineError::print(verbose,
- "ObjectAlignmentInBytes (" INTX_FORMAT ") must be "
- "power of 2\n",
- value);
- return Flag::VIOLATES_CONSTRAINT;
- }
- // In case page size is very small.
- if (value >= (intx)os::vm_page_size()) {
- CommandLineError::print(verbose,
- "ObjectAlignmentInBytes (" INTX_FORMAT ") must be "
- "less than page size (" INTX_FORMAT ")\n",
- value, (intx)os::vm_page_size());
- return Flag::VIOLATES_CONSTRAINT;
- }
- return Flag::SUCCESS;
-}
-
-// Need to enforce the padding not to break the existing field alignments.
-// It is sufficient to check against the largest type size.
-Flag::Error ContendedPaddingWidthConstraintFunc(intx value, bool verbose) {
- if ((value % BytesPerLong) != 0) {
- CommandLineError::print(verbose,
- "ContendedPaddingWidth (" INTX_FORMAT ") must be "
- "a multiple of %d\n",
- value, BytesPerLong);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error BiasedLockingBulkRebiasThresholdFunc(intx value, bool verbose) {
- if (value > BiasedLockingBulkRevokeThreshold) {
- CommandLineError::print(verbose,
- "BiasedLockingBulkRebiasThreshold (" INTX_FORMAT ") must be "
- "less than or equal to BiasedLockingBulkRevokeThreshold (" INTX_FORMAT ")\n",
- value, BiasedLockingBulkRevokeThreshold);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error BiasedLockingStartupDelayFunc(intx value, bool verbose) {
- if ((value % PeriodicTask::interval_gran) != 0) {
- CommandLineError::print(verbose,
- "BiasedLockingStartupDelay (" INTX_FORMAT ") must be "
- "evenly divisible by PeriodicTask::interval_gran (" INTX_FORMAT ")\n",
- value, PeriodicTask::interval_gran);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error BiasedLockingBulkRevokeThresholdFunc(intx value, bool verbose) {
- if (value < BiasedLockingBulkRebiasThreshold) {
- CommandLineError::print(verbose,
- "BiasedLockingBulkRevokeThreshold (" INTX_FORMAT ") must be "
- "greater than or equal to BiasedLockingBulkRebiasThreshold (" INTX_FORMAT ")\n",
- value, BiasedLockingBulkRebiasThreshold);
- return Flag::VIOLATES_CONSTRAINT;
- } else if ((double)value/(double)BiasedLockingDecayTime > 0.1) {
- CommandLineError::print(verbose,
- "The ratio of BiasedLockingBulkRevokeThreshold (" INTX_FORMAT ")"
- " to BiasedLockingDecayTime (" INTX_FORMAT ") must be "
- "less than or equal to 0.1\n",
- value, BiasedLockingBulkRebiasThreshold);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error BiasedLockingDecayTimeFunc(intx value, bool verbose) {
- if (BiasedLockingBulkRebiasThreshold/(double)value > 0.1) {
- CommandLineError::print(verbose,
- "The ratio of BiasedLockingBulkRebiasThreshold (" INTX_FORMAT ")"
- " to BiasedLockingDecayTime (" INTX_FORMAT ") must be "
- "less than or equal to 0.1\n",
- BiasedLockingBulkRebiasThreshold, value);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error PerfDataSamplingIntervalFunc(intx value, bool verbose) {
- if ((value % PeriodicTask::interval_gran != 0)) {
- CommandLineError::print(verbose,
- "PerfDataSamplingInterval (" INTX_FORMAT ") must be "
- "evenly divisible by PeriodicTask::interval_gran (" INTX_FORMAT ")\n",
- value, PeriodicTask::interval_gran);
- return Flag::VIOLATES_CONSTRAINT;
- } else {
- return Flag::SUCCESS;
- }
-}
-
-Flag::Error ThreadLocalHandshakesConstraintFunc(bool value, bool verbose) {
- if (value) {
- if (!SafepointMechanism::supports_thread_local_poll()) {
- CommandLineError::print(verbose, "ThreadLocalHandshakes not yet supported on this platform\n");
- return Flag::VIOLATES_CONSTRAINT;
- }
- }
- return Flag::SUCCESS;
-}
--- a/src/hotspot/share/runtime/commandLineFlagConstraintsRuntime.hpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_RUNTIME_COMMANDLINEFLAGCONSTRAINTSRUNTIME_HPP
-#define SHARE_VM_RUNTIME_COMMANDLINEFLAGCONSTRAINTSRUNTIME_HPP
-
-#include "runtime/globals.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-/*
- * Here we have runtime arguments constraints functions, which are called automatically
- * whenever flag's value changes. If the constraint fails the function should return
- * an appropriate error value.
- */
-
-Flag::Error ObjectAlignmentInBytesConstraintFunc(intx value, bool verbose);
-
-Flag::Error ContendedPaddingWidthConstraintFunc(intx value, bool verbose);
-
-Flag::Error BiasedLockingBulkRebiasThresholdFunc(intx value, bool verbose);
-Flag::Error BiasedLockingStartupDelayFunc(intx value, bool verbose);
-Flag::Error BiasedLockingBulkRevokeThresholdFunc(intx value, bool verbose);
-Flag::Error BiasedLockingDecayTimeFunc(intx value, bool verbose);
-
-Flag::Error PerfDataSamplingIntervalFunc(intx value, bool verbose);
-
-Flag::Error ThreadLocalHandshakesConstraintFunc(bool value, bool verbose);
-
-
-#endif /* SHARE_VM_RUNTIME_COMMANDLINEFLAGCONSTRAINTSRUNTIME_HPP */
--- a/src/hotspot/share/runtime/commandLineFlagRangeList.cpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,441 +0,0 @@
-/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "jvm.h"
-#include "classfile/stringTable.hpp"
-#include "classfile/symbolTable.hpp"
-#include "gc/shared/referenceProcessor.hpp"
-#include "oops/markOop.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/commandLineFlagConstraintList.hpp"
-#include "runtime/commandLineFlagRangeList.hpp"
-#include "runtime/globals_extension.hpp"
-#include "runtime/os.hpp"
-#include "runtime/task.hpp"
-#include "utilities/defaultStream.hpp"
-#include "utilities/macros.hpp"
-
-void CommandLineError::print(bool verbose, const char* msg, ...) {
- if (verbose) {
- va_list listPointer;
- va_start(listPointer, msg);
- jio_vfprintf(defaultStream::error_stream(), msg, listPointer);
- va_end(listPointer);
- }
-}
-
-class CommandLineFlagRange_int : public CommandLineFlagRange {
- int _min;
- int _max;
- const int* _ptr;
-
-public:
- // the "name" argument must be a string literal
- CommandLineFlagRange_int(const char* name, const int* ptr, int min, int max)
- : CommandLineFlagRange(name), _min(min), _max(max), _ptr(ptr) {}
-
- Flag::Error check(bool verbose = true) {
- return check_int(*_ptr, verbose);
- }
-
- Flag::Error check_int(int value, bool verbose = true) {
- if ((value < _min) || (value > _max)) {
- CommandLineError::print(verbose,
- "int %s=%d is outside the allowed range "
- "[ %d ... %d ]\n",
- name(), value, _min, _max);
- return Flag::OUT_OF_BOUNDS;
- } else {
- return Flag::SUCCESS;
- }
- }
-
- void print(outputStream* st) {
- st->print("[ %-25d ... %25d ]", _min, _max);
- }
-};
-
-class CommandLineFlagRange_intx : public CommandLineFlagRange {
- intx _min;
- intx _max;
- const intx* _ptr;
-public:
- // the "name" argument must be a string literal
- CommandLineFlagRange_intx(const char* name, const intx* ptr, intx min, intx max)
- : CommandLineFlagRange(name), _min(min), _max(max), _ptr(ptr) {}
-
- Flag::Error check(bool verbose = true) {
- return check_intx(*_ptr, verbose);
- }
-
- Flag::Error check_intx(intx value, bool verbose = true) {
- if ((value < _min) || (value > _max)) {
- CommandLineError::print(verbose,
- "intx %s=" INTX_FORMAT " is outside the allowed range "
- "[ " INTX_FORMAT " ... " INTX_FORMAT " ]\n",
- name(), value, _min, _max);
- return Flag::OUT_OF_BOUNDS;
- } else {
- return Flag::SUCCESS;
- }
- }
-
- void print(outputStream* st) {
- st->print("[ " INTX_FORMAT_W(-25) " ... " INTX_FORMAT_W(25) " ]", _min, _max);
- }
-};
-
-class CommandLineFlagRange_uint : public CommandLineFlagRange {
- uint _min;
- uint _max;
- const uint* _ptr;
-
-public:
- // the "name" argument must be a string literal
- CommandLineFlagRange_uint(const char* name, const uint* ptr, uint min, uint max)
- : CommandLineFlagRange(name), _min(min), _max(max), _ptr(ptr) {}
-
- Flag::Error check(bool verbose = true) {
- return check_uint(*_ptr, verbose);
- }
-
- Flag::Error check_uint(uint value, bool verbose = true) {
- if ((value < _min) || (value > _max)) {
- CommandLineError::print(verbose,
- "uint %s=%u is outside the allowed range "
- "[ %u ... %u ]\n",
- name(), value, _min, _max);
- return Flag::OUT_OF_BOUNDS;
- } else {
- return Flag::SUCCESS;
- }
- }
-
- void print(outputStream* st) {
- st->print("[ %-25u ... %25u ]", _min, _max);
- }
-};
-
-class CommandLineFlagRange_uintx : public CommandLineFlagRange {
- uintx _min;
- uintx _max;
- const uintx* _ptr;
-
-public:
- // the "name" argument must be a string literal
- CommandLineFlagRange_uintx(const char* name, const uintx* ptr, uintx min, uintx max)
- : CommandLineFlagRange(name), _min(min), _max(max), _ptr(ptr) {}
-
- Flag::Error check(bool verbose = true) {
- return check_uintx(*_ptr, verbose);
- }
-
- Flag::Error check_uintx(uintx value, bool verbose = true) {
- if ((value < _min) || (value > _max)) {
- CommandLineError::print(verbose,
- "uintx %s=" UINTX_FORMAT " is outside the allowed range "
- "[ " UINTX_FORMAT " ... " UINTX_FORMAT " ]\n",
- name(), value, _min, _max);
- return Flag::OUT_OF_BOUNDS;
- } else {
- return Flag::SUCCESS;
- }
- }
-
- void print(outputStream* st) {
- st->print("[ " UINTX_FORMAT_W(-25) " ... " UINTX_FORMAT_W(25) " ]", _min, _max);
- }
-};
-
-class CommandLineFlagRange_uint64_t : public CommandLineFlagRange {
- uint64_t _min;
- uint64_t _max;
- const uint64_t* _ptr;
-
-public:
- // the "name" argument must be a string literal
- CommandLineFlagRange_uint64_t(const char* name, const uint64_t* ptr, uint64_t min, uint64_t max)
- : CommandLineFlagRange(name), _min(min), _max(max), _ptr(ptr) {}
-
- Flag::Error check(bool verbose = true) {
- return check_uint64_t(*_ptr, verbose);
- }
-
- Flag::Error check_uint64_t(uint64_t value, bool verbose = true) {
- if ((value < _min) || (value > _max)) {
- CommandLineError::print(verbose,
- "uint64_t %s=" UINT64_FORMAT " is outside the allowed range "
- "[ " UINT64_FORMAT " ... " UINT64_FORMAT " ]\n",
- name(), value, _min, _max);
- return Flag::OUT_OF_BOUNDS;
- } else {
- return Flag::SUCCESS;
- }
- }
-
- void print(outputStream* st) {
- st->print("[ " UINT64_FORMAT_W(-25) " ... " UINT64_FORMAT_W(25) " ]", _min, _max);
- }
-};
-
-class CommandLineFlagRange_size_t : public CommandLineFlagRange {
- size_t _min;
- size_t _max;
- const size_t* _ptr;
-
-public:
- // the "name" argument must be a string literal
- CommandLineFlagRange_size_t(const char* name, const size_t* ptr, size_t min, size_t max)
- : CommandLineFlagRange(name), _min(min), _max(max), _ptr(ptr) {}
-
- Flag::Error check(bool verbose = true) {
- return check_size_t(*_ptr, verbose);
- }
-
- Flag::Error check_size_t(size_t value, bool verbose = true) {
- if ((value < _min) || (value > _max)) {
- CommandLineError::print(verbose,
- "size_t %s=" SIZE_FORMAT " is outside the allowed range "
- "[ " SIZE_FORMAT " ... " SIZE_FORMAT " ]\n",
- name(), value, _min, _max);
- return Flag::OUT_OF_BOUNDS;
- } else {
- return Flag::SUCCESS;
- }
- }
-
- void print(outputStream* st) {
- st->print("[ " SIZE_FORMAT_W(-25) " ... " SIZE_FORMAT_W(25) " ]", _min, _max);
- }
-};
-
-class CommandLineFlagRange_double : public CommandLineFlagRange {
- double _min;
- double _max;
- const double* _ptr;
-
-public:
- // the "name" argument must be a string literal
- CommandLineFlagRange_double(const char* name, const double* ptr, double min, double max)
- : CommandLineFlagRange(name), _min(min), _max(max), _ptr(ptr) {}
-
- Flag::Error check(bool verbose = true) {
- return check_double(*_ptr, verbose);
- }
-
- Flag::Error check_double(double value, bool verbose = true) {
- if ((value < _min) || (value > _max)) {
- CommandLineError::print(verbose,
- "double %s=%f is outside the allowed range "
- "[ %f ... %f ]\n",
- name(), value, _min, _max);
- return Flag::OUT_OF_BOUNDS;
- } else {
- return Flag::SUCCESS;
- }
- }
-
- void print(outputStream* st) {
- st->print("[ %-25.3f ... %25.3f ]", _min, _max);
- }
-};
-
-// No constraint emitting
-void emit_range_no(...) { /* NOP */ }
-
-// No constraint emitting if function argument is NOT provided
-void emit_range_bool(const char* /*name*/, const bool* /*value*/) { /* NOP */ }
-void emit_range_ccstr(const char* /*name*/, const ccstr* /*value*/) { /* NOP */ }
-void emit_range_ccstrlist(const char* /*name*/, const ccstrlist* /*value*/) { /* NOP */ }
-void emit_range_int(const char* /*name*/, const int* /*value*/) { /* NOP */ }
-void emit_range_intx(const char* /*name*/, const intx* /*value*/) { /* NOP */ }
-void emit_range_uint(const char* /*name*/, const uint* /*value*/) { /* NOP */ }
-void emit_range_uintx(const char* /*name*/, const uintx* /*value*/) { /* NOP */ }
-void emit_range_uint64_t(const char* /*name*/, const uint64_t* /*value*/) { /* NOP */ }
-void emit_range_size_t(const char* /*name*/, const size_t* /*value*/) { /* NOP */ }
-void emit_range_double(const char* /*name*/, const double* /*value*/) { /* NOP */ }
-
-// CommandLineFlagRange emitting code functions if range arguments are provided
-void emit_range_int(const char* name, const int* ptr, int min, int max) {
- CommandLineFlagRangeList::add(new CommandLineFlagRange_int(name, ptr, min, max));
-}
-void emit_range_intx(const char* name, const intx* ptr, intx min, intx max) {
- CommandLineFlagRangeList::add(new CommandLineFlagRange_intx(name, ptr, min, max));
-}
-void emit_range_uint(const char* name, const uint* ptr, uint min, uint max) {
- CommandLineFlagRangeList::add(new CommandLineFlagRange_uint(name, ptr, min, max));
-}
-void emit_range_uintx(const char* name, const uintx* ptr, uintx min, uintx max) {
- CommandLineFlagRangeList::add(new CommandLineFlagRange_uintx(name, ptr, min, max));
-}
-void emit_range_uint64_t(const char* name, const uint64_t* ptr, uint64_t min, uint64_t max) {
- CommandLineFlagRangeList::add(new CommandLineFlagRange_uint64_t(name, ptr, min, max));
-}
-void emit_range_size_t(const char* name, const size_t* ptr, size_t min, size_t max) {
- CommandLineFlagRangeList::add(new CommandLineFlagRange_size_t(name, ptr, min, max));
-}
-void emit_range_double(const char* name, const double* ptr, double min, double max) {
- CommandLineFlagRangeList::add(new CommandLineFlagRange_double(name, ptr, min, max));
-}
-
-// Generate code to call emit_range_xxx function
-#define EMIT_RANGE_PRODUCT_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
-#define EMIT_RANGE_COMMERCIAL_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
-#define EMIT_RANGE_DIAGNOSTIC_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
-#define EMIT_RANGE_EXPERIMENTAL_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
-#define EMIT_RANGE_MANAGEABLE_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
-#define EMIT_RANGE_PRODUCT_RW_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
-#define EMIT_RANGE_PD_PRODUCT_FLAG(type, name, doc) ); emit_range_##type(#name,&name
-#define EMIT_RANGE_PD_DIAGNOSTIC_FLAG(type, name, doc) ); emit_range_##type(#name,&name
-#ifndef PRODUCT
-#define EMIT_RANGE_DEVELOPER_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
-#define EMIT_RANGE_PD_DEVELOPER_FLAG(type, name, doc) ); emit_range_##type(#name,&name
-#define EMIT_RANGE_NOTPRODUCT_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
-#else
-#define EMIT_RANGE_DEVELOPER_FLAG(type, name, value, doc) ); emit_range_no(#name,&name
-#define EMIT_RANGE_PD_DEVELOPER_FLAG(type, name, doc) ); emit_range_no(#name,&name
-#define EMIT_RANGE_NOTPRODUCT_FLAG(type, name, value, doc) ); emit_range_no(#name,&name
-#endif
-#ifdef _LP64
-#define EMIT_RANGE_LP64_PRODUCT_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
-#else
-#define EMIT_RANGE_LP64_PRODUCT_FLAG(type, name, value, doc) ); emit_range_no(#name,&name
-#endif
-
-// Generate func argument to pass into emit_range_xxx functions
-#define EMIT_RANGE_CHECK(a, b) , a, b
-
-#define INITIAL_RANGES_SIZE 379
-GrowableArray<CommandLineFlagRange*>* CommandLineFlagRangeList::_ranges = NULL;
-
-// Check the ranges of all flags that have them
-void CommandLineFlagRangeList::init(void) {
-
- _ranges = new (ResourceObj::C_HEAP, mtArguments) GrowableArray<CommandLineFlagRange*>(INITIAL_RANGES_SIZE, true);
-
- emit_range_no(NULL VM_FLAGS(EMIT_RANGE_DEVELOPER_FLAG,
- EMIT_RANGE_PD_DEVELOPER_FLAG,
- EMIT_RANGE_PRODUCT_FLAG,
- EMIT_RANGE_PD_PRODUCT_FLAG,
- EMIT_RANGE_DIAGNOSTIC_FLAG,
- EMIT_RANGE_PD_DIAGNOSTIC_FLAG,
- EMIT_RANGE_EXPERIMENTAL_FLAG,
- EMIT_RANGE_NOTPRODUCT_FLAG,
- EMIT_RANGE_MANAGEABLE_FLAG,
- EMIT_RANGE_PRODUCT_RW_FLAG,
- EMIT_RANGE_LP64_PRODUCT_FLAG,
- EMIT_RANGE_CHECK,
- IGNORE_CONSTRAINT,
- IGNORE_WRITEABLE));
-
- EMIT_RANGES_FOR_GLOBALS_EXT
-
- emit_range_no(NULL ARCH_FLAGS(EMIT_RANGE_DEVELOPER_FLAG,
- EMIT_RANGE_PRODUCT_FLAG,
- EMIT_RANGE_DIAGNOSTIC_FLAG,
- EMIT_RANGE_EXPERIMENTAL_FLAG,
- EMIT_RANGE_NOTPRODUCT_FLAG,
- EMIT_RANGE_CHECK,
- IGNORE_CONSTRAINT,
- IGNORE_WRITEABLE));
-
-#if INCLUDE_JVMCI
- emit_range_no(NULL JVMCI_FLAGS(EMIT_RANGE_DEVELOPER_FLAG,
- EMIT_RANGE_PD_DEVELOPER_FLAG,
- EMIT_RANGE_PRODUCT_FLAG,
- EMIT_RANGE_PD_PRODUCT_FLAG,
- EMIT_RANGE_DIAGNOSTIC_FLAG,
- EMIT_RANGE_PD_DIAGNOSTIC_FLAG,
- EMIT_RANGE_EXPERIMENTAL_FLAG,
- EMIT_RANGE_NOTPRODUCT_FLAG,
- EMIT_RANGE_CHECK,
- IGNORE_CONSTRAINT,
- IGNORE_WRITEABLE));
-#endif // INCLUDE_JVMCI
-
-#ifdef COMPILER1
- emit_range_no(NULL C1_FLAGS(EMIT_RANGE_DEVELOPER_FLAG,
- EMIT_RANGE_PD_DEVELOPER_FLAG,
- EMIT_RANGE_PRODUCT_FLAG,
- EMIT_RANGE_PD_PRODUCT_FLAG,
- EMIT_RANGE_DIAGNOSTIC_FLAG,
- EMIT_RANGE_PD_DIAGNOSTIC_FLAG,
- EMIT_RANGE_NOTPRODUCT_FLAG,
- EMIT_RANGE_CHECK,
- IGNORE_CONSTRAINT,
- IGNORE_WRITEABLE));
-#endif // COMPILER1
-
-#ifdef COMPILER2
- emit_range_no(NULL C2_FLAGS(EMIT_RANGE_DEVELOPER_FLAG,
- EMIT_RANGE_PD_DEVELOPER_FLAG,
- EMIT_RANGE_PRODUCT_FLAG,
- EMIT_RANGE_PD_PRODUCT_FLAG,
- EMIT_RANGE_DIAGNOSTIC_FLAG,
- EMIT_RANGE_PD_DIAGNOSTIC_FLAG,
- EMIT_RANGE_EXPERIMENTAL_FLAG,
- EMIT_RANGE_NOTPRODUCT_FLAG,
- EMIT_RANGE_CHECK,
- IGNORE_CONSTRAINT,
- IGNORE_WRITEABLE));
-#endif // COMPILER2
-}
-
-CommandLineFlagRange* CommandLineFlagRangeList::find(const char* name) {
- CommandLineFlagRange* found = NULL;
- for (int i=0; i<length(); i++) {
- CommandLineFlagRange* range = at(i);
- if (strcmp(range->name(), name) == 0) {
- found = range;
- break;
- }
- }
- return found;
-}
-
-void CommandLineFlagRangeList::print(outputStream* st, const char* name, RangeStrFunc default_range_str_func) {
- CommandLineFlagRange* range = CommandLineFlagRangeList::find(name);
- if (range != NULL) {
- range->print(st);
- } else {
- CommandLineFlagConstraint* constraint = CommandLineFlagConstraintList::find(name);
- if (constraint != NULL) {
- assert(default_range_str_func!=NULL, "default_range_str_func must be provided");
- st->print("%s", default_range_str_func());
- } else {
- st->print("[ ... ]");
- }
- }
-}
-
-bool CommandLineFlagRangeList::check_ranges() {
- // Check ranges.
- bool status = true;
- for (int i=0; i<length(); i++) {
- CommandLineFlagRange* range = at(i);
- if (range->check(true) != Flag::SUCCESS) status = false;
- }
- return status;
-}
--- a/src/hotspot/share/runtime/commandLineFlagRangeList.hpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_RUNTIME_COMMANDLINEFLAGRANGELIST_HPP
-#define SHARE_VM_RUNTIME_COMMANDLINEFLAGRANGELIST_HPP
-
-#include "memory/metaspaceShared.hpp"
-#include "runtime/globals.hpp"
-#include "utilities/growableArray.hpp"
-
-/*
- * Here we have a mechanism for extracting ranges specified in flag macro tables.
- *
- * The specified ranges are used to verify that flags have valid values.
- *
- * An example of a range is "min <= flag <= max". Both "min" and "max" must be
- * constant and can not change. If either "min" or "max" can change,
- * then we need to use constraint instead.
- */
-
-class CommandLineError : public AllStatic {
-public:
- static void print(bool verbose, const char* msg, ...);
-};
-
-class CommandLineFlagRange : public CHeapObj<mtArguments> {
-private:
- const char* _name;
-public:
- // the "name" argument must be a string literal
- CommandLineFlagRange(const char* name) { _name=name; }
- ~CommandLineFlagRange() {}
- const char* name() { return _name; }
- virtual Flag::Error check(bool verbose = true) { ShouldNotReachHere(); return Flag::ERR_OTHER; }
- virtual Flag::Error check_int(int value, bool verbose = true) { ShouldNotReachHere(); return Flag::ERR_OTHER; }
- virtual Flag::Error check_intx(intx value, bool verbose = true) { ShouldNotReachHere(); return Flag::ERR_OTHER; }
- virtual Flag::Error check_uint(uint value, bool verbose = true) { ShouldNotReachHere(); return Flag::ERR_OTHER; }
- virtual Flag::Error check_uintx(uintx value, bool verbose = true) { ShouldNotReachHere(); return Flag::ERR_OTHER; }
- virtual Flag::Error check_uint64_t(uint64_t value, bool verbose = true) { ShouldNotReachHere(); return Flag::ERR_OTHER; }
- virtual Flag::Error check_size_t(size_t value, bool verbose = true) { ShouldNotReachHere(); return Flag::ERR_OTHER; }
- virtual Flag::Error check_double(double value, bool verbose = true) { ShouldNotReachHere(); return Flag::ERR_OTHER; }
- virtual void print(outputStream* st) { ; }
-};
-
-class CommandLineFlagRangeList : public AllStatic {
- static GrowableArray<CommandLineFlagRange*>* _ranges;
-public:
- static void init();
- static int length() { return (_ranges != NULL) ? _ranges->length() : 0; }
- static CommandLineFlagRange* at(int i) { return (_ranges != NULL) ? _ranges->at(i) : NULL; }
- static CommandLineFlagRange* find(const char* name);
- static void add(CommandLineFlagRange* range) { _ranges->append(range); }
- static void print(outputStream* st, const char* name, RangeStrFunc default_range_str_func);
- // Check the final values of all flags for ranges.
- static bool check_ranges();
-};
-
-#endif // SHARE_VM_RUNTIME_COMMANDLINEFLAGRANGELIST_HPP
--- a/src/hotspot/share/runtime/commandLineFlagWriteableList.cpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,205 +0,0 @@
-/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/plab.hpp"
-#include "runtime/commandLineFlagWriteableList.hpp"
-#include "runtime/os.hpp"
-#ifdef COMPILER1
-#include "c1/c1_globals.hpp"
-#endif // COMPILER1
-#ifdef COMPILER2
-#include "opto/c2_globals.hpp"
-#endif // COMPILER2
-#if INCLUDE_JVMCI
-#include "jvmci/jvmci_globals.hpp"
-#endif
-
-bool CommandLineFlagWriteable::is_writeable(void) {
- return _writeable;
-}
-
-void CommandLineFlagWriteable::mark_once(void) {
- if (_type == Once) {
- _writeable = false;
- }
-}
-
-void CommandLineFlagWriteable::mark_startup(void) {
- if (_type == CommandLineFlagWriteable::CommandLineOnly) {
- _writeable = false;
- }
-}
-
-// No control emitting
-void emit_writeable_no(...) { /* NOP */ }
-
-// No control emitting if type argument is NOT provided
-void emit_writeable_bool(const char* /*name*/) { /* NOP */ }
-void emit_writeable_ccstr(const char* /*name*/) { /* NOP */ }
-void emit_writeable_ccstrlist(const char* /*name*/) { /* NOP */ }
-void emit_writeable_int(const char* /*name*/) { /* NOP */ }
-void emit_writeable_intx(const char* /*name*/) { /* NOP */ }
-void emit_writeable_uint(const char* /*name*/) { /* NOP */ }
-void emit_writeable_uintx(const char* /*name*/) { /* NOP */ }
-void emit_writeable_uint64_t(const char* /*name*/) { /* NOP */ }
-void emit_writeable_size_t(const char* /*name*/) { /* NOP */ }
-void emit_writeable_double(const char* /*name*/) { /* NOP */ }
-
-// CommandLineFlagWriteable emitting code functions if range arguments are provided
-void emit_writeable_bool(const char* name, CommandLineFlagWriteable::WriteableType type) {
- CommandLineFlagWriteableList::add(new CommandLineFlagWriteable(name, type));
-}
-void emit_writeable_int(const char* name, CommandLineFlagWriteable::WriteableType type) {
- CommandLineFlagWriteableList::add(new CommandLineFlagWriteable(name, type));
-}
-void emit_writeable_intx(const char* name, CommandLineFlagWriteable::WriteableType type) {
- CommandLineFlagWriteableList::add(new CommandLineFlagWriteable(name, type));
-}
-void emit_writeable_uint(const char* name, CommandLineFlagWriteable::WriteableType type) {
- CommandLineFlagWriteableList::add(new CommandLineFlagWriteable(name, type));
-}
-void emit_writeable_uintx(const char* name, CommandLineFlagWriteable::WriteableType type) {
- CommandLineFlagWriteableList::add(new CommandLineFlagWriteable(name, type));
-}
-void emit_writeable_uint64_t(const char* name, CommandLineFlagWriteable::WriteableType type) {
- CommandLineFlagWriteableList::add(new CommandLineFlagWriteable(name, type));
-}
-void emit_writeable_size_t(const char* name, CommandLineFlagWriteable::WriteableType type) {
- CommandLineFlagWriteableList::add(new CommandLineFlagWriteable(name, type));
-}
-void emit_writeable_double(const char* name, CommandLineFlagWriteable::WriteableType type) {
- CommandLineFlagWriteableList::add(new CommandLineFlagWriteable(name, type));
-}
-
-// Generate code to call emit_writeable_xxx function
-#define EMIT_WRITEABLE_PRODUCT_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
-#define EMIT_WRITEABLE_COMMERCIAL_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
-#define EMIT_WRITEABLE_DIAGNOSTIC_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
-#define EMIT_WRITEABLE_EXPERIMENTAL_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
-#define EMIT_WRITEABLE_MANAGEABLE_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
-#define EMIT_WRITEABLE_PRODUCT_RW_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
-#define EMIT_WRITEABLE_PD_PRODUCT_FLAG(type, name, doc) ); emit_writeable_##type(#name
-#define EMIT_WRITEABLE_DEVELOPER_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
-#define EMIT_WRITEABLE_PD_DEVELOPER_FLAG(type, name, doc) ); emit_writeable_##type(#name
-#define EMIT_WRITEABLE_PD_DIAGNOSTIC_FLAG(type, name, doc) ); emit_writeable_##type(#name
-#define EMIT_WRITEABLE_NOTPRODUCT_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
-#define EMIT_WRITEABLE_LP64_PRODUCT_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
-
-// Generate type argument to pass into emit_writeable_xxx functions
-#define EMIT_WRITEABLE(a) , CommandLineFlagWriteable::a
-
-#define INITIAL_WRITEABLES_SIZE 2
-GrowableArray<CommandLineFlagWriteable*>* CommandLineFlagWriteableList::_controls = NULL;
-
-void CommandLineFlagWriteableList::init(void) {
-
- _controls = new (ResourceObj::C_HEAP, mtArguments) GrowableArray<CommandLineFlagWriteable*>(INITIAL_WRITEABLES_SIZE, true);
-
- emit_writeable_no(NULL VM_FLAGS(EMIT_WRITEABLE_DEVELOPER_FLAG,
- EMIT_WRITEABLE_PD_DEVELOPER_FLAG,
- EMIT_WRITEABLE_PRODUCT_FLAG,
- EMIT_WRITEABLE_PD_PRODUCT_FLAG,
- EMIT_WRITEABLE_DIAGNOSTIC_FLAG,
- EMIT_WRITEABLE_PD_DIAGNOSTIC_FLAG,
- EMIT_WRITEABLE_EXPERIMENTAL_FLAG,
- EMIT_WRITEABLE_NOTPRODUCT_FLAG,
- EMIT_WRITEABLE_MANAGEABLE_FLAG,
- EMIT_WRITEABLE_PRODUCT_RW_FLAG,
- EMIT_WRITEABLE_LP64_PRODUCT_FLAG,
- IGNORE_RANGE,
- IGNORE_CONSTRAINT,
- EMIT_WRITEABLE));
-
- EMIT_WRITEABLES_FOR_GLOBALS_EXT
-
- emit_writeable_no(NULL ARCH_FLAGS(EMIT_WRITEABLE_DEVELOPER_FLAG,
- EMIT_WRITEABLE_PRODUCT_FLAG,
- EMIT_WRITEABLE_DIAGNOSTIC_FLAG,
- EMIT_WRITEABLE_EXPERIMENTAL_FLAG,
- EMIT_WRITEABLE_NOTPRODUCT_FLAG,
- IGNORE_RANGE,
- IGNORE_CONSTRAINT,
- EMIT_WRITEABLE));
-
-#if INCLUDE_JVMCI
- emit_writeable_no(NULL JVMCI_FLAGS(EMIT_WRITEABLE_DEVELOPER_FLAG,
- EMIT_WRITEABLE_PD_DEVELOPER_FLAG,
- EMIT_WRITEABLE_PRODUCT_FLAG,
- EMIT_WRITEABLE_PD_PRODUCT_FLAG,
- EMIT_WRITEABLE_DIAGNOSTIC_FLAG,
- EMIT_WRITEABLE_PD_DIAGNOSTIC_FLAG,
- EMIT_WRITEABLE_EXPERIMENTAL_FLAG,
- EMIT_WRITEABLE_NOTPRODUCT_FLAG,
- IGNORE_RANGE,
- IGNORE_CONSTRAINT,
- EMIT_WRITEABLE));
-#endif // INCLUDE_JVMCI
-
-#ifdef COMPILER1
- emit_writeable_no(NULL C1_FLAGS(EMIT_WRITEABLE_DEVELOPER_FLAG,
- EMIT_WRITEABLE_PD_DEVELOPER_FLAG,
- EMIT_WRITEABLE_PRODUCT_FLAG,
- EMIT_WRITEABLE_PD_PRODUCT_FLAG,
- EMIT_WRITEABLE_DIAGNOSTIC_FLAG,
- EMIT_WRITEABLE_PD_DIAGNOSTIC_FLAG,
- EMIT_WRITEABLE_NOTPRODUCT_FLAG,
- IGNORE_RANGE,
- IGNORE_CONSTRAINT,
- EMIT_WRITEABLE));
-#endif // COMPILER1
-
-#ifdef COMPILER2
- emit_writeable_no(NULL C2_FLAGS(EMIT_WRITEABLE_DEVELOPER_FLAG,
- EMIT_WRITEABLE_PD_DEVELOPER_FLAG,
- EMIT_WRITEABLE_PRODUCT_FLAG,
- EMIT_WRITEABLE_PD_PRODUCT_FLAG,
- EMIT_WRITEABLE_DIAGNOSTIC_FLAG,
- EMIT_WRITEABLE_PD_DIAGNOSTIC_FLAG,
- EMIT_WRITEABLE_EXPERIMENTAL_FLAG,
- EMIT_WRITEABLE_NOTPRODUCT_FLAG,
- IGNORE_RANGE,
- IGNORE_CONSTRAINT,
- EMIT_WRITEABLE));
-#endif // COMPILER2
-}
-
-CommandLineFlagWriteable* CommandLineFlagWriteableList::find(const char* name) {
- CommandLineFlagWriteable* found = NULL;
- for (int i=0; i<length(); i++) {
- CommandLineFlagWriteable* writeable = at(i);
- if (strcmp(writeable->name(), name) == 0) {
- found = writeable;
- break;
- }
- }
- return found;
-}
-
-void CommandLineFlagWriteableList::mark_startup(void) {
- for (int i=0; i<length(); i++) {
- CommandLineFlagWriteable* writeable = at(i);
- writeable->mark_startup();
- }
-}
--- a/src/hotspot/share/runtime/commandLineFlagWriteableList.hpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_RUNTIME_COMMANDLINEFLAGWRITEABLE_HPP
-#define SHARE_VM_RUNTIME_COMMANDLINEFLAGWRITEABLE_HPP
-
-#include "runtime/globals.hpp"
-#include "utilities/growableArray.hpp"
-
-class CommandLineFlagWriteable : public CHeapObj<mtArguments> {
-public:
- enum WriteableType {
- // can be set without any limits
- Always = 0,
- // can only be set once, either via command lines or during runtime
- Once = 1,
- // can only be set on command line (multiple times allowed)
- CommandLineOnly = 2
- };
-private:
- const char* _name;
- WriteableType _type;
- bool _writeable;
- bool _startup_done;
-public:
- // the "name" argument must be a string literal
- CommandLineFlagWriteable(const char* name, WriteableType type) { _name=name; _type=type; _writeable=true; _startup_done=false; }
- ~CommandLineFlagWriteable() {}
- const char* name() { return _name; }
- const WriteableType type() { return _type; }
- bool is_writeable(void);
- void mark_once(void);
- void mark_startup(void);
-};
-
-class CommandLineFlagWriteableList : public AllStatic {
- static GrowableArray<CommandLineFlagWriteable*>* _controls;
-public:
- static void init();
- static int length() { return (_controls != NULL) ? _controls->length() : 0; }
- static CommandLineFlagWriteable* at(int i) { return (_controls != NULL) ? _controls->at(i) : NULL; }
- static CommandLineFlagWriteable* find(const char* name);
- static void add(CommandLineFlagWriteable* range) { _controls->append(range); }
- static void mark_startup(void);
-};
-
-#endif // SHARE_VM_RUNTIME_COMMANDLINEFLAGWRITEABLE_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/flags/flagSetting.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_FLAGS_FLAGSETTING_HPP
+#define SHARE_VM_RUNTIME_FLAGS_FLAGSETTING_HPP
+
+#include "memory/allocation.hpp"
+
+// debug flags control various aspects of the VM and are global accessible
+
+// use FlagSetting to temporarily change some debug flag
+// e.g. FlagSetting fs(DebugThisAndThat, true);
+// restored to previous value upon leaving scope
+class FlagSetting : public StackObj {
+ bool val;
+ bool* flag;
+public:
+ FlagSetting(bool& fl, bool newValue) { flag = &fl; val = fl; fl = newValue; }
+ ~FlagSetting() { *flag = val; }
+};
+
+class UIntFlagSetting : public StackObj {
+ uint val;
+ uint* flag;
+public:
+ UIntFlagSetting(uint& fl, uint newValue) { flag = &fl; val = fl; fl = newValue; }
+ ~UIntFlagSetting() { *flag = val; }
+};
+
+class SizeTFlagSetting : public StackObj {
+ size_t val;
+ size_t* flag;
+public:
+ SizeTFlagSetting(size_t& fl, size_t newValue) { flag = &fl; val = fl; fl = newValue; }
+ ~SizeTFlagSetting() { *flag = val; }
+};
+
+// Helper class for temporarily saving the value of a flag during a scope.
+template <size_t SIZE>
+class FlagGuard {
+ unsigned char _value[SIZE];
+ void* const _addr;
+public:
+ FlagGuard(void* flag_addr) : _addr(flag_addr) { memcpy(_value, _addr, SIZE); }
+ ~FlagGuard() { memcpy(_addr, _value, SIZE); }
+};
+
+#define FLAG_GUARD(f) FlagGuard<sizeof(f)> f ## _guard(&f)
+
+#endif // SHARE_VM_RUNTIME_FLAGS_FLAGSETTING_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/flags/jvmFlag.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,1506 @@
+/*
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/flags/jvmFlag.hpp"
+#include "runtime/flags/jvmFlagConstraintList.hpp"
+#include "runtime/flags/jvmFlagWriteableList.hpp"
+#include "runtime/flags/jvmFlagRangeList.hpp"
+#include "runtime/globals_extension.hpp"
+#include "trace/tracing.hpp"
+#include "utilities/defaultStream.hpp"
+#include "utilities/stringUtils.hpp"
+
+#define DEFAULT_RANGE_STR_CHUNK_SIZE 64
+static char* create_range_str(const char *fmt, ...) {
+ static size_t string_length = DEFAULT_RANGE_STR_CHUNK_SIZE;
+ static char* range_string = NEW_C_HEAP_ARRAY(char, string_length, mtLogging);
+
+ int size_needed = 0;
+ do {
+ va_list args;
+ va_start(args, fmt);
+ size_needed = jio_vsnprintf(range_string, string_length, fmt, args);
+ va_end(args);
+
+ if (size_needed < 0) {
+ string_length += DEFAULT_RANGE_STR_CHUNK_SIZE;
+ range_string = REALLOC_C_HEAP_ARRAY(char, range_string, string_length, mtLogging);
+ guarantee(range_string != NULL, "create_range_str string should not be NULL");
+ }
+ } while (size_needed < 0);
+
+ return range_string;
+}
+
+const char* JVMFlag::get_int_default_range_str() {
+ return create_range_str("[ " INT32_FORMAT_W(-25) " ... " INT32_FORMAT_W(25) " ]", INT_MIN, INT_MAX);
+}
+
+const char* JVMFlag::get_uint_default_range_str() {
+ return create_range_str("[ " UINT32_FORMAT_W(-25) " ... " UINT32_FORMAT_W(25) " ]", 0, UINT_MAX);
+}
+
+const char* JVMFlag::get_intx_default_range_str() {
+ return create_range_str("[ " INTX_FORMAT_W(-25) " ... " INTX_FORMAT_W(25) " ]", min_intx, max_intx);
+}
+
+const char* JVMFlag::get_uintx_default_range_str() {
+ return create_range_str("[ " UINTX_FORMAT_W(-25) " ... " UINTX_FORMAT_W(25) " ]", 0, max_uintx);
+}
+
+const char* JVMFlag::get_uint64_t_default_range_str() {
+ return create_range_str("[ " UINT64_FORMAT_W(-25) " ... " UINT64_FORMAT_W(25) " ]", 0, uint64_t(max_juint));
+}
+
+const char* JVMFlag::get_size_t_default_range_str() {
+ return create_range_str("[ " SIZE_FORMAT_W(-25) " ... " SIZE_FORMAT_W(25) " ]", 0, SIZE_MAX);
+}
+
+const char* JVMFlag::get_double_default_range_str() {
+ return create_range_str("[ %-25.3f ... %25.3f ]", DBL_MIN, DBL_MAX);
+}
+
+static bool is_product_build() {
+#ifdef PRODUCT
+ return true;
+#else
+ return false;
+#endif
+}
+
+JVMFlag::Error JVMFlag::check_writable(bool changed) {
+ if (is_constant_in_binary()) {
+ fatal("flag is constant: %s", _name);
+ }
+
+ JVMFlag::Error error = JVMFlag::SUCCESS;
+ if (changed) {
+ JVMFlagWriteable* writeable = JVMFlagWriteableList::find(_name);
+ if (writeable) {
+ if (writeable->is_writeable() == false) {
+ switch (writeable->type())
+ {
+ case JVMFlagWriteable::Once:
+ error = JVMFlag::SET_ONLY_ONCE;
+ jio_fprintf(defaultStream::error_stream(), "Error: %s may not be set more than once\n", _name);
+ break;
+ case JVMFlagWriteable::CommandLineOnly:
+ error = JVMFlag::COMMAND_LINE_ONLY;
+ jio_fprintf(defaultStream::error_stream(), "Error: %s may be modified only from commad line\n", _name);
+ break;
+ default:
+ ShouldNotReachHere();
+ break;
+ }
+ }
+ writeable->mark_once();
+ }
+ }
+ return error;
+}
+
+bool JVMFlag::is_bool() const {
+ return strcmp(_type, "bool") == 0;
+}
+
+bool JVMFlag::get_bool() const {
+ return *((bool*) _addr);
+}
+
+JVMFlag::Error JVMFlag::set_bool(bool value) {
+ JVMFlag::Error error = check_writable(value!=get_bool());
+ if (error == JVMFlag::SUCCESS) {
+ *((bool*) _addr) = value;
+ }
+ return error;
+}
+
+bool JVMFlag::is_int() const {
+ return strcmp(_type, "int") == 0;
+}
+
+int JVMFlag::get_int() const {
+ return *((int*) _addr);
+}
+
+JVMFlag::Error JVMFlag::set_int(int value) {
+ JVMFlag::Error error = check_writable(value!=get_int());
+ if (error == JVMFlag::SUCCESS) {
+ *((int*) _addr) = value;
+ }
+ return error;
+}
+
+bool JVMFlag::is_uint() const {
+ return strcmp(_type, "uint") == 0;
+}
+
+uint JVMFlag::get_uint() const {
+ return *((uint*) _addr);
+}
+
+JVMFlag::Error JVMFlag::set_uint(uint value) {
+ JVMFlag::Error error = check_writable(value!=get_uint());
+ if (error == JVMFlag::SUCCESS) {
+ *((uint*) _addr) = value;
+ }
+ return error;
+}
+
+bool JVMFlag::is_intx() const {
+ return strcmp(_type, "intx") == 0;
+}
+
+intx JVMFlag::get_intx() const {
+ return *((intx*) _addr);
+}
+
+JVMFlag::Error JVMFlag::set_intx(intx value) {
+ JVMFlag::Error error = check_writable(value!=get_intx());
+ if (error == JVMFlag::SUCCESS) {
+ *((intx*) _addr) = value;
+ }
+ return error;
+}
+
+bool JVMFlag::is_uintx() const {
+ return strcmp(_type, "uintx") == 0;
+}
+
+uintx JVMFlag::get_uintx() const {
+ return *((uintx*) _addr);
+}
+
+JVMFlag::Error JVMFlag::set_uintx(uintx value) {
+ JVMFlag::Error error = check_writable(value!=get_uintx());
+ if (error == JVMFlag::SUCCESS) {
+ *((uintx*) _addr) = value;
+ }
+ return error;
+}
+
+bool JVMFlag::is_uint64_t() const {
+ return strcmp(_type, "uint64_t") == 0;
+}
+
+uint64_t JVMFlag::get_uint64_t() const {
+ return *((uint64_t*) _addr);
+}
+
+JVMFlag::Error JVMFlag::set_uint64_t(uint64_t value) {
+ JVMFlag::Error error = check_writable(value!=get_uint64_t());
+ if (error == JVMFlag::SUCCESS) {
+ *((uint64_t*) _addr) = value;
+ }
+ return error;
+}
+
+bool JVMFlag::is_size_t() const {
+ return strcmp(_type, "size_t") == 0;
+}
+
+size_t JVMFlag::get_size_t() const {
+ return *((size_t*) _addr);
+}
+
+JVMFlag::Error JVMFlag::set_size_t(size_t value) {
+ JVMFlag::Error error = check_writable(value!=get_size_t());
+ if (error == JVMFlag::SUCCESS) {
+ *((size_t*) _addr) = value;
+ }
+ return error;
+}
+
+bool JVMFlag::is_double() const {
+ return strcmp(_type, "double") == 0;
+}
+
+double JVMFlag::get_double() const {
+ return *((double*) _addr);
+}
+
+JVMFlag::Error JVMFlag::set_double(double value) {
+ JVMFlag::Error error = check_writable(value!=get_double());
+ if (error == JVMFlag::SUCCESS) {
+ *((double*) _addr) = value;
+ }
+ return error;
+}
+
+bool JVMFlag::is_ccstr() const {
+ return strcmp(_type, "ccstr") == 0 || strcmp(_type, "ccstrlist") == 0;
+}
+
+bool JVMFlag::ccstr_accumulates() const {
+ return strcmp(_type, "ccstrlist") == 0;
+}
+
+ccstr JVMFlag::get_ccstr() const {
+ return *((ccstr*) _addr);
+}
+
+JVMFlag::Error JVMFlag::set_ccstr(ccstr value) {
+ JVMFlag::Error error = check_writable(value!=get_ccstr());
+ if (error == JVMFlag::SUCCESS) {
+ *((ccstr*) _addr) = value;
+ }
+ return error;
+}
+
+
+JVMFlag::Flags JVMFlag::get_origin() {
+ return Flags(_flags & VALUE_ORIGIN_MASK);
+}
+
+void JVMFlag::set_origin(Flags origin) {
+ assert((origin & VALUE_ORIGIN_MASK) == origin, "sanity");
+ Flags new_origin = Flags((origin == COMMAND_LINE) ? Flags(origin | ORIG_COMMAND_LINE) : origin);
+ _flags = Flags((_flags & ~VALUE_ORIGIN_MASK) | new_origin);
+}
+
+bool JVMFlag::is_default() {
+ return (get_origin() == DEFAULT);
+}
+
+bool JVMFlag::is_ergonomic() {
+ return (get_origin() == ERGONOMIC);
+}
+
+bool JVMFlag::is_command_line() {
+ return (_flags & ORIG_COMMAND_LINE) != 0;
+}
+
+void JVMFlag::set_command_line() {
+ _flags = Flags(_flags | ORIG_COMMAND_LINE);
+}
+
+bool JVMFlag::is_product() const {
+ return (_flags & KIND_PRODUCT) != 0;
+}
+
+bool JVMFlag::is_manageable() const {
+ return (_flags & KIND_MANAGEABLE) != 0;
+}
+
+bool JVMFlag::is_diagnostic() const {
+ return (_flags & KIND_DIAGNOSTIC) != 0;
+}
+
+bool JVMFlag::is_experimental() const {
+ return (_flags & KIND_EXPERIMENTAL) != 0;
+}
+
+bool JVMFlag::is_notproduct() const {
+ return (_flags & KIND_NOT_PRODUCT) != 0;
+}
+
+bool JVMFlag::is_develop() const {
+ return (_flags & KIND_DEVELOP) != 0;
+}
+
+bool JVMFlag::is_read_write() const {
+ return (_flags & KIND_READ_WRITE) != 0;
+}
+
+bool JVMFlag::is_commercial() const {
+ return (_flags & KIND_COMMERCIAL) != 0;
+}
+
+/**
+ * Returns if this flag is a constant in the binary. Right now this is
+ * true for notproduct and develop flags in product builds.
+ */
+bool JVMFlag::is_constant_in_binary() const {
+#ifdef PRODUCT
+ return is_notproduct() || is_develop();
+#else
+ return false;
+#endif
+}
+
+bool JVMFlag::is_unlocker() const {
+ return strcmp(_name, "UnlockDiagnosticVMOptions") == 0 ||
+ strcmp(_name, "UnlockExperimentalVMOptions") == 0 ||
+ is_unlocker_ext();
+}
+
+bool JVMFlag::is_unlocked() const {
+ if (is_diagnostic()) {
+ return UnlockDiagnosticVMOptions;
+ }
+ if (is_experimental()) {
+ return UnlockExperimentalVMOptions;
+ }
+ return is_unlocked_ext();
+}
+
+void JVMFlag::clear_diagnostic() {
+ assert(is_diagnostic(), "sanity");
+ _flags = Flags(_flags & ~KIND_DIAGNOSTIC);
+ assert(!is_diagnostic(), "sanity");
+}
+
+// Get custom message for this locked flag, or NULL if
+// none is available. Returns message type produced.
+JVMFlag::MsgType JVMFlag::get_locked_message(char* buf, int buflen) const {
+ buf[0] = '\0';
+ if (is_diagnostic() && !is_unlocked()) {
+ jio_snprintf(buf, buflen,
+ "Error: VM option '%s' is diagnostic and must be enabled via -XX:+UnlockDiagnosticVMOptions.\n"
+ "Error: The unlock option must precede '%s'.\n",
+ _name, _name);
+ return JVMFlag::DIAGNOSTIC_FLAG_BUT_LOCKED;
+ }
+ if (is_experimental() && !is_unlocked()) {
+ jio_snprintf(buf, buflen,
+ "Error: VM option '%s' is experimental and must be enabled via -XX:+UnlockExperimentalVMOptions.\n"
+ "Error: The unlock option must precede '%s'.\n",
+ _name, _name);
+ return JVMFlag::EXPERIMENTAL_FLAG_BUT_LOCKED;
+ }
+ if (is_develop() && is_product_build()) {
+ jio_snprintf(buf, buflen, "Error: VM option '%s' is develop and is available only in debug version of VM.\n",
+ _name);
+ return JVMFlag::DEVELOPER_FLAG_BUT_PRODUCT_BUILD;
+ }
+ if (is_notproduct() && is_product_build()) {
+ jio_snprintf(buf, buflen, "Error: VM option '%s' is notproduct and is available only in debug version of VM.\n",
+ _name);
+ return JVMFlag::NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD;
+ }
+ return get_locked_message_ext(buf, buflen);
+}
+
+bool JVMFlag::is_writeable() const {
+ return is_manageable() || (is_product() && is_read_write()) || is_writeable_ext();
+}
+
+// All flags except "manageable" are assumed to be internal flags.
+// Long term, we need to define a mechanism to specify which flags
+// are external/stable and change this function accordingly.
+bool JVMFlag::is_external() const {
+ return is_manageable() || is_external_ext();
+}
+
+// Helper function for JVMFlag::print_on().
+// Fills current line up to requested position.
+// Should the current position already be past the requested position,
+// one separator blank is enforced.
+void fill_to_pos(outputStream* st, unsigned int req_pos) {
+ if ((unsigned int)st->position() < req_pos) {
+ st->fill_to(req_pos); // need to fill with blanks to reach req_pos
+ } else {
+ st->print(" "); // enforce blank separation. Previous field too long.
+ }
+}
+
+void JVMFlag::print_on(outputStream* st, bool withComments, bool printRanges) {
+ // Don't print notproduct and develop flags in a product build.
+ if (is_constant_in_binary()) {
+ return;
+ }
+
+ if (!printRanges) {
+ // The command line options -XX:+PrintFlags* cause this function to be called
+ // for each existing flag to print information pertinent to this flag. The data
+ // is displayed in columnar form, with the following layout:
+ // col1 - data type, right-justified
+ // col2 - name, left-justified
+ // col3 - ' =' double-char, leading space to align with possible '+='
+ // col4 - value left-justified
+ // col5 - kind right-justified
+ // col6 - origin left-justified
+ // col7 - comments left-justified
+ //
+ // The column widths are fixed. They are defined such that, for most cases,
+ // an eye-pleasing tabular output is created.
+ //
+ // Sample output:
+ // bool CMSScavengeBeforeRemark = false {product} {default}
+ // uintx CMSScheduleRemarkEdenPenetration = 50 {product} {default}
+ // size_t CMSScheduleRemarkEdenSizeThreshold = 2097152 {product} {default}
+ // uintx CMSScheduleRemarkSamplingRatio = 5 {product} {default}
+ // double CMSSmallCoalSurplusPercent = 1.050000 {product} {default}
+ // ccstr CompileCommandFile = MyFile.cmd {product} {command line}
+ // ccstrlist CompileOnly = Method1
+ // CompileOnly += Method2 {product} {command line}
+ // | | | | | | |
+ // | | | | | | +-- col7
+ // | | | | | +-- col6
+ // | | | | +-- col5
+ // | | | +-- col4
+ // | | +-- col3
+ // | +-- col2
+ // +-- col1
+
+ const unsigned int col_spacing = 1;
+ const unsigned int col1_pos = 0;
+ const unsigned int col1_width = 9;
+ const unsigned int col2_pos = col1_pos + col1_width + col_spacing;
+ const unsigned int col2_width = 39;
+ const unsigned int col3_pos = col2_pos + col2_width + col_spacing;
+ const unsigned int col3_width = 2;
+ const unsigned int col4_pos = col3_pos + col3_width + col_spacing;
+ const unsigned int col4_width = 30;
+ const unsigned int col5_pos = col4_pos + col4_width + col_spacing;
+ const unsigned int col5_width = 20;
+ const unsigned int col6_pos = col5_pos + col5_width + col_spacing;
+ const unsigned int col6_width = 15;
+ const unsigned int col7_pos = col6_pos + col6_width + col_spacing;
+ const unsigned int col7_width = 1;
+
+ st->fill_to(col1_pos);
+ st->print("%*s", col1_width, _type); // right-justified, therefore width is required.
+
+ fill_to_pos(st, col2_pos);
+ st->print("%s", _name);
+
+ fill_to_pos(st, col3_pos);
+ st->print(" ="); // use " =" for proper alignment with multiline ccstr output.
+
+ fill_to_pos(st, col4_pos);
+ if (is_bool()) {
+ st->print("%s", get_bool() ? "true" : "false");
+ } else if (is_int()) {
+ st->print("%d", get_int());
+ } else if (is_uint()) {
+ st->print("%u", get_uint());
+ } else if (is_intx()) {
+ st->print(INTX_FORMAT, get_intx());
+ } else if (is_uintx()) {
+ st->print(UINTX_FORMAT, get_uintx());
+ } else if (is_uint64_t()) {
+ st->print(UINT64_FORMAT, get_uint64_t());
+ } else if (is_size_t()) {
+ st->print(SIZE_FORMAT, get_size_t());
+ } else if (is_double()) {
+ st->print("%f", get_double());
+ } else if (is_ccstr()) {
+ // Honor <newline> characters in ccstr: print multiple lines.
+ const char* cp = get_ccstr();
+ if (cp != NULL) {
+ const char* eol;
+ while ((eol = strchr(cp, '\n')) != NULL) {
+ size_t llen = pointer_delta(eol, cp, sizeof(char));
+ st->print("%.*s", (int)llen, cp);
+ st->cr();
+ cp = eol+1;
+ fill_to_pos(st, col2_pos);
+ st->print("%s", _name);
+ fill_to_pos(st, col3_pos);
+ st->print("+=");
+ fill_to_pos(st, col4_pos);
+ }
+ st->print("%s", cp);
+ }
+ } else {
+ st->print("unhandled type %s", _type);
+ st->cr();
+ return;
+ }
+
+ fill_to_pos(st, col5_pos);
+ print_kind(st, col5_width);
+
+ fill_to_pos(st, col6_pos);
+ print_origin(st, col6_width);
+
+#ifndef PRODUCT
+ if (withComments) {
+ fill_to_pos(st, col7_pos);
+ st->print("%s", _doc);
+ }
+#endif
+ st->cr();
+ } else if (!is_bool() && !is_ccstr()) {
+ // The command line options -XX:+PrintFlags* cause this function to be called
+ // for each existing flag to print information pertinent to this flag. The data
+ // is displayed in columnar form, with the following layout:
+ // col1 - data type, right-justified
+ // col2 - name, left-justified
+ // col4 - range [ min ... max]
+ // col5 - kind right-justified
+ // col6 - origin left-justified
+ // col7 - comments left-justified
+ //
+ // The column widths are fixed. They are defined such that, for most cases,
+ // an eye-pleasing tabular output is created.
+ //
+ // Sample output:
+ // intx MinPassesBeforeFlush [ 0 ... 9223372036854775807 ] {diagnostic} {default}
+ // uintx MinRAMFraction [ 1 ... 18446744073709551615 ] {product} {default}
+ // double MinRAMPercentage [ 0.000 ... 100.000 ] {product} {default}
+ // uintx MinSurvivorRatio [ 3 ... 18446744073709551615 ] {product} {default}
+ // size_t MinTLABSize [ 1 ... 9223372036854775807 ] {product} {default}
+ // intx MonitorBound [ 0 ... 2147483647 ] {product} {default}
+ // | | | | | |
+ // | | | | | +-- col7
+ // | | | | +-- col6
+ // | | | +-- col5
+ // | | +-- col4
+ // | +-- col2
+ // +-- col1
+
+ const unsigned int col_spacing = 1;
+ const unsigned int col1_pos = 0;
+ const unsigned int col1_width = 9;
+ const unsigned int col2_pos = col1_pos + col1_width + col_spacing;
+ const unsigned int col2_width = 49;
+ const unsigned int col3_pos = col2_pos + col2_width + col_spacing;
+ const unsigned int col3_width = 0;
+ const unsigned int col4_pos = col3_pos + col3_width + col_spacing;
+ const unsigned int col4_width = 60;
+ const unsigned int col5_pos = col4_pos + col4_width + col_spacing;
+ const unsigned int col5_width = 35;
+ const unsigned int col6_pos = col5_pos + col5_width + col_spacing;
+ const unsigned int col6_width = 15;
+ const unsigned int col7_pos = col6_pos + col6_width + col_spacing;
+ const unsigned int col7_width = 1;
+
+ st->fill_to(col1_pos);
+ st->print("%*s", col1_width, _type); // right-justified, therefore width is required.
+
+ fill_to_pos(st, col2_pos);
+ st->print("%s", _name);
+
+ fill_to_pos(st, col4_pos);
+ RangeStrFunc func = NULL;
+ if (is_int()) {
+ func = JVMFlag::get_int_default_range_str;
+ } else if (is_uint()) {
+ func = JVMFlag::get_uint_default_range_str;
+ } else if (is_intx()) {
+ func = JVMFlag::get_intx_default_range_str;
+ } else if (is_uintx()) {
+ func = JVMFlag::get_uintx_default_range_str;
+ } else if (is_uint64_t()) {
+ func = JVMFlag::get_uint64_t_default_range_str;
+ } else if (is_size_t()) {
+ func = JVMFlag::get_size_t_default_range_str;
+ } else if (is_double()) {
+ func = JVMFlag::get_double_default_range_str;
+ } else {
+ st->print("unhandled type %s", _type);
+ st->cr();
+ return;
+ }
+ JVMFlagRangeList::print(st, _name, func);
+
+ fill_to_pos(st, col5_pos);
+ print_kind(st, col5_width);
+
+ fill_to_pos(st, col6_pos);
+ print_origin(st, col6_width);
+
+#ifndef PRODUCT
+ if (withComments) {
+ fill_to_pos(st, col7_pos);
+ st->print("%s", _doc);
+ }
+#endif
+ st->cr();
+ }
+}
+
+void JVMFlag::print_kind(outputStream* st, unsigned int width) {
+ struct Data {
+ int flag;
+ const char* name;
+ };
+
+ Data data[] = {
+ { KIND_JVMCI, "JVMCI" },
+ { KIND_C1, "C1" },
+ { KIND_C2, "C2" },
+ { KIND_ARCH, "ARCH" },
+ { KIND_PLATFORM_DEPENDENT, "pd" },
+ { KIND_PRODUCT, "product" },
+ { KIND_MANAGEABLE, "manageable" },
+ { KIND_DIAGNOSTIC, "diagnostic" },
+ { KIND_EXPERIMENTAL, "experimental" },
+ { KIND_COMMERCIAL, "commercial" },
+ { KIND_NOT_PRODUCT, "notproduct" },
+ { KIND_DEVELOP, "develop" },
+ { KIND_LP64_PRODUCT, "lp64_product" },
+ { KIND_READ_WRITE, "rw" },
+ { -1, "" }
+ };
+
+ if ((_flags & KIND_MASK) != 0) {
+ bool is_first = true;
+ const size_t buffer_size = 64;
+ size_t buffer_used = 0;
+ char kind[buffer_size];
+
+ jio_snprintf(kind, buffer_size, "{");
+ buffer_used++;
+ for (int i = 0; data[i].flag != -1; i++) {
+ Data d = data[i];
+ if ((_flags & d.flag) != 0) {
+ if (is_first) {
+ is_first = false;
+ } else {
+ assert(buffer_used + 1 < buffer_size, "Too small buffer");
+ jio_snprintf(kind + buffer_used, buffer_size - buffer_used, " ");
+ buffer_used++;
+ }
+ size_t length = strlen(d.name);
+ assert(buffer_used + length < buffer_size, "Too small buffer");
+ jio_snprintf(kind + buffer_used, buffer_size - buffer_used, "%s", d.name);
+ buffer_used += length;
+ }
+ }
+ assert(buffer_used + 2 <= buffer_size, "Too small buffer");
+ jio_snprintf(kind + buffer_used, buffer_size - buffer_used, "}");
+ st->print("%*s", width, kind);
+ }
+}
+
+void JVMFlag::print_origin(outputStream* st, unsigned int width) {
+ int origin = _flags & VALUE_ORIGIN_MASK;
+ st->print("{");
+ switch(origin) {
+ case DEFAULT:
+ st->print("default"); break;
+ case COMMAND_LINE:
+ st->print("command line"); break;
+ case ENVIRON_VAR:
+ st->print("environment"); break;
+ case CONFIG_FILE:
+ st->print("config file"); break;
+ case MANAGEMENT:
+ st->print("management"); break;
+ case ERGONOMIC:
+ if (_flags & ORIG_COMMAND_LINE) {
+ st->print("command line, ");
+ }
+ st->print("ergonomic"); break;
+ case ATTACH_ON_DEMAND:
+ st->print("attach"); break;
+ case INTERNAL:
+ st->print("internal"); break;
+ }
+ st->print("}");
+}
+
+void JVMFlag::print_as_flag(outputStream* st) {
+ if (is_bool()) {
+ st->print("-XX:%s%s", get_bool() ? "+" : "-", _name);
+ } else if (is_int()) {
+ st->print("-XX:%s=%d", _name, get_int());
+ } else if (is_uint()) {
+ st->print("-XX:%s=%u", _name, get_uint());
+ } else if (is_intx()) {
+ st->print("-XX:%s=" INTX_FORMAT, _name, get_intx());
+ } else if (is_uintx()) {
+ st->print("-XX:%s=" UINTX_FORMAT, _name, get_uintx());
+ } else if (is_uint64_t()) {
+ st->print("-XX:%s=" UINT64_FORMAT, _name, get_uint64_t());
+ } else if (is_size_t()) {
+ st->print("-XX:%s=" SIZE_FORMAT, _name, get_size_t());
+ } else if (is_double()) {
+ st->print("-XX:%s=%f", _name, get_double());
+ } else if (is_ccstr()) {
+ st->print("-XX:%s=", _name);
+ const char* cp = get_ccstr();
+ if (cp != NULL) {
+ // Need to turn embedded '\n's back into separate arguments
+ // Not so efficient to print one character at a time,
+ // but the choice is to do the transformation to a buffer
+ // and print that. And this need not be efficient.
+ for (; *cp != '\0'; cp += 1) {
+ switch (*cp) {
+ default:
+ st->print("%c", *cp);
+ break;
+ case '\n':
+ st->print(" -XX:%s=", _name);
+ break;
+ }
+ }
+ }
+ } else {
+ ShouldNotReachHere();
+ }
+}
+
+const char* JVMFlag::flag_error_str(JVMFlag::Error error) {
+ switch (error) {
+ case JVMFlag::MISSING_NAME: return "MISSING_NAME";
+ case JVMFlag::MISSING_VALUE: return "MISSING_VALUE";
+ case JVMFlag::NON_WRITABLE: return "NON_WRITABLE";
+ case JVMFlag::OUT_OF_BOUNDS: return "OUT_OF_BOUNDS";
+ case JVMFlag::VIOLATES_CONSTRAINT: return "VIOLATES_CONSTRAINT";
+ case JVMFlag::INVALID_FLAG: return "INVALID_FLAG";
+ case JVMFlag::ERR_OTHER: return "ERR_OTHER";
+ case JVMFlag::SUCCESS: return "SUCCESS";
+ default: ShouldNotReachHere(); return "NULL";
+ }
+}
+
+// 4991491 do not "optimize out" the was_set false values: omitting them
+// tickles a Microsoft compiler bug causing flagTable to be malformed
+
+#define RUNTIME_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_PRODUCT) },
+#define RUNTIME_PD_PRODUCT_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_PRODUCT | JVMFlag::KIND_PLATFORM_DEPENDENT) },
+#define RUNTIME_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_DIAGNOSTIC) },
+#define RUNTIME_PD_DIAGNOSTIC_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_DIAGNOSTIC | JVMFlag::KIND_PLATFORM_DEPENDENT) },
+#define RUNTIME_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_EXPERIMENTAL) },
+#define RUNTIME_MANAGEABLE_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_MANAGEABLE) },
+#define RUNTIME_PRODUCT_RW_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_PRODUCT | JVMFlag::KIND_READ_WRITE) },
+#define RUNTIME_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_DEVELOP) },
+#define RUNTIME_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_DEVELOP | JVMFlag::KIND_PLATFORM_DEPENDENT) },
+#define RUNTIME_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_NOT_PRODUCT) },
+
+#define JVMCI_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JVMCI | JVMFlag::KIND_PRODUCT) },
+#define JVMCI_PD_PRODUCT_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JVMCI | JVMFlag::KIND_PRODUCT | JVMFlag::KIND_PLATFORM_DEPENDENT) },
+#define JVMCI_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JVMCI | JVMFlag::KIND_DIAGNOSTIC) },
+#define JVMCI_PD_DIAGNOSTIC_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JVMCI | JVMFlag::KIND_DIAGNOSTIC | JVMFlag::KIND_PLATFORM_DEPENDENT) },
+#define JVMCI_EXPERIMENTAL_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JVMCI | JVMFlag::KIND_EXPERIMENTAL) },
+#define JVMCI_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JVMCI | JVMFlag::KIND_DEVELOP) },
+#define JVMCI_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JVMCI | JVMFlag::KIND_DEVELOP | JVMFlag::KIND_PLATFORM_DEPENDENT) },
+#define JVMCI_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_JVMCI | JVMFlag::KIND_NOT_PRODUCT) },
+
+#ifdef _LP64
+#define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_LP64_PRODUCT) },
+#else
+#define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
+#endif // _LP64
+
+#define C1_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_C1 | JVMFlag::KIND_PRODUCT) },
+#define C1_PD_PRODUCT_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_C1 | JVMFlag::KIND_PRODUCT | JVMFlag::KIND_PLATFORM_DEPENDENT) },
+#define C1_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_C1 | JVMFlag::KIND_DIAGNOSTIC) },
+#define C1_PD_DIAGNOSTIC_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_C1 | JVMFlag::KIND_DIAGNOSTIC | JVMFlag::KIND_PLATFORM_DEPENDENT) },
+#define C1_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_C1 | JVMFlag::KIND_DEVELOP) },
+#define C1_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_C1 | JVMFlag::KIND_DEVELOP | JVMFlag::KIND_PLATFORM_DEPENDENT) },
+#define C1_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_C1 | JVMFlag::KIND_NOT_PRODUCT) },
+
+#define C2_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_C2 | JVMFlag::KIND_PRODUCT) },
+#define C2_PD_PRODUCT_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_C2 | JVMFlag::KIND_PRODUCT | JVMFlag::KIND_PLATFORM_DEPENDENT) },
+#define C2_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_C2 | JVMFlag::KIND_DIAGNOSTIC) },
+#define C2_PD_DIAGNOSTIC_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_C2 | JVMFlag::KIND_DIAGNOSTIC | JVMFlag::KIND_PLATFORM_DEPENDENT) },
+#define C2_EXPERIMENTAL_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_C2 | JVMFlag::KIND_EXPERIMENTAL) },
+#define C2_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_C2 | JVMFlag::KIND_DEVELOP) },
+#define C2_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_C2 | JVMFlag::KIND_DEVELOP | JVMFlag::KIND_PLATFORM_DEPENDENT) },
+#define C2_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_C2 | JVMFlag::KIND_NOT_PRODUCT) },
+
+#define ARCH_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_ARCH | JVMFlag::KIND_PRODUCT) },
+#define ARCH_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_ARCH | JVMFlag::KIND_DIAGNOSTIC) },
+#define ARCH_EXPERIMENTAL_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_ARCH | JVMFlag::KIND_EXPERIMENTAL) },
+#define ARCH_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_ARCH | JVMFlag::KIND_DEVELOP) },
+#define ARCH_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) JVMFlag::Flags(JVMFlag::DEFAULT | JVMFlag::KIND_ARCH | JVMFlag::KIND_NOT_PRODUCT) },
+
+static JVMFlag flagTable[] = {
+ VM_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, \
+ RUNTIME_PD_DEVELOP_FLAG_STRUCT, \
+ RUNTIME_PRODUCT_FLAG_STRUCT, \
+ RUNTIME_PD_PRODUCT_FLAG_STRUCT, \
+ RUNTIME_DIAGNOSTIC_FLAG_STRUCT, \
+ RUNTIME_PD_DIAGNOSTIC_FLAG_STRUCT, \
+ RUNTIME_EXPERIMENTAL_FLAG_STRUCT, \
+ RUNTIME_NOTPRODUCT_FLAG_STRUCT, \
+ RUNTIME_MANAGEABLE_FLAG_STRUCT, \
+ RUNTIME_PRODUCT_RW_FLAG_STRUCT, \
+ RUNTIME_LP64_PRODUCT_FLAG_STRUCT, \
+ IGNORE_RANGE, \
+ IGNORE_CONSTRAINT, \
+ IGNORE_WRITEABLE)
+
+ RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, \
+ RUNTIME_PD_DEVELOP_FLAG_STRUCT, \
+ RUNTIME_PRODUCT_FLAG_STRUCT, \
+ RUNTIME_PD_PRODUCT_FLAG_STRUCT, \
+ RUNTIME_DIAGNOSTIC_FLAG_STRUCT, \
+ RUNTIME_PD_DIAGNOSTIC_FLAG_STRUCT, \
+ RUNTIME_NOTPRODUCT_FLAG_STRUCT, \
+ IGNORE_RANGE, \
+ IGNORE_CONSTRAINT, \
+ IGNORE_WRITEABLE)
+#if INCLUDE_JVMCI
+ JVMCI_FLAGS(JVMCI_DEVELOP_FLAG_STRUCT, \
+ JVMCI_PD_DEVELOP_FLAG_STRUCT, \
+ JVMCI_PRODUCT_FLAG_STRUCT, \
+ JVMCI_PD_PRODUCT_FLAG_STRUCT, \
+ JVMCI_DIAGNOSTIC_FLAG_STRUCT, \
+ JVMCI_PD_DIAGNOSTIC_FLAG_STRUCT, \
+ JVMCI_EXPERIMENTAL_FLAG_STRUCT, \
+ JVMCI_NOTPRODUCT_FLAG_STRUCT, \
+ IGNORE_RANGE, \
+ IGNORE_CONSTRAINT, \
+ IGNORE_WRITEABLE)
+#endif // INCLUDE_JVMCI
+#ifdef COMPILER1
+ C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, \
+ C1_PD_DEVELOP_FLAG_STRUCT, \
+ C1_PRODUCT_FLAG_STRUCT, \
+ C1_PD_PRODUCT_FLAG_STRUCT, \
+ C1_DIAGNOSTIC_FLAG_STRUCT, \
+ C1_PD_DIAGNOSTIC_FLAG_STRUCT, \
+ C1_NOTPRODUCT_FLAG_STRUCT, \
+ IGNORE_RANGE, \
+ IGNORE_CONSTRAINT, \
+ IGNORE_WRITEABLE)
+#endif // COMPILER1
+#ifdef COMPILER2
+ C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, \
+ C2_PD_DEVELOP_FLAG_STRUCT, \
+ C2_PRODUCT_FLAG_STRUCT, \
+ C2_PD_PRODUCT_FLAG_STRUCT, \
+ C2_DIAGNOSTIC_FLAG_STRUCT, \
+ C2_PD_DIAGNOSTIC_FLAG_STRUCT, \
+ C2_EXPERIMENTAL_FLAG_STRUCT, \
+ C2_NOTPRODUCT_FLAG_STRUCT, \
+ IGNORE_RANGE, \
+ IGNORE_CONSTRAINT, \
+ IGNORE_WRITEABLE)
+#endif // COMPILER2
+ ARCH_FLAGS(ARCH_DEVELOP_FLAG_STRUCT, \
+ ARCH_PRODUCT_FLAG_STRUCT, \
+ ARCH_DIAGNOSTIC_FLAG_STRUCT, \
+ ARCH_EXPERIMENTAL_FLAG_STRUCT, \
+ ARCH_NOTPRODUCT_FLAG_STRUCT, \
+ IGNORE_RANGE, \
+ IGNORE_CONSTRAINT, \
+ IGNORE_WRITEABLE)
+ FLAGTABLE_EXT
+ {0, NULL, NULL}
+};
+
+JVMFlag* JVMFlag::flags = flagTable;
+size_t JVMFlag::numFlags = (sizeof(flagTable) / sizeof(JVMFlag));
+
+inline bool str_equal(const char* s, size_t s_len, const char* q, size_t q_len) {
+ if (s_len != q_len) return false;
+ return memcmp(s, q, q_len) == 0;
+}
+
+// Search the flag table for a named flag
+JVMFlag* JVMFlag::find_flag(const char* name, size_t length, bool allow_locked, bool return_flag) {
+ for (JVMFlag* current = &flagTable[0]; current->_name != NULL; current++) {
+ if (str_equal(current->_name, current->get_name_length(), name, length)) {
+ // Found a matching entry.
+ // Don't report notproduct and develop flags in product builds.
+ if (current->is_constant_in_binary()) {
+ return (return_flag ? current : NULL);
+ }
+ // Report locked flags only if allowed.
+ if (!(current->is_unlocked() || current->is_unlocker())) {
+ if (!allow_locked) {
+ // disable use of locked flags, e.g. diagnostic, experimental,
+ // commercial... until they are explicitly unlocked
+ return NULL;
+ }
+ }
+ return current;
+ }
+ }
+ // JVMFlag name is not in the flag table
+ return NULL;
+}
+
+// Get or compute the flag name length
+size_t JVMFlag::get_name_length() {
+ if (_name_len == 0) {
+ _name_len = strlen(_name);
+ }
+ return _name_len;
+}
+
+JVMFlag* JVMFlag::fuzzy_match(const char* name, size_t length, bool allow_locked) {
+ float VMOptionsFuzzyMatchSimilarity = 0.7f;
+ JVMFlag* match = NULL;
+ float score;
+ float max_score = -1;
+
+ for (JVMFlag* current = &flagTable[0]; current->_name != NULL; current++) {
+ score = StringUtils::similarity(current->_name, strlen(current->_name), name, length);
+ if (score > max_score) {
+ max_score = score;
+ match = current;
+ }
+ }
+
+ if (!(match->is_unlocked() || match->is_unlocker())) {
+ if (!allow_locked) {
+ return NULL;
+ }
+ }
+
+ if (max_score < VMOptionsFuzzyMatchSimilarity) {
+ return NULL;
+ }
+
+ return match;
+}
+
+// Returns the address of the index'th element
+static JVMFlag* address_of_flag(JVMFlagsWithType flag) {
+ assert((size_t)flag < JVMFlag::numFlags, "bad command line flag index");
+ return &JVMFlag::flags[flag];
+}
+
+bool JVMFlagEx::is_default(JVMFlags flag) {
+ assert((size_t)flag < JVMFlag::numFlags, "bad command line flag index");
+ JVMFlag* f = &JVMFlag::flags[flag];
+ return f->is_default();
+}
+
+bool JVMFlagEx::is_ergo(JVMFlags flag) {
+ assert((size_t)flag < JVMFlag::numFlags, "bad command line flag index");
+ JVMFlag* f = &JVMFlag::flags[flag];
+ return f->is_ergonomic();
+}
+
+bool JVMFlagEx::is_cmdline(JVMFlags flag) {
+ assert((size_t)flag < JVMFlag::numFlags, "bad command line flag index");
+ JVMFlag* f = &JVMFlag::flags[flag];
+ return f->is_command_line();
+}
+
+bool JVMFlag::wasSetOnCmdline(const char* name, bool* value) {
+ JVMFlag* result = JVMFlag::find_flag((char*)name, strlen(name));
+ if (result == NULL) return false;
+ *value = result->is_command_line();
+ return true;
+}
+
+void JVMFlagEx::setOnCmdLine(JVMFlagsWithType flag) {
+ JVMFlag* faddr = address_of_flag(flag);
+ assert(faddr != NULL, "Unknown flag");
+ faddr->set_command_line();
+}
+
+template<class E, class T>
+static void trace_flag_changed(const char* name, const T old_value, const T new_value, const JVMFlag::Flags origin) {
+ E e;
+ e.set_name(name);
+ e.set_oldValue(old_value);
+ e.set_newValue(new_value);
+ e.set_origin(origin);
+ e.commit();
+}
+
+static JVMFlag::Error apply_constraint_and_check_range_bool(const char* name, bool new_value, bool verbose) {
+ JVMFlag::Error status = JVMFlag::SUCCESS;
+ JVMFlagConstraint* constraint = JVMFlagConstraintList::find_if_needs_check(name);
+ if (constraint != NULL) {
+ status = constraint->apply_bool(new_value, verbose);
+ }
+ return status;
+}
+
+JVMFlag::Error JVMFlag::boolAt(const char* name, size_t len, bool* value, bool allow_locked, bool return_flag) {
+ JVMFlag* result = JVMFlag::find_flag(name, len, allow_locked, return_flag);
+ if (result == NULL) return JVMFlag::INVALID_FLAG;
+ if (!result->is_bool()) return JVMFlag::WRONG_FORMAT;
+ *value = result->get_bool();
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error JVMFlag::boolAtPut(JVMFlag* flag, bool* value, JVMFlag::Flags origin) {
+ const char* name;
+ if (flag == NULL) return JVMFlag::INVALID_FLAG;
+ if (!flag->is_bool()) return JVMFlag::WRONG_FORMAT;
+ name = flag->_name;
+ JVMFlag::Error check = apply_constraint_and_check_range_bool(name, *value, !JVMFlagConstraintList::validated_after_ergo());
+ if (check != JVMFlag::SUCCESS) return check;
+ bool old_value = flag->get_bool();
+ trace_flag_changed<EventBooleanFlagChanged, bool>(name, old_value, *value, origin);
+ check = flag->set_bool(*value);
+ *value = old_value;
+ flag->set_origin(origin);
+ return check;
+}
+
+JVMFlag::Error JVMFlag::boolAtPut(const char* name, size_t len, bool* value, JVMFlag::Flags origin) {
+ JVMFlag* result = JVMFlag::find_flag(name, len);
+ return boolAtPut(result, value, origin);
+}
+
+JVMFlag::Error JVMFlagEx::boolAtPut(JVMFlagsWithType flag, bool value, JVMFlag::Flags origin) {
+ JVMFlag* faddr = address_of_flag(flag);
+ guarantee(faddr != NULL && faddr->is_bool(), "wrong flag type");
+ return JVMFlag::boolAtPut(faddr, &value, origin);
+}
+
+static JVMFlag::Error apply_constraint_and_check_range_int(const char* name, int new_value, bool verbose) {
+ JVMFlag::Error status = JVMFlag::SUCCESS;
+ JVMFlagRange* range = JVMFlagRangeList::find(name);
+ if (range != NULL) {
+ status = range->check_int(new_value, verbose);
+ }
+ if (status == JVMFlag::SUCCESS) {
+ JVMFlagConstraint* constraint = JVMFlagConstraintList::find_if_needs_check(name);
+ if (constraint != NULL) {
+ status = constraint->apply_int(new_value, verbose);
+ }
+ }
+ return status;
+}
+
+JVMFlag::Error JVMFlag::intAt(const char* name, size_t len, int* value, bool allow_locked, bool return_flag) {
+ JVMFlag* result = JVMFlag::find_flag(name, len, allow_locked, return_flag);
+ if (result == NULL) return JVMFlag::INVALID_FLAG;
+ if (!result->is_int()) return JVMFlag::WRONG_FORMAT;
+ *value = result->get_int();
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error JVMFlag::intAtPut(JVMFlag* flag, int* value, JVMFlag::Flags origin) {
+ const char* name;
+ if (flag == NULL) return JVMFlag::INVALID_FLAG;
+ if (!flag->is_int()) return JVMFlag::WRONG_FORMAT;
+ name = flag->_name;
+ JVMFlag::Error check = apply_constraint_and_check_range_int(name, *value, !JVMFlagConstraintList::validated_after_ergo());
+ if (check != JVMFlag::SUCCESS) return check;
+ int old_value = flag->get_int();
+ trace_flag_changed<EventIntFlagChanged, s4>(name, old_value, *value, origin);
+ check = flag->set_int(*value);
+ *value = old_value;
+ flag->set_origin(origin);
+ return check;
+}
+
+JVMFlag::Error JVMFlag::intAtPut(const char* name, size_t len, int* value, JVMFlag::Flags origin) {
+ JVMFlag* result = JVMFlag::find_flag(name, len);
+ return intAtPut(result, value, origin);
+}
+
+JVMFlag::Error JVMFlagEx::intAtPut(JVMFlagsWithType flag, int value, JVMFlag::Flags origin) {
+ JVMFlag* faddr = address_of_flag(flag);
+ guarantee(faddr != NULL && faddr->is_int(), "wrong flag type");
+ return JVMFlag::intAtPut(faddr, &value, origin);
+}
+
+static JVMFlag::Error apply_constraint_and_check_range_uint(const char* name, uint new_value, bool verbose) {
+ JVMFlag::Error status = JVMFlag::SUCCESS;
+ JVMFlagRange* range = JVMFlagRangeList::find(name);
+ if (range != NULL) {
+ status = range->check_uint(new_value, verbose);
+ }
+ if (status == JVMFlag::SUCCESS) {
+ JVMFlagConstraint* constraint = JVMFlagConstraintList::find_if_needs_check(name);
+ if (constraint != NULL) {
+ status = constraint->apply_uint(new_value, verbose);
+ }
+ }
+ return status;
+}
+
+JVMFlag::Error JVMFlag::uintAt(const char* name, size_t len, uint* value, bool allow_locked, bool return_flag) {
+ JVMFlag* result = JVMFlag::find_flag(name, len, allow_locked, return_flag);
+ if (result == NULL) return JVMFlag::INVALID_FLAG;
+ if (!result->is_uint()) return JVMFlag::WRONG_FORMAT;
+ *value = result->get_uint();
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error JVMFlag::uintAtPut(JVMFlag* flag, uint* value, JVMFlag::Flags origin) {
+ const char* name;
+ if (flag == NULL) return JVMFlag::INVALID_FLAG;
+ if (!flag->is_uint()) return JVMFlag::WRONG_FORMAT;
+ name = flag->_name;
+ JVMFlag::Error check = apply_constraint_and_check_range_uint(name, *value, !JVMFlagConstraintList::validated_after_ergo());
+ if (check != JVMFlag::SUCCESS) return check;
+ uint old_value = flag->get_uint();
+ trace_flag_changed<EventUnsignedIntFlagChanged, u4>(name, old_value, *value, origin);
+ check = flag->set_uint(*value);
+ *value = old_value;
+ flag->set_origin(origin);
+ return check;
+}
+
+JVMFlag::Error JVMFlag::uintAtPut(const char* name, size_t len, uint* value, JVMFlag::Flags origin) {
+ JVMFlag* result = JVMFlag::find_flag(name, len);
+ return uintAtPut(result, value, origin);
+}
+
+JVMFlag::Error JVMFlagEx::uintAtPut(JVMFlagsWithType flag, uint value, JVMFlag::Flags origin) {
+ JVMFlag* faddr = address_of_flag(flag);
+ guarantee(faddr != NULL && faddr->is_uint(), "wrong flag type");
+ return JVMFlag::uintAtPut(faddr, &value, origin);
+}
+
+JVMFlag::Error JVMFlag::intxAt(const char* name, size_t len, intx* value, bool allow_locked, bool return_flag) {
+ JVMFlag* result = JVMFlag::find_flag(name, len, allow_locked, return_flag);
+ if (result == NULL) return JVMFlag::INVALID_FLAG;
+ if (!result->is_intx()) return JVMFlag::WRONG_FORMAT;
+ *value = result->get_intx();
+ return JVMFlag::SUCCESS;
+}
+
+static JVMFlag::Error apply_constraint_and_check_range_intx(const char* name, intx new_value, bool verbose) {
+ JVMFlag::Error status = JVMFlag::SUCCESS;
+ JVMFlagRange* range = JVMFlagRangeList::find(name);
+ if (range != NULL) {
+ status = range->check_intx(new_value, verbose);
+ }
+ if (status == JVMFlag::SUCCESS) {
+ JVMFlagConstraint* constraint = JVMFlagConstraintList::find_if_needs_check(name);
+ if (constraint != NULL) {
+ status = constraint->apply_intx(new_value, verbose);
+ }
+ }
+ return status;
+}
+
+JVMFlag::Error JVMFlag::intxAtPut(JVMFlag* flag, intx* value, JVMFlag::Flags origin) {
+ const char* name;
+ if (flag == NULL) return JVMFlag::INVALID_FLAG;
+ if (!flag->is_intx()) return JVMFlag::WRONG_FORMAT;
+ name = flag->_name;
+ JVMFlag::Error check = apply_constraint_and_check_range_intx(name, *value, !JVMFlagConstraintList::validated_after_ergo());
+ if (check != JVMFlag::SUCCESS) return check;
+ intx old_value = flag->get_intx();
+ trace_flag_changed<EventLongFlagChanged, intx>(name, old_value, *value, origin);
+ check = flag->set_intx(*value);
+ *value = old_value;
+ flag->set_origin(origin);
+ return check;
+}
+
+JVMFlag::Error JVMFlag::intxAtPut(const char* name, size_t len, intx* value, JVMFlag::Flags origin) {
+ JVMFlag* result = JVMFlag::find_flag(name, len);
+ return intxAtPut(result, value, origin);
+}
+
+JVMFlag::Error JVMFlagEx::intxAtPut(JVMFlagsWithType flag, intx value, JVMFlag::Flags origin) {
+ JVMFlag* faddr = address_of_flag(flag);
+ guarantee(faddr != NULL && faddr->is_intx(), "wrong flag type");
+ return JVMFlag::intxAtPut(faddr, &value, origin);
+}
+
+JVMFlag::Error JVMFlag::uintxAt(const char* name, size_t len, uintx* value, bool allow_locked, bool return_flag) {
+ JVMFlag* result = JVMFlag::find_flag(name, len, allow_locked, return_flag);
+ if (result == NULL) return JVMFlag::INVALID_FLAG;
+ if (!result->is_uintx()) return JVMFlag::WRONG_FORMAT;
+ *value = result->get_uintx();
+ return JVMFlag::SUCCESS;
+}
+
+static JVMFlag::Error apply_constraint_and_check_range_uintx(const char* name, uintx new_value, bool verbose) {
+ JVMFlag::Error status = JVMFlag::SUCCESS;
+ JVMFlagRange* range = JVMFlagRangeList::find(name);
+ if (range != NULL) {
+ status = range->check_uintx(new_value, verbose);
+ }
+ if (status == JVMFlag::SUCCESS) {
+ JVMFlagConstraint* constraint = JVMFlagConstraintList::find_if_needs_check(name);
+ if (constraint != NULL) {
+ status = constraint->apply_uintx(new_value, verbose);
+ }
+ }
+ return status;
+}
+
+JVMFlag::Error JVMFlag::uintxAtPut(JVMFlag* flag, uintx* value, JVMFlag::Flags origin) {
+ const char* name;
+ if (flag == NULL) return JVMFlag::INVALID_FLAG;
+ if (!flag->is_uintx()) return JVMFlag::WRONG_FORMAT;
+ name = flag->_name;
+ JVMFlag::Error check = apply_constraint_and_check_range_uintx(name, *value, !JVMFlagConstraintList::validated_after_ergo());
+ if (check != JVMFlag::SUCCESS) return check;
+ uintx old_value = flag->get_uintx();
+ trace_flag_changed<EventUnsignedLongFlagChanged, u8>(name, old_value, *value, origin);
+ check = flag->set_uintx(*value);
+ *value = old_value;
+ flag->set_origin(origin);
+ return check;
+}
+
+JVMFlag::Error JVMFlag::uintxAtPut(const char* name, size_t len, uintx* value, JVMFlag::Flags origin) {
+ JVMFlag* result = JVMFlag::find_flag(name, len);
+ return uintxAtPut(result, value, origin);
+}
+
+JVMFlag::Error JVMFlagEx::uintxAtPut(JVMFlagsWithType flag, uintx value, JVMFlag::Flags origin) {
+ JVMFlag* faddr = address_of_flag(flag);
+ guarantee(faddr != NULL && faddr->is_uintx(), "wrong flag type");
+ return JVMFlag::uintxAtPut(faddr, &value, origin);
+}
+
+JVMFlag::Error JVMFlag::uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked, bool return_flag) {
+ JVMFlag* result = JVMFlag::find_flag(name, len, allow_locked, return_flag);
+ if (result == NULL) return JVMFlag::INVALID_FLAG;
+ if (!result->is_uint64_t()) return JVMFlag::WRONG_FORMAT;
+ *value = result->get_uint64_t();
+ return JVMFlag::SUCCESS;
+}
+
+static JVMFlag::Error apply_constraint_and_check_range_uint64_t(const char* name, uint64_t new_value, bool verbose) {
+ JVMFlag::Error status = JVMFlag::SUCCESS;
+ JVMFlagRange* range = JVMFlagRangeList::find(name);
+ if (range != NULL) {
+ status = range->check_uint64_t(new_value, verbose);
+ }
+ if (status == JVMFlag::SUCCESS) {
+ JVMFlagConstraint* constraint = JVMFlagConstraintList::find_if_needs_check(name);
+ if (constraint != NULL) {
+ status = constraint->apply_uint64_t(new_value, verbose);
+ }
+ }
+ return status;
+}
+
+JVMFlag::Error JVMFlag::uint64_tAtPut(JVMFlag* flag, uint64_t* value, JVMFlag::Flags origin) {
+ const char* name;
+ if (flag == NULL) return JVMFlag::INVALID_FLAG;
+ if (!flag->is_uint64_t()) return JVMFlag::WRONG_FORMAT;
+ name = flag->_name;
+ JVMFlag::Error check = apply_constraint_and_check_range_uint64_t(name, *value, !JVMFlagConstraintList::validated_after_ergo());
+ if (check != JVMFlag::SUCCESS) return check;
+ uint64_t old_value = flag->get_uint64_t();
+ trace_flag_changed<EventUnsignedLongFlagChanged, u8>(name, old_value, *value, origin);
+ check = flag->set_uint64_t(*value);
+ *value = old_value;
+ flag->set_origin(origin);
+ return check;
+}
+
+JVMFlag::Error JVMFlag::uint64_tAtPut(const char* name, size_t len, uint64_t* value, JVMFlag::Flags origin) {
+ JVMFlag* result = JVMFlag::find_flag(name, len);
+ return uint64_tAtPut(result, value, origin);
+}
+
+JVMFlag::Error JVMFlagEx::uint64_tAtPut(JVMFlagsWithType flag, uint64_t value, JVMFlag::Flags origin) {
+ JVMFlag* faddr = address_of_flag(flag);
+ guarantee(faddr != NULL && faddr->is_uint64_t(), "wrong flag type");
+ return JVMFlag::uint64_tAtPut(faddr, &value, origin);
+}
+
+JVMFlag::Error JVMFlag::size_tAt(const char* name, size_t len, size_t* value, bool allow_locked, bool return_flag) {
+ JVMFlag* result = JVMFlag::find_flag(name, len, allow_locked, return_flag);
+ if (result == NULL) return JVMFlag::INVALID_FLAG;
+ if (!result->is_size_t()) return JVMFlag::WRONG_FORMAT;
+ *value = result->get_size_t();
+ return JVMFlag::SUCCESS;
+}
+
+static JVMFlag::Error apply_constraint_and_check_range_size_t(const char* name, size_t new_value, bool verbose) {
+ JVMFlag::Error status = JVMFlag::SUCCESS;
+ JVMFlagRange* range = JVMFlagRangeList::find(name);
+ if (range != NULL) {
+ status = range->check_size_t(new_value, verbose);
+ }
+ if (status == JVMFlag::SUCCESS) {
+ JVMFlagConstraint* constraint = JVMFlagConstraintList::find_if_needs_check(name);
+ if (constraint != NULL) {
+ status = constraint->apply_size_t(new_value, verbose);
+ }
+ }
+ return status;
+}
+
+
+JVMFlag::Error JVMFlag::size_tAtPut(JVMFlag* flag, size_t* value, JVMFlag::Flags origin) {
+ const char* name;
+ if (flag == NULL) return JVMFlag::INVALID_FLAG;
+ if (!flag->is_size_t()) return JVMFlag::WRONG_FORMAT;
+ name = flag->_name;
+ JVMFlag::Error check = apply_constraint_and_check_range_size_t(name, *value, !JVMFlagConstraintList::validated_after_ergo());
+ if (check != JVMFlag::SUCCESS) return check;
+ size_t old_value = flag->get_size_t();
+ trace_flag_changed<EventUnsignedLongFlagChanged, u8>(name, old_value, *value, origin);
+ check = flag->set_size_t(*value);
+ *value = old_value;
+ flag->set_origin(origin);
+ return check;
+}
+
+JVMFlag::Error JVMFlag::size_tAtPut(const char* name, size_t len, size_t* value, JVMFlag::Flags origin) {
+ JVMFlag* result = JVMFlag::find_flag(name, len);
+ return size_tAtPut(result, value, origin);
+}
+
+JVMFlag::Error JVMFlagEx::size_tAtPut(JVMFlagsWithType flag, size_t value, JVMFlag::Flags origin) {
+ JVMFlag* faddr = address_of_flag(flag);
+ guarantee(faddr != NULL && faddr->is_size_t(), "wrong flag type");
+ return JVMFlag::size_tAtPut(faddr, &value, origin);
+}
+
+JVMFlag::Error JVMFlag::doubleAt(const char* name, size_t len, double* value, bool allow_locked, bool return_flag) {
+ JVMFlag* result = JVMFlag::find_flag(name, len, allow_locked, return_flag);
+ if (result == NULL) return JVMFlag::INVALID_FLAG;
+ if (!result->is_double()) return JVMFlag::WRONG_FORMAT;
+ *value = result->get_double();
+ return JVMFlag::SUCCESS;
+}
+
+static JVMFlag::Error apply_constraint_and_check_range_double(const char* name, double new_value, bool verbose) {
+ JVMFlag::Error status = JVMFlag::SUCCESS;
+ JVMFlagRange* range = JVMFlagRangeList::find(name);
+ if (range != NULL) {
+ status = range->check_double(new_value, verbose);
+ }
+ if (status == JVMFlag::SUCCESS) {
+ JVMFlagConstraint* constraint = JVMFlagConstraintList::find_if_needs_check(name);
+ if (constraint != NULL) {
+ status = constraint->apply_double(new_value, verbose);
+ }
+ }
+ return status;
+}
+
+JVMFlag::Error JVMFlag::doubleAtPut(JVMFlag* flag, double* value, JVMFlag::Flags origin) {
+ const char* name;
+ if (flag == NULL) return JVMFlag::INVALID_FLAG;
+ if (!flag->is_double()) return JVMFlag::WRONG_FORMAT;
+ name = flag->_name;
+ JVMFlag::Error check = apply_constraint_and_check_range_double(name, *value, !JVMFlagConstraintList::validated_after_ergo());
+ if (check != JVMFlag::SUCCESS) return check;
+ double old_value = flag->get_double();
+ trace_flag_changed<EventDoubleFlagChanged, double>(name, old_value, *value, origin);
+ check = flag->set_double(*value);
+ *value = old_value;
+ flag->set_origin(origin);
+ return check;
+}
+
+JVMFlag::Error JVMFlag::doubleAtPut(const char* name, size_t len, double* value, JVMFlag::Flags origin) {
+ JVMFlag* result = JVMFlag::find_flag(name, len);
+ return doubleAtPut(result, value, origin);
+}
+
+JVMFlag::Error JVMFlagEx::doubleAtPut(JVMFlagsWithType flag, double value, JVMFlag::Flags origin) {
+ JVMFlag* faddr = address_of_flag(flag);
+ guarantee(faddr != NULL && faddr->is_double(), "wrong flag type");
+ return JVMFlag::doubleAtPut(faddr, &value, origin);
+}
+
+JVMFlag::Error JVMFlag::ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked, bool return_flag) {
+ JVMFlag* result = JVMFlag::find_flag(name, len, allow_locked, return_flag);
+ if (result == NULL) return JVMFlag::INVALID_FLAG;
+ if (!result->is_ccstr()) return JVMFlag::WRONG_FORMAT;
+ *value = result->get_ccstr();
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error JVMFlag::ccstrAtPut(const char* name, size_t len, ccstr* value, JVMFlag::Flags origin) {
+ JVMFlag* result = JVMFlag::find_flag(name, len);
+ if (result == NULL) return JVMFlag::INVALID_FLAG;
+ if (!result->is_ccstr()) return JVMFlag::WRONG_FORMAT;
+ ccstr old_value = result->get_ccstr();
+ trace_flag_changed<EventStringFlagChanged, const char*>(name, old_value, *value, origin);
+ char* new_value = NULL;
+ if (*value != NULL) {
+ new_value = os::strdup_check_oom(*value);
+ }
+ JVMFlag::Error check = result->set_ccstr(new_value);
+ if (result->is_default() && old_value != NULL) {
+ // Prior value is NOT heap allocated, but was a literal constant.
+ old_value = os::strdup_check_oom(old_value);
+ }
+ *value = old_value;
+ result->set_origin(origin);
+ return check;
+}
+
+JVMFlag::Error JVMFlagEx::ccstrAtPut(JVMFlagsWithType flag, ccstr value, JVMFlag::Flags origin) {
+ JVMFlag* faddr = address_of_flag(flag);
+ guarantee(faddr != NULL && faddr->is_ccstr(), "wrong flag type");
+ ccstr old_value = faddr->get_ccstr();
+ trace_flag_changed<EventStringFlagChanged, const char*>(faddr->_name, old_value, value, origin);
+ char* new_value = os::strdup_check_oom(value);
+ JVMFlag::Error check = faddr->set_ccstr(new_value);
+ if (!faddr->is_default() && old_value != NULL) {
+ // Prior value is heap allocated so free it.
+ FREE_C_HEAP_ARRAY(char, old_value);
+ }
+ faddr->set_origin(origin);
+ return check;
+}
+
+extern "C" {
+ static int compare_flags(const void* void_a, const void* void_b) {
+ return strcmp((*((JVMFlag**) void_a))->_name, (*((JVMFlag**) void_b))->_name);
+ }
+}
+
+void JVMFlag::printSetFlags(outputStream* out) {
+ // Print which flags were set on the command line
+ // note: this method is called before the thread structure is in place
+ // which means resource allocation cannot be used.
+
+ // The last entry is the null entry.
+ const size_t length = JVMFlag::numFlags - 1;
+
+ // Sort
+ JVMFlag** array = NEW_C_HEAP_ARRAY(JVMFlag*, length, mtArguments);
+ for (size_t i = 0; i < length; i++) {
+ array[i] = &flagTable[i];
+ }
+ qsort(array, length, sizeof(JVMFlag*), compare_flags);
+
+ // Print
+ for (size_t i = 0; i < length; i++) {
+ if (array[i]->get_origin() /* naked field! */) {
+ array[i]->print_as_flag(out);
+ out->print(" ");
+ }
+ }
+ out->cr();
+ FREE_C_HEAP_ARRAY(JVMFlag*, array);
+}
+
+#ifndef PRODUCT
+
+void JVMFlag::verify() {
+ assert(Arguments::check_vm_args_consistency(), "Some flag settings conflict");
+}
+
+#endif // PRODUCT
+
+void JVMFlag::printFlags(outputStream* out, bool withComments, bool printRanges) {
+ // Print the flags sorted by name
+ // note: this method is called before the thread structure is in place
+ // which means resource allocation cannot be used.
+
+ // The last entry is the null entry.
+ const size_t length = JVMFlag::numFlags - 1;
+
+ // Sort
+ JVMFlag** array = NEW_C_HEAP_ARRAY(JVMFlag*, length, mtArguments);
+ for (size_t i = 0; i < length; i++) {
+ array[i] = &flagTable[i];
+ }
+ qsort(array, length, sizeof(JVMFlag*), compare_flags);
+
+ // Print
+ if (!printRanges) {
+ out->print_cr("[Global flags]");
+ } else {
+ out->print_cr("[Global flags ranges]");
+ }
+
+ for (size_t i = 0; i < length; i++) {
+ if (array[i]->is_unlocked()) {
+ array[i]->print_on(out, withComments, printRanges);
+ }
+ }
+ FREE_C_HEAP_ARRAY(JVMFlag*, array);
+}
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/flags/jvmFlag.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,283 @@
+/*
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_FLAGS_JVMFLAG_HPP
+#define SHARE_VM_RUNTIME_FLAGS_JVMFLAG_HPP
+
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+// function type that will construct default range string
+typedef const char* (*RangeStrFunc)(void);
+
+struct JVMFlag {
+ enum Flags {
+ // latest value origin
+ DEFAULT = 0,
+ COMMAND_LINE = 1,
+ ENVIRON_VAR = 2,
+ CONFIG_FILE = 3,
+ MANAGEMENT = 4,
+ ERGONOMIC = 5,
+ ATTACH_ON_DEMAND = 6,
+ INTERNAL = 7,
+
+ LAST_VALUE_ORIGIN = INTERNAL,
+ VALUE_ORIGIN_BITS = 4,
+ VALUE_ORIGIN_MASK = right_n_bits(VALUE_ORIGIN_BITS),
+
+ // flag kind
+ KIND_PRODUCT = 1 << 4,
+ KIND_MANAGEABLE = 1 << 5,
+ KIND_DIAGNOSTIC = 1 << 6,
+ KIND_EXPERIMENTAL = 1 << 7,
+ KIND_NOT_PRODUCT = 1 << 8,
+ KIND_DEVELOP = 1 << 9,
+ KIND_PLATFORM_DEPENDENT = 1 << 10,
+ KIND_READ_WRITE = 1 << 11,
+ KIND_C1 = 1 << 12,
+ KIND_C2 = 1 << 13,
+ KIND_ARCH = 1 << 14,
+ KIND_LP64_PRODUCT = 1 << 15,
+ KIND_COMMERCIAL = 1 << 16,
+ KIND_JVMCI = 1 << 17,
+
+ // set this bit if the flag was set on the command line
+ ORIG_COMMAND_LINE = 1 << 18,
+
+ KIND_MASK = ~(VALUE_ORIGIN_MASK | ORIG_COMMAND_LINE)
+ };
+
+ enum Error {
+ // no error
+ SUCCESS = 0,
+ // flag name is missing
+ MISSING_NAME,
+ // flag value is missing
+ MISSING_VALUE,
+ // error parsing the textual form of the value
+ WRONG_FORMAT,
+ // flag is not writable
+ NON_WRITABLE,
+ // flag value is outside of its bounds
+ OUT_OF_BOUNDS,
+ // flag value violates its constraint
+ VIOLATES_CONSTRAINT,
+ // there is no flag with the given name
+ INVALID_FLAG,
+ // the flag can only be set only on command line during invocation of the VM
+ COMMAND_LINE_ONLY,
+ // the flag may only be set once
+ SET_ONLY_ONCE,
+ // the flag is not writable in this combination of product/debug build
+ CONSTANT,
+ // other, unspecified error related to setting the flag
+ ERR_OTHER
+ };
+
+ enum MsgType {
+ NONE = 0,
+ DIAGNOSTIC_FLAG_BUT_LOCKED,
+ EXPERIMENTAL_FLAG_BUT_LOCKED,
+ DEVELOPER_FLAG_BUT_PRODUCT_BUILD,
+ NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD,
+ COMMERCIAL_FLAG_BUT_DISABLED,
+ COMMERCIAL_FLAG_BUT_LOCKED
+ };
+
+ const char* _type;
+ const char* _name;
+ void* _addr;
+ NOT_PRODUCT(const char* _doc;)
+ Flags _flags;
+ size_t _name_len;
+
+ // points to all Flags static array
+ static JVMFlag* flags;
+
+ // number of flags
+ static size_t numFlags;
+
+ static JVMFlag* find_flag(const char* name) { return find_flag(name, strlen(name), true, true); };
+ static JVMFlag* find_flag(const char* name, size_t length, bool allow_locked = false, bool return_flag = false);
+ static JVMFlag* fuzzy_match(const char* name, size_t length, bool allow_locked = false);
+
+ static const char* get_int_default_range_str();
+ static const char* get_uint_default_range_str();
+ static const char* get_intx_default_range_str();
+ static const char* get_uintx_default_range_str();
+ static const char* get_uint64_t_default_range_str();
+ static const char* get_size_t_default_range_str();
+ static const char* get_double_default_range_str();
+
+ JVMFlag::Error check_writable(bool changed);
+
+ bool is_bool() const;
+ bool get_bool() const;
+ JVMFlag::Error set_bool(bool value);
+
+ bool is_int() const;
+ int get_int() const;
+ JVMFlag::Error set_int(int value);
+
+ bool is_uint() const;
+ uint get_uint() const;
+ JVMFlag::Error set_uint(uint value);
+
+ bool is_intx() const;
+ intx get_intx() const;
+ JVMFlag::Error set_intx(intx value);
+
+ bool is_uintx() const;
+ uintx get_uintx() const;
+ JVMFlag::Error set_uintx(uintx value);
+
+ bool is_uint64_t() const;
+ uint64_t get_uint64_t() const;
+ JVMFlag::Error set_uint64_t(uint64_t value);
+
+ bool is_size_t() const;
+ size_t get_size_t() const;
+ JVMFlag::Error set_size_t(size_t value);
+
+ bool is_double() const;
+ double get_double() const;
+ JVMFlag::Error set_double(double value);
+
+ bool is_ccstr() const;
+ bool ccstr_accumulates() const;
+ ccstr get_ccstr() const;
+ JVMFlag::Error set_ccstr(ccstr value);
+
+ Flags get_origin();
+ void set_origin(Flags origin);
+
+ size_t get_name_length();
+
+ bool is_default();
+ bool is_ergonomic();
+ bool is_command_line();
+ void set_command_line();
+
+ bool is_product() const;
+ bool is_manageable() const;
+ bool is_diagnostic() const;
+ bool is_experimental() const;
+ bool is_notproduct() const;
+ bool is_develop() const;
+ bool is_read_write() const;
+ bool is_commercial() const;
+
+ bool is_constant_in_binary() const;
+
+ bool is_unlocker() const;
+ bool is_unlocked() const;
+ bool is_writeable() const;
+ bool is_external() const;
+
+ bool is_unlocker_ext() const;
+ bool is_unlocked_ext() const;
+ bool is_writeable_ext() const;
+ bool is_external_ext() const;
+
+ void clear_diagnostic();
+
+ JVMFlag::MsgType get_locked_message(char*, int) const;
+ JVMFlag::MsgType get_locked_message_ext(char*, int) const;
+
+ // printRanges will print out flags type, name and range values as expected by -XX:+PrintFlagsRanges
+ void print_on(outputStream* st, bool withComments = false, bool printRanges = false);
+ void print_kind(outputStream* st, unsigned int width);
+ void print_origin(outputStream* st, unsigned int width);
+ void print_as_flag(outputStream* st);
+
+ static const char* flag_error_str(JVMFlag::Error error);
+
+public:
+ static JVMFlag::Error boolAt(const char* name, size_t len, bool* value, bool allow_locked = false, bool return_flag = false);
+ static JVMFlag::Error boolAt(const char* name, bool* value, bool allow_locked = false, bool return_flag = false) { return boolAt(name, strlen(name), value, allow_locked, return_flag); }
+ static JVMFlag::Error boolAtPut(JVMFlag* flag, bool* value, JVMFlag::Flags origin);
+ static JVMFlag::Error boolAtPut(const char* name, size_t len, bool* value, JVMFlag::Flags origin);
+ static JVMFlag::Error boolAtPut(const char* name, bool* value, JVMFlag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); }
+
+ static JVMFlag::Error intAt(const char* name, size_t len, int* value, bool allow_locked = false, bool return_flag = false);
+ static JVMFlag::Error intAt(const char* name, int* value, bool allow_locked = false, bool return_flag = false) { return intAt(name, strlen(name), value, allow_locked, return_flag); }
+ static JVMFlag::Error intAtPut(JVMFlag* flag, int* value, JVMFlag::Flags origin);
+ static JVMFlag::Error intAtPut(const char* name, size_t len, int* value, JVMFlag::Flags origin);
+ static JVMFlag::Error intAtPut(const char* name, int* value, JVMFlag::Flags origin) { return intAtPut(name, strlen(name), value, origin); }
+
+ static JVMFlag::Error uintAt(const char* name, size_t len, uint* value, bool allow_locked = false, bool return_flag = false);
+ static JVMFlag::Error uintAt(const char* name, uint* value, bool allow_locked = false, bool return_flag = false) { return uintAt(name, strlen(name), value, allow_locked, return_flag); }
+ static JVMFlag::Error uintAtPut(JVMFlag* flag, uint* value, JVMFlag::Flags origin);
+ static JVMFlag::Error uintAtPut(const char* name, size_t len, uint* value, JVMFlag::Flags origin);
+ static JVMFlag::Error uintAtPut(const char* name, uint* value, JVMFlag::Flags origin) { return uintAtPut(name, strlen(name), value, origin); }
+
+ static JVMFlag::Error intxAt(const char* name, size_t len, intx* value, bool allow_locked = false, bool return_flag = false);
+ static JVMFlag::Error intxAt(const char* name, intx* value, bool allow_locked = false, bool return_flag = false) { return intxAt(name, strlen(name), value, allow_locked, return_flag); }
+ static JVMFlag::Error intxAtPut(JVMFlag* flag, intx* value, JVMFlag::Flags origin);
+ static JVMFlag::Error intxAtPut(const char* name, size_t len, intx* value, JVMFlag::Flags origin);
+ static JVMFlag::Error intxAtPut(const char* name, intx* value, JVMFlag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); }
+
+ static JVMFlag::Error uintxAt(const char* name, size_t len, uintx* value, bool allow_locked = false, bool return_flag = false);
+ static JVMFlag::Error uintxAt(const char* name, uintx* value, bool allow_locked = false, bool return_flag = false) { return uintxAt(name, strlen(name), value, allow_locked, return_flag); }
+ static JVMFlag::Error uintxAtPut(JVMFlag* flag, uintx* value, JVMFlag::Flags origin);
+ static JVMFlag::Error uintxAtPut(const char* name, size_t len, uintx* value, JVMFlag::Flags origin);
+ static JVMFlag::Error uintxAtPut(const char* name, uintx* value, JVMFlag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); }
+
+ static JVMFlag::Error size_tAt(const char* name, size_t len, size_t* value, bool allow_locked = false, bool return_flag = false);
+ static JVMFlag::Error size_tAt(const char* name, size_t* value, bool allow_locked = false, bool return_flag = false) { return size_tAt(name, strlen(name), value, allow_locked, return_flag); }
+ static JVMFlag::Error size_tAtPut(JVMFlag* flag, size_t* value, JVMFlag::Flags origin);
+ static JVMFlag::Error size_tAtPut(const char* name, size_t len, size_t* value, JVMFlag::Flags origin);
+ static JVMFlag::Error size_tAtPut(const char* name, size_t* value, JVMFlag::Flags origin) { return size_tAtPut(name, strlen(name), value, origin); }
+
+ static JVMFlag::Error uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked = false, bool return_flag = false);
+ static JVMFlag::Error uint64_tAt(const char* name, uint64_t* value, bool allow_locked = false, bool return_flag = false) { return uint64_tAt(name, strlen(name), value, allow_locked, return_flag); }
+ static JVMFlag::Error uint64_tAtPut(JVMFlag* flag, uint64_t* value, JVMFlag::Flags origin);
+ static JVMFlag::Error uint64_tAtPut(const char* name, size_t len, uint64_t* value, JVMFlag::Flags origin);
+ static JVMFlag::Error uint64_tAtPut(const char* name, uint64_t* value, JVMFlag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
+
+ static JVMFlag::Error doubleAt(const char* name, size_t len, double* value, bool allow_locked = false, bool return_flag = false);
+ static JVMFlag::Error doubleAt(const char* name, double* value, bool allow_locked = false, bool return_flag = false) { return doubleAt(name, strlen(name), value, allow_locked, return_flag); }
+ static JVMFlag::Error doubleAtPut(JVMFlag* flag, double* value, JVMFlag::Flags origin);
+ static JVMFlag::Error doubleAtPut(const char* name, size_t len, double* value, JVMFlag::Flags origin);
+ static JVMFlag::Error doubleAtPut(const char* name, double* value, JVMFlag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); }
+
+ static JVMFlag::Error ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked = false, bool return_flag = false);
+ static JVMFlag::Error ccstrAt(const char* name, ccstr* value, bool allow_locked = false, bool return_flag = false) { return ccstrAt(name, strlen(name), value, allow_locked, return_flag); }
+ // Contract: JVMFlag will make private copy of the incoming value.
+ // Outgoing value is always malloc-ed, and caller MUST call free.
+ static JVMFlag::Error ccstrAtPut(const char* name, size_t len, ccstr* value, JVMFlag::Flags origin);
+ static JVMFlag::Error ccstrAtPut(const char* name, ccstr* value, JVMFlag::Flags origin) { return ccstrAtPut(name, strlen(name), value, origin); }
+
+ // Returns false if name is not a command line flag.
+ static bool wasSetOnCmdline(const char* name, bool* value);
+ static void printSetFlags(outputStream* out);
+
+ // printRanges will print out flags type, name and range values as expected by -XX:+PrintFlagsRanges
+ static void printFlags(outputStream* out, bool withComments, bool printRanges = false);
+
+ static void verify() PRODUCT_RETURN;
+};
+
+#endif // SHARE_VM_RUNTIME_FLAGS_JVMFLAG_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/flags/jvmFlagConstraintList.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/stringTable.hpp"
+#include "classfile/symbolTable.hpp"
+#include "gc/shared/jvmFlagConstraintsGC.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/flags/jvmFlag.hpp"
+#include "runtime/flags/jvmFlagConstraintList.hpp"
+#include "runtime/flags/jvmFlagConstraintsCompiler.hpp"
+#include "runtime/flags/jvmFlagConstraintsRuntime.hpp"
+#include "runtime/os.hpp"
+#include "utilities/macros.hpp"
+#ifdef COMPILER1
+#include "c1/c1_globals.hpp"
+#endif
+#ifdef COMPILER2
+#include "opto/c2_globals.hpp"
+#endif
+
+class JVMFlagConstraint_bool : public JVMFlagConstraint {
+ JVMFlagConstraintFunc_bool _constraint;
+ const bool* _ptr;
+
+public:
+ // the "name" argument must be a string literal
+ JVMFlagConstraint_bool(const char* name, const bool* ptr,
+ JVMFlagConstraintFunc_bool func,
+ ConstraintType type) : JVMFlagConstraint(name, type), _constraint(func), _ptr(ptr) {}
+
+ JVMFlag::Error apply(bool verbose) {
+ bool value = *_ptr;
+ return _constraint(value, verbose);
+ }
+
+ JVMFlag::Error apply_bool(bool value, bool verbose) {
+ return _constraint(value, verbose);
+ }
+};
+
+class JVMFlagConstraint_int : public JVMFlagConstraint {
+ JVMFlagConstraintFunc_int _constraint;
+ const int* _ptr;
+
+public:
+ // the "name" argument must be a string literal
+ JVMFlagConstraint_int(const char* name, const int* ptr,
+ JVMFlagConstraintFunc_int func,
+ ConstraintType type) : JVMFlagConstraint(name, type), _constraint(func), _ptr(ptr) {}
+
+ JVMFlag::Error apply(bool verbose) {
+ int value = *_ptr;
+ return _constraint(value, verbose);
+ }
+
+ JVMFlag::Error apply_int(int value, bool verbose) {
+ return _constraint(value, verbose);
+ }
+};
+
+class JVMFlagConstraint_intx : public JVMFlagConstraint {
+ JVMFlagConstraintFunc_intx _constraint;
+ const intx* _ptr;
+
+public:
+ // the "name" argument must be a string literal
+ JVMFlagConstraint_intx(const char* name, const intx* ptr,
+ JVMFlagConstraintFunc_intx func,
+ ConstraintType type) : JVMFlagConstraint(name, type), _constraint(func), _ptr(ptr) {}
+
+ JVMFlag::Error apply(bool verbose) {
+ intx value = *_ptr;
+ return _constraint(value, verbose);
+ }
+
+ JVMFlag::Error apply_intx(intx value, bool verbose) {
+ return _constraint(value, verbose);
+ }
+};
+
+class JVMFlagConstraint_uint : public JVMFlagConstraint {
+ JVMFlagConstraintFunc_uint _constraint;
+ const uint* _ptr;
+
+public:
+ // the "name" argument must be a string literal
+ JVMFlagConstraint_uint(const char* name, const uint* ptr,
+ JVMFlagConstraintFunc_uint func,
+ ConstraintType type) : JVMFlagConstraint(name, type), _constraint(func), _ptr(ptr) {}
+
+ JVMFlag::Error apply(bool verbose) {
+ uint value = *_ptr;
+ return _constraint(value, verbose);
+ }
+
+ JVMFlag::Error apply_uint(uint value, bool verbose) {
+ return _constraint(value, verbose);
+ }
+};
+
+class JVMFlagConstraint_uintx : public JVMFlagConstraint {
+ JVMFlagConstraintFunc_uintx _constraint;
+ const uintx* _ptr;
+
+public:
+ // the "name" argument must be a string literal
+ JVMFlagConstraint_uintx(const char* name, const uintx* ptr,
+ JVMFlagConstraintFunc_uintx func,
+ ConstraintType type) : JVMFlagConstraint(name, type), _constraint(func), _ptr(ptr) {}
+
+ JVMFlag::Error apply(bool verbose) {
+ uintx value = *_ptr;
+ return _constraint(value, verbose);
+ }
+
+ JVMFlag::Error apply_uintx(uintx value, bool verbose) {
+ return _constraint(value, verbose);
+ }
+};
+
+class JVMFlagConstraint_uint64_t : public JVMFlagConstraint {
+ JVMFlagConstraintFunc_uint64_t _constraint;
+ const uint64_t* _ptr;
+
+public:
+ // the "name" argument must be a string literal
+ JVMFlagConstraint_uint64_t(const char* name, const uint64_t* ptr,
+ JVMFlagConstraintFunc_uint64_t func,
+ ConstraintType type) : JVMFlagConstraint(name, type), _constraint(func), _ptr(ptr) {}
+
+ JVMFlag::Error apply(bool verbose) {
+ uint64_t value = *_ptr;
+ return _constraint(value, verbose);
+ }
+
+ JVMFlag::Error apply_uint64_t(uint64_t value, bool verbose) {
+ return _constraint(value, verbose);
+ }
+};
+
+class JVMFlagConstraint_size_t : public JVMFlagConstraint {
+ JVMFlagConstraintFunc_size_t _constraint;
+ const size_t* _ptr;
+public:
+ // the "name" argument must be a string literal
+ JVMFlagConstraint_size_t(const char* name, const size_t* ptr,
+ JVMFlagConstraintFunc_size_t func,
+ ConstraintType type) : JVMFlagConstraint(name, type), _constraint(func), _ptr(ptr) {}
+
+ JVMFlag::Error apply(bool verbose) {
+ size_t value = *_ptr;
+ return _constraint(value, verbose);
+ }
+
+ JVMFlag::Error apply_size_t(size_t value, bool verbose) {
+ return _constraint(value, verbose);
+ }
+};
+
+class JVMFlagConstraint_double : public JVMFlagConstraint {
+ JVMFlagConstraintFunc_double _constraint;
+ const double* _ptr;
+
+public:
+ // the "name" argument must be a string literal
+ JVMFlagConstraint_double(const char* name, const double* ptr,
+ JVMFlagConstraintFunc_double func,
+ ConstraintType type) : JVMFlagConstraint(name, type), _constraint(func), _ptr(ptr) {}
+
+ JVMFlag::Error apply(bool verbose) {
+ double value = *_ptr;
+ return _constraint(value, verbose);
+ }
+
+ JVMFlag::Error apply_double(double value, bool verbose) {
+ return _constraint(value, verbose);
+ }
+};
+
+// No constraint emitting
+void emit_constraint_no(...) { /* NOP */ }
+
+// No constraint emitting if function argument is NOT provided
+void emit_constraint_bool(const char* /*name*/, const bool* /*value*/) { /* NOP */ }
+void emit_constraint_ccstr(const char* /*name*/, const ccstr* /*value*/) { /* NOP */ }
+void emit_constraint_ccstrlist(const char* /*name*/, const ccstrlist* /*value*/) { /* NOP */ }
+void emit_constraint_int(const char* /*name*/, const int* /*value*/) { /* NOP */ }
+void emit_constraint_intx(const char* /*name*/, const intx* /*value*/) { /* NOP */ }
+void emit_constraint_uint(const char* /*name*/, const uint* /*value*/) { /* NOP */ }
+void emit_constraint_uintx(const char* /*name*/, const uintx* /*value*/) { /* NOP */ }
+void emit_constraint_uint64_t(const char* /*name*/, const uint64_t* /*value*/) { /* NOP */ }
+void emit_constraint_size_t(const char* /*name*/, const size_t* /*value*/) { /* NOP */ }
+void emit_constraint_double(const char* /*name*/, const double* /*value*/) { /* NOP */ }
+
+// JVMFlagConstraint emitting code functions if function argument is provided
+void emit_constraint_bool(const char* name, const bool* ptr, JVMFlagConstraintFunc_bool func, JVMFlagConstraint::ConstraintType type) {
+ JVMFlagConstraintList::add(new JVMFlagConstraint_bool(name, ptr, func, type));
+}
+void emit_constraint_int(const char* name, const int* ptr, JVMFlagConstraintFunc_int func, JVMFlagConstraint::ConstraintType type) {
+ JVMFlagConstraintList::add(new JVMFlagConstraint_int(name, ptr, func, type));
+}
+void emit_constraint_intx(const char* name, const intx* ptr, JVMFlagConstraintFunc_intx func, JVMFlagConstraint::ConstraintType type) {
+ JVMFlagConstraintList::add(new JVMFlagConstraint_intx(name, ptr, func, type));
+}
+void emit_constraint_uint(const char* name, const uint* ptr, JVMFlagConstraintFunc_uint func, JVMFlagConstraint::ConstraintType type) {
+ JVMFlagConstraintList::add(new JVMFlagConstraint_uint(name, ptr, func, type));
+}
+void emit_constraint_uintx(const char* name, const uintx* ptr, JVMFlagConstraintFunc_uintx func, JVMFlagConstraint::ConstraintType type) {
+ JVMFlagConstraintList::add(new JVMFlagConstraint_uintx(name, ptr, func, type));
+}
+void emit_constraint_uint64_t(const char* name, const uint64_t* ptr, JVMFlagConstraintFunc_uint64_t func, JVMFlagConstraint::ConstraintType type) {
+ JVMFlagConstraintList::add(new JVMFlagConstraint_uint64_t(name, ptr, func, type));
+}
+void emit_constraint_size_t(const char* name, const size_t* ptr, JVMFlagConstraintFunc_size_t func, JVMFlagConstraint::ConstraintType type) {
+ JVMFlagConstraintList::add(new JVMFlagConstraint_size_t(name, ptr, func, type));
+}
+void emit_constraint_double(const char* name, const double* ptr, JVMFlagConstraintFunc_double func, JVMFlagConstraint::ConstraintType type) {
+ JVMFlagConstraintList::add(new JVMFlagConstraint_double(name, ptr, func, type));
+}
+
+// Generate code to call emit_constraint_xxx function
+#define EMIT_CONSTRAINT_PRODUCT_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
+#define EMIT_CONSTRAINT_COMMERCIAL_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
+#define EMIT_CONSTRAINT_DIAGNOSTIC_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
+#define EMIT_CONSTRAINT_EXPERIMENTAL_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
+#define EMIT_CONSTRAINT_MANAGEABLE_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
+#define EMIT_CONSTRAINT_PRODUCT_RW_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
+#define EMIT_CONSTRAINT_PD_PRODUCT_FLAG(type, name, doc) ); emit_constraint_##type(#name,&name
+#define EMIT_CONSTRAINT_PD_DIAGNOSTIC_FLAG(type, name, doc) ); emit_constraint_##type(#name,&name
+#ifndef PRODUCT
+#define EMIT_CONSTRAINT_DEVELOPER_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
+#define EMIT_CONSTRAINT_PD_DEVELOPER_FLAG(type, name, doc) ); emit_constraint_##type(#name,&name
+#define EMIT_CONSTRAINT_NOTPRODUCT_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
+#else
+#define EMIT_CONSTRAINT_DEVELOPER_FLAG(type, name, value, doc) ); emit_constraint_no(#name,&name
+#define EMIT_CONSTRAINT_PD_DEVELOPER_FLAG(type, name, doc) ); emit_constraint_no(#name,&name
+#define EMIT_CONSTRAINT_NOTPRODUCT_FLAG(type, name, value, doc) ); emit_constraint_no(#name,&name
+#endif
+#ifdef _LP64
+#define EMIT_CONSTRAINT_LP64_PRODUCT_FLAG(type, name, value, doc) ); emit_constraint_##type(#name,&name
+#else
+#define EMIT_CONSTRAINT_LP64_PRODUCT_FLAG(type, name, value, doc) ); emit_constraint_no(#name,&name
+#endif
+
+// Generate func argument to pass into emit_constraint_xxx functions
+#define EMIT_CONSTRAINT_CHECK(func, type) , func, JVMFlagConstraint::type
+
+// the "name" argument must be a string literal
+#define INITIAL_CONSTRAINTS_SIZE 72
+GrowableArray<JVMFlagConstraint*>* JVMFlagConstraintList::_constraints = NULL;
+JVMFlagConstraint::ConstraintType JVMFlagConstraintList::_validating_type = JVMFlagConstraint::AtParse;
+
+// Check the ranges of all flags that have them or print them out and exit if requested
+void JVMFlagConstraintList::init(void) {
+ _constraints = new (ResourceObj::C_HEAP, mtArguments) GrowableArray<JVMFlagConstraint*>(INITIAL_CONSTRAINTS_SIZE, true);
+
+ emit_constraint_no(NULL VM_FLAGS(EMIT_CONSTRAINT_DEVELOPER_FLAG,
+ EMIT_CONSTRAINT_PD_DEVELOPER_FLAG,
+ EMIT_CONSTRAINT_PRODUCT_FLAG,
+ EMIT_CONSTRAINT_PD_PRODUCT_FLAG,
+ EMIT_CONSTRAINT_DIAGNOSTIC_FLAG,
+ EMIT_CONSTRAINT_PD_DIAGNOSTIC_FLAG,
+ EMIT_CONSTRAINT_EXPERIMENTAL_FLAG,
+ EMIT_CONSTRAINT_NOTPRODUCT_FLAG,
+ EMIT_CONSTRAINT_MANAGEABLE_FLAG,
+ EMIT_CONSTRAINT_PRODUCT_RW_FLAG,
+ EMIT_CONSTRAINT_LP64_PRODUCT_FLAG,
+ IGNORE_RANGE,
+ EMIT_CONSTRAINT_CHECK,
+ IGNORE_WRITEABLE));
+
+ EMIT_CONSTRAINTS_FOR_GLOBALS_EXT
+
+ emit_constraint_no(NULL ARCH_FLAGS(EMIT_CONSTRAINT_DEVELOPER_FLAG,
+ EMIT_CONSTRAINT_PRODUCT_FLAG,
+ EMIT_CONSTRAINT_DIAGNOSTIC_FLAG,
+ EMIT_CONSTRAINT_EXPERIMENTAL_FLAG,
+ EMIT_CONSTRAINT_NOTPRODUCT_FLAG,
+ IGNORE_RANGE,
+ EMIT_CONSTRAINT_CHECK,
+ IGNORE_WRITEABLE));
+
+
+#ifdef COMPILER1
+ emit_constraint_no(NULL C1_FLAGS(EMIT_CONSTRAINT_DEVELOPER_FLAG,
+ EMIT_CONSTRAINT_PD_DEVELOPER_FLAG,
+ EMIT_CONSTRAINT_PRODUCT_FLAG,
+ EMIT_CONSTRAINT_PD_PRODUCT_FLAG,
+ EMIT_CONSTRAINT_DIAGNOSTIC_FLAG,
+ EMIT_CONSTRAINT_PD_DIAGNOSTIC_FLAG,
+ EMIT_CONSTRAINT_NOTPRODUCT_FLAG,
+ IGNORE_RANGE,
+ EMIT_CONSTRAINT_CHECK,
+ IGNORE_WRITEABLE));
+#endif // COMPILER1
+
+#ifdef COMPILER2
+ emit_constraint_no(NULL C2_FLAGS(EMIT_CONSTRAINT_DEVELOPER_FLAG,
+ EMIT_CONSTRAINT_PD_DEVELOPER_FLAG,
+ EMIT_CONSTRAINT_PRODUCT_FLAG,
+ EMIT_CONSTRAINT_PD_PRODUCT_FLAG,
+ EMIT_CONSTRAINT_DIAGNOSTIC_FLAG,
+ EMIT_CONSTRAINT_PD_DIAGNOSTIC_FLAG,
+ EMIT_CONSTRAINT_EXPERIMENTAL_FLAG,
+ EMIT_CONSTRAINT_NOTPRODUCT_FLAG,
+ IGNORE_RANGE,
+ EMIT_CONSTRAINT_CHECK,
+ IGNORE_WRITEABLE));
+#endif // COMPILER2
+}
+
+JVMFlagConstraint* JVMFlagConstraintList::find(const char* name) {
+ JVMFlagConstraint* found = NULL;
+ for (int i=0; i<length(); i++) {
+ JVMFlagConstraint* constraint = at(i);
+ if (strcmp(constraint->name(), name) == 0) {
+ found = constraint;
+ break;
+ }
+ }
+ return found;
+}
+
+// Find constraints by name and return only if found constraint's type is equal or lower than current validating type.
+JVMFlagConstraint* JVMFlagConstraintList::find_if_needs_check(const char* name) {
+ JVMFlagConstraint* found = NULL;
+ JVMFlagConstraint* constraint = find(name);
+ if (constraint && (constraint->type() <= _validating_type)) {
+ found = constraint;
+ }
+ return found;
+}
+
+// Check constraints for specific constraint type.
+bool JVMFlagConstraintList::check_constraints(JVMFlagConstraint::ConstraintType type) {
+ guarantee(type > _validating_type, "Constraint check is out of order.");
+ _validating_type = type;
+
+ bool status = true;
+ for (int i=0; i<length(); i++) {
+ JVMFlagConstraint* constraint = at(i);
+ if (type != constraint->type()) continue;
+ if (constraint->apply(true) != JVMFlag::SUCCESS) status = false;
+ }
+ return status;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/flags/jvmFlagConstraintList.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_JVMFLAGCONSTRAINTLIST_HPP
+#define SHARE_VM_RUNTIME_JVMFLAGCONSTRAINTLIST_HPP
+
+#include "runtime/flags/jvmFlag.hpp"
+#include "utilities/growableArray.hpp"
+
+/*
+ * Here we have a mechanism for extracting constraints (as custom functions) for flags,
+ * which otherwise can not be expressed via simple range check, specified in flag macro tables.
+ *
+ * An example of a constraint is "flag1 < flag2" where both flag1 and flag2 can change.
+ *
+ * See runtime "runtime/flags/jvmFlagConstraintsCompiler.hpp",
+ * "runtime/flags/jvmFlagConstraintsGC.hpp" and
+ * "runtime/flags/jvmFlagConstraintsRuntime.hpp" for the functions themselves.
+ */
+
+typedef JVMFlag::Error (*JVMFlagConstraintFunc_bool)(bool value, bool verbose);
+typedef JVMFlag::Error (*JVMFlagConstraintFunc_int)(int value, bool verbose);
+typedef JVMFlag::Error (*JVMFlagConstraintFunc_intx)(intx value, bool verbose);
+typedef JVMFlag::Error (*JVMFlagConstraintFunc_uint)(uint value, bool verbose);
+typedef JVMFlag::Error (*JVMFlagConstraintFunc_uintx)(uintx value, bool verbose);
+typedef JVMFlag::Error (*JVMFlagConstraintFunc_uint64_t)(uint64_t value, bool verbose);
+typedef JVMFlag::Error (*JVMFlagConstraintFunc_size_t)(size_t value, bool verbose);
+typedef JVMFlag::Error (*JVMFlagConstraintFunc_double)(double value, bool verbose);
+
+class JVMFlagConstraint : public CHeapObj<mtArguments> {
+public:
+ // During VM initialization, constraint validation will be done order of ConstraintType.
+ enum ConstraintType {
+ // Will be validated during argument processing (Arguments::parse_argument).
+ AtParse = 0,
+ // Will be validated inside Threads::create_vm(), right after Arguments::apply_ergo().
+ AfterErgo = 1,
+ // Will be validated inside universe_init(), right after Metaspace::global_initialize().
+ AfterMemoryInit = 2
+ };
+
+private:
+ const char* _name;
+ ConstraintType _validate_type;
+
+public:
+ // the "name" argument must be a string literal
+ JVMFlagConstraint(const char* name, ConstraintType type) { _name=name; _validate_type=type; };
+ ~JVMFlagConstraint() {};
+ const char* name() const { return _name; }
+ ConstraintType type() const { return _validate_type; }
+ virtual JVMFlag::Error apply(bool verbose = true) { ShouldNotReachHere(); return JVMFlag::ERR_OTHER; };
+ virtual JVMFlag::Error apply_bool(bool value, bool verbose = true) { ShouldNotReachHere(); return JVMFlag::ERR_OTHER; };
+ virtual JVMFlag::Error apply_int(int value, bool verbose = true) { ShouldNotReachHere(); return JVMFlag::ERR_OTHER; };
+ virtual JVMFlag::Error apply_intx(intx value, bool verbose = true) { ShouldNotReachHere(); return JVMFlag::ERR_OTHER; };
+ virtual JVMFlag::Error apply_uint(uint value, bool verbose = true) { ShouldNotReachHere(); return JVMFlag::ERR_OTHER; };
+ virtual JVMFlag::Error apply_uintx(uintx value, bool verbose = true) { ShouldNotReachHere(); return JVMFlag::ERR_OTHER; };
+ virtual JVMFlag::Error apply_uint64_t(uint64_t value, bool verbose = true) { ShouldNotReachHere(); return JVMFlag::ERR_OTHER; };
+ virtual JVMFlag::Error apply_size_t(size_t value, bool verbose = true) { ShouldNotReachHere(); return JVMFlag::ERR_OTHER; };
+ virtual JVMFlag::Error apply_double(double value, bool verbose = true) { ShouldNotReachHere(); return JVMFlag::ERR_OTHER; };
+};
+
+class JVMFlagConstraintList : public AllStatic {
+private:
+ static GrowableArray<JVMFlagConstraint*>* _constraints;
+ // Latest constraint validation type.
+ static JVMFlagConstraint::ConstraintType _validating_type;
+public:
+ static void init();
+ static int length() { return (_constraints != NULL) ? _constraints->length() : 0; }
+ static JVMFlagConstraint* at(int i) { return (_constraints != NULL) ? _constraints->at(i) : NULL; }
+ static JVMFlagConstraint* find(const char* name);
+ static JVMFlagConstraint* find_if_needs_check(const char* name);
+ static void add(JVMFlagConstraint* constraint) { _constraints->append(constraint); }
+ // True if 'AfterErgo' or later constraint functions are validated.
+ static bool validated_after_ergo() { return _validating_type >= JVMFlagConstraint::AfterErgo; };
+ static bool check_constraints(JVMFlagConstraint::ConstraintType type);
+};
+
+#endif /* SHARE_VM_RUNTIME_JVMFLAGCONSTRAINTLIST_HPP */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "code/relocInfo.hpp"
+#include "compiler/compilerDefinitions.hpp"
+#include "oops/metadata.hpp"
+#include "runtime/os.hpp"
+#include "interpreter/invocationCounter.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/flags/jvmFlag.hpp"
+#include "runtime/flags/jvmFlagConstraintsCompiler.hpp"
+#include "runtime/flags/jvmFlagRangeList.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/globals_extension.hpp"
+#include "utilities/defaultStream.hpp"
+
+JVMFlag::Error AliasLevelConstraintFunc(intx value, bool verbose) {
+ if ((value <= 1) && (Arguments::mode() == Arguments::_comp || Arguments::mode() == Arguments::_mixed)) {
+ CommandLineError::print(verbose,
+ "AliasLevel (" INTX_FORMAT ") is not "
+ "compatible with -Xcomp or -Xmixed\n",
+ value);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+/**
+ * Validate the minimum number of compiler threads needed to run the
+ * JVM. The following configurations are possible.
+ *
+ * 1) The JVM is build using an interpreter only. As a result, the minimum number of
+ * compiler threads is 0.
+ * 2) The JVM is build using the compiler(s) and tiered compilation is disabled. As
+ * a result, either C1 or C2 is used, so the minimum number of compiler threads is 1.
+ * 3) The JVM is build using the compiler(s) and tiered compilation is enabled. However,
+ * the option "TieredStopAtLevel < CompLevel_full_optimization". As a result, only
+ * C1 can be used, so the minimum number of compiler threads is 1.
+ * 4) The JVM is build using the compilers and tiered compilation is enabled. The option
+ * 'TieredStopAtLevel = CompLevel_full_optimization' (the default value). As a result,
+ * the minimum number of compiler threads is 2.
+ */
+JVMFlag::Error CICompilerCountConstraintFunc(intx value, bool verbose) {
+ int min_number_of_compiler_threads = 0;
+#if !defined(COMPILER1) && !defined(COMPILER2) && !INCLUDE_JVMCI
+ // case 1
+#else
+ if (!TieredCompilation || (TieredStopAtLevel < CompLevel_full_optimization)) {
+ min_number_of_compiler_threads = 1; // case 2 or case 3
+ } else {
+ min_number_of_compiler_threads = 2; // case 4 (tiered)
+ }
+#endif
+
+ // The default CICompilerCount's value is CI_COMPILER_COUNT.
+ // With a client VM, -XX:+TieredCompilation causes TieredCompilation
+ // to be true here (the option is validated later) and
+ // min_number_of_compiler_threads to exceed CI_COMPILER_COUNT.
+ min_number_of_compiler_threads = MIN2(min_number_of_compiler_threads, CI_COMPILER_COUNT);
+
+ if (value < (intx)min_number_of_compiler_threads) {
+ CommandLineError::print(verbose,
+ "CICompilerCount (" INTX_FORMAT ") must be "
+ "at least %d \n",
+ value, min_number_of_compiler_threads);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error AllocatePrefetchDistanceConstraintFunc(intx value, bool verbose) {
+ if (value < 0 || value > 512) {
+ CommandLineError::print(verbose,
+ "AllocatePrefetchDistance (" INTX_FORMAT ") must be "
+ "between 0 and " INTX_FORMAT "\n",
+ AllocatePrefetchDistance, 512);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error AllocatePrefetchStepSizeConstraintFunc(intx value, bool verbose) {
+ if (AllocatePrefetchStyle == 3) {
+ if (value % wordSize != 0) {
+ CommandLineError::print(verbose,
+ "AllocatePrefetchStepSize (" INTX_FORMAT ") must be multiple of %d\n",
+ value, wordSize);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ }
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error AllocatePrefetchInstrConstraintFunc(intx value, bool verbose) {
+ intx max_value = max_intx;
+#if defined(SPARC)
+ max_value = 1;
+#elif defined(X86)
+ max_value = 3;
+#endif
+ if (value < 0 || value > max_value) {
+ CommandLineError::print(verbose,
+ "AllocatePrefetchInstr (" INTX_FORMAT ") must be "
+ "between 0 and " INTX_FORMAT "\n", value, max_value);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error CompileThresholdConstraintFunc(intx value, bool verbose) {
+ if (value < 0 || value > INT_MAX >> InvocationCounter::count_shift) {
+ CommandLineError::print(verbose,
+ "CompileThreshold (" INTX_FORMAT ") "
+ "must be between 0 and %d\n",
+ value,
+ INT_MAX >> InvocationCounter::count_shift);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error OnStackReplacePercentageConstraintFunc(intx value, bool verbose) {
+ int backward_branch_limit;
+ if (ProfileInterpreter) {
+ if (OnStackReplacePercentage < InterpreterProfilePercentage) {
+ CommandLineError::print(verbose,
+ "OnStackReplacePercentage (" INTX_FORMAT ") must be "
+ "larger than InterpreterProfilePercentage (" INTX_FORMAT ")\n",
+ OnStackReplacePercentage, InterpreterProfilePercentage);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ backward_branch_limit = ((CompileThreshold * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100)
+ << InvocationCounter::count_shift;
+
+ if (backward_branch_limit < 0) {
+ CommandLineError::print(verbose,
+ "CompileThreshold * (InterpreterProfilePercentage - OnStackReplacePercentage) / 100 = "
+ INTX_FORMAT " "
+ "must be between 0 and " INTX_FORMAT ", try changing "
+ "CompileThreshold, InterpreterProfilePercentage, and/or OnStackReplacePercentage\n",
+ (CompileThreshold * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100,
+ INT_MAX >> InvocationCounter::count_shift);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ } else {
+ if (OnStackReplacePercentage < 0 ) {
+ CommandLineError::print(verbose,
+ "OnStackReplacePercentage (" INTX_FORMAT ") must be "
+ "non-negative\n", OnStackReplacePercentage);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ backward_branch_limit = ((CompileThreshold * OnStackReplacePercentage) / 100)
+ << InvocationCounter::count_shift;
+
+ if (backward_branch_limit < 0) {
+ CommandLineError::print(verbose,
+ "CompileThreshold * OnStackReplacePercentage / 100 = " INTX_FORMAT " "
+ "must be between 0 and " INTX_FORMAT ", try changing "
+ "CompileThreshold and/or OnStackReplacePercentage\n",
+ (CompileThreshold * OnStackReplacePercentage) / 100,
+ INT_MAX >> InvocationCounter::count_shift);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ }
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error CodeCacheSegmentSizeConstraintFunc(uintx value, bool verbose) {
+ if (CodeCacheSegmentSize < (uintx)CodeEntryAlignment) {
+ CommandLineError::print(verbose,
+ "CodeCacheSegmentSize (" UINTX_FORMAT ") must be "
+ "larger than or equal to CodeEntryAlignment (" INTX_FORMAT ") "
+ "to align entry points\n",
+ CodeCacheSegmentSize, CodeEntryAlignment);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ if (CodeCacheSegmentSize < sizeof(jdouble)) {
+ CommandLineError::print(verbose,
+ "CodeCacheSegmentSize (" UINTX_FORMAT ") must be "
+ "at least " SIZE_FORMAT " to align constants\n",
+ CodeCacheSegmentSize, sizeof(jdouble));
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+#ifdef COMPILER2
+ if (CodeCacheSegmentSize < (uintx)OptoLoopAlignment) {
+ CommandLineError::print(verbose,
+ "CodeCacheSegmentSize (" UINTX_FORMAT ") must be "
+ "larger than or equal to OptoLoopAlignment (" INTX_FORMAT ") "
+ "to align inner loops\n",
+ CodeCacheSegmentSize, OptoLoopAlignment);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+#endif
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error CompilerThreadPriorityConstraintFunc(intx value, bool verbose) {
+#ifdef SOLARIS
+ if ((value < MinimumPriority || value > MaximumPriority) &&
+ (value != -1) && (value != -FXCriticalPriority)) {
+ CommandLineError::print(verbose,
+ "CompileThreadPriority (" INTX_FORMAT ") must be "
+ "between %d and %d inclusively or -1 (means no change) "
+ "or %d (special value for critical thread class/priority)\n",
+ value, MinimumPriority, MaximumPriority, -FXCriticalPriority);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+#endif
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error CodeEntryAlignmentConstraintFunc(intx value, bool verbose) {
+#ifdef SPARC
+ if (CodeEntryAlignment % relocInfo::addr_unit() != 0) {
+ CommandLineError::print(verbose,
+ "CodeEntryAlignment (" INTX_FORMAT ") must be "
+ "multiple of NOP size\n", CodeEntryAlignment);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+#endif
+
+ if (!is_power_of_2(value)) {
+ CommandLineError::print(verbose,
+ "CodeEntryAlignment (" INTX_FORMAT ") must be "
+ "a power of two\n", CodeEntryAlignment);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ if (CodeEntryAlignment < 16) {
+ CommandLineError::print(verbose,
+ "CodeEntryAlignment (" INTX_FORMAT ") must be "
+ "greater than or equal to %d\n",
+ CodeEntryAlignment, 16);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error OptoLoopAlignmentConstraintFunc(intx value, bool verbose) {
+ if (!is_power_of_2(value)) {
+ CommandLineError::print(verbose,
+ "OptoLoopAlignment (" INTX_FORMAT ") "
+ "must be a power of two\n",
+ value);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ // Relevant on ppc, s390, sparc. Will be optimized where
+ // addr_unit() == 1.
+ if (OptoLoopAlignment % relocInfo::addr_unit() != 0) {
+ CommandLineError::print(verbose,
+ "OptoLoopAlignment (" INTX_FORMAT ") must be "
+ "multiple of NOP size (%d)\n",
+ value, relocInfo::addr_unit());
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error ArraycopyDstPrefetchDistanceConstraintFunc(uintx value, bool verbose) {
+ if (value >= 4032) {
+ CommandLineError::print(verbose,
+ "ArraycopyDstPrefetchDistance (" UINTX_FORMAT ") must be"
+ "between 0 and 4031\n", value);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error ArraycopySrcPrefetchDistanceConstraintFunc(uintx value, bool verbose) {
+ if (value >= 4032) {
+ CommandLineError::print(verbose,
+ "ArraycopySrcPrefetchDistance (" UINTX_FORMAT ") must be"
+ "between 0 and 4031\n", value);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error TypeProfileLevelConstraintFunc(uintx value, bool verbose) {
+ for (int i = 0; i < 3; i++) {
+ if (value % 10 > 2) {
+ CommandLineError::print(verbose,
+ "Invalid value (" UINTX_FORMAT ") "
+ "in TypeProfileLevel at position %d\n", value, i);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ value = value / 10;
+ }
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error InitArrayShortSizeConstraintFunc(intx value, bool verbose) {
+ if (value % BytesPerLong != 0) {
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+#ifdef COMPILER2
+JVMFlag::Error InteriorEntryAlignmentConstraintFunc(intx value, bool verbose) {
+ if (InteriorEntryAlignment > CodeEntryAlignment) {
+ CommandLineError::print(verbose,
+ "InteriorEntryAlignment (" INTX_FORMAT ") must be "
+ "less than or equal to CodeEntryAlignment (" INTX_FORMAT ")\n",
+ InteriorEntryAlignment, CodeEntryAlignment);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+#ifdef SPARC
+ if (InteriorEntryAlignment % relocInfo::addr_unit() != 0) {
+ CommandLineError::print(verbose,
+ "InteriorEntryAlignment (" INTX_FORMAT ") must be "
+ "multiple of NOP size\n");
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+#endif
+
+ if (!is_power_of_2(value)) {
+ CommandLineError::print(verbose,
+ "InteriorEntryAlignment (" INTX_FORMAT ") must be "
+ "a power of two\n", InteriorEntryAlignment);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ int minimum_alignment = 16;
+#if defined(SPARC) || (defined(X86) && !defined(AMD64))
+ minimum_alignment = 4;
+#elif defined(S390)
+ minimum_alignment = 2;
+#endif
+
+ if (InteriorEntryAlignment < minimum_alignment) {
+ CommandLineError::print(verbose,
+ "InteriorEntryAlignment (" INTX_FORMAT ") must be "
+ "greater than or equal to %d\n",
+ InteriorEntryAlignment, minimum_alignment);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ return JVMFlag::SUCCESS;
+}
+
+JVMFlag::Error NodeLimitFudgeFactorConstraintFunc(intx value, bool verbose) {
+ if (value < MaxNodeLimit * 2 / 100 || value > MaxNodeLimit * 40 / 100) {
+ CommandLineError::print(verbose,
+ "NodeLimitFudgeFactor must be between 2%% and 40%% "
+ "of MaxNodeLimit (" INTX_FORMAT ")\n",
+ MaxNodeLimit);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+
+ return JVMFlag::SUCCESS;
+}
+#endif // COMPILER2
+
+JVMFlag::Error RTMTotalCountIncrRateConstraintFunc(int value, bool verbose) {
+#if INCLUDE_RTM_OPT
+ if (UseRTMLocking && !is_power_of_2(RTMTotalCountIncrRate)) {
+ CommandLineError::print(verbose,
+ "RTMTotalCountIncrRate (" INTX_FORMAT
+ ") must be a power of 2, resetting it to 64\n",
+ RTMTotalCountIncrRate);
+ FLAG_SET_DEFAULT(RTMTotalCountIncrRate, 64);
+ }
+#endif
+
+ return JVMFlag::SUCCESS;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_JVMFLAGCONSTRAINTSCOMPILER_HPP
+#define SHARE_VM_RUNTIME_JVMFLAGCONSTRAINTSCOMPILER_HPP
+
+#include "runtime/flags/jvmFlag.hpp"
+
+/*
+ * Here we have compiler arguments constraints functions, which are called automatically
+ * whenever flag's value changes. If the constraint fails the function should return
+ * an appropriate error value.
+ */
+
+JVMFlag::Error AliasLevelConstraintFunc(intx value, bool verbose);
+
+JVMFlag::Error CICompilerCountConstraintFunc(intx value, bool verbose);
+
+JVMFlag::Error AllocatePrefetchDistanceConstraintFunc(intx value, bool verbose);
+
+JVMFlag::Error AllocatePrefetchInstrConstraintFunc(intx value, bool verbose);
+
+JVMFlag::Error AllocatePrefetchStepSizeConstraintFunc(intx value, bool verbose);
+
+JVMFlag::Error CompileThresholdConstraintFunc(intx value, bool verbose);
+
+JVMFlag::Error OnStackReplacePercentageConstraintFunc(intx value, bool verbose);
+
+JVMFlag::Error CodeCacheSegmentSizeConstraintFunc(uintx value, bool verbose);
+
+JVMFlag::Error CompilerThreadPriorityConstraintFunc(intx value, bool verbose);
+
+JVMFlag::Error CodeEntryAlignmentConstraintFunc(intx value, bool verbose);
+
+JVMFlag::Error OptoLoopAlignmentConstraintFunc(intx value, bool verbose);
+
+JVMFlag::Error ArraycopyDstPrefetchDistanceConstraintFunc(uintx value, bool verbose);
+
+JVMFlag::Error ArraycopySrcPrefetchDistanceConstraintFunc(uintx value, bool verbose);
+
+JVMFlag::Error TypeProfileLevelConstraintFunc(uintx value, bool verbose);
+
+JVMFlag::Error InitArrayShortSizeConstraintFunc(intx value, bool verbose);
+
+#ifdef COMPILER2
+JVMFlag::Error InteriorEntryAlignmentConstraintFunc(intx value, bool verbose);
+
+JVMFlag::Error NodeLimitFudgeFactorConstraintFunc(intx value, bool verbose);
+#endif
+
+JVMFlag::Error RTMTotalCountIncrRateConstraintFunc(int value, bool verbose);
+
+#endif /* SHARE_VM_RUNTIME_JVMFLAGCONSTRAINTSCOMPILER_HPP */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/flags/jvmFlagConstraintsRuntime.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/flags/jvmFlag.hpp"
+#include "runtime/flags/jvmFlagConstraintsRuntime.hpp"
+#include "runtime/flags/jvmFlagRangeList.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/safepointMechanism.hpp"
+#include "runtime/task.hpp"
+#include "utilities/defaultStream.hpp"
+
+JVMFlag::Error ObjectAlignmentInBytesConstraintFunc(intx value, bool verbose) {
+ if (!is_power_of_2(value)) {
+ CommandLineError::print(verbose,
+ "ObjectAlignmentInBytes (" INTX_FORMAT ") must be "
+ "power of 2\n",
+ value);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ // In case page size is very small.
+ if (value >= (intx)os::vm_page_size()) {
+ CommandLineError::print(verbose,
+ "ObjectAlignmentInBytes (" INTX_FORMAT ") must be "
+ "less than page size (" INTX_FORMAT ")\n",
+ value, (intx)os::vm_page_size());
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ return JVMFlag::SUCCESS;
+}
+
+// Need to enforce the padding not to break the existing field alignments.
+// It is sufficient to check against the largest type size.
+JVMFlag::Error ContendedPaddingWidthConstraintFunc(intx value, bool verbose) {
+ if ((value % BytesPerLong) != 0) {
+ CommandLineError::print(verbose,
+ "ContendedPaddingWidth (" INTX_FORMAT ") must be "
+ "a multiple of %d\n",
+ value, BytesPerLong);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error BiasedLockingBulkRebiasThresholdFunc(intx value, bool verbose) {
+ if (value > BiasedLockingBulkRevokeThreshold) {
+ CommandLineError::print(verbose,
+ "BiasedLockingBulkRebiasThreshold (" INTX_FORMAT ") must be "
+ "less than or equal to BiasedLockingBulkRevokeThreshold (" INTX_FORMAT ")\n",
+ value, BiasedLockingBulkRevokeThreshold);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error BiasedLockingStartupDelayFunc(intx value, bool verbose) {
+ if ((value % PeriodicTask::interval_gran) != 0) {
+ CommandLineError::print(verbose,
+ "BiasedLockingStartupDelay (" INTX_FORMAT ") must be "
+ "evenly divisible by PeriodicTask::interval_gran (" INTX_FORMAT ")\n",
+ value, PeriodicTask::interval_gran);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error BiasedLockingBulkRevokeThresholdFunc(intx value, bool verbose) {
+ if (value < BiasedLockingBulkRebiasThreshold) {
+ CommandLineError::print(verbose,
+ "BiasedLockingBulkRevokeThreshold (" INTX_FORMAT ") must be "
+ "greater than or equal to BiasedLockingBulkRebiasThreshold (" INTX_FORMAT ")\n",
+ value, BiasedLockingBulkRebiasThreshold);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else if ((double)value/(double)BiasedLockingDecayTime > 0.1) {
+ CommandLineError::print(verbose,
+ "The ratio of BiasedLockingBulkRevokeThreshold (" INTX_FORMAT ")"
+ " to BiasedLockingDecayTime (" INTX_FORMAT ") must be "
+ "less than or equal to 0.1\n",
+ value, BiasedLockingBulkRebiasThreshold);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error BiasedLockingDecayTimeFunc(intx value, bool verbose) {
+ if (BiasedLockingBulkRebiasThreshold/(double)value > 0.1) {
+ CommandLineError::print(verbose,
+ "The ratio of BiasedLockingBulkRebiasThreshold (" INTX_FORMAT ")"
+ " to BiasedLockingDecayTime (" INTX_FORMAT ") must be "
+ "less than or equal to 0.1\n",
+ BiasedLockingBulkRebiasThreshold, value);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error PerfDataSamplingIntervalFunc(intx value, bool verbose) {
+ if ((value % PeriodicTask::interval_gran != 0)) {
+ CommandLineError::print(verbose,
+ "PerfDataSamplingInterval (" INTX_FORMAT ") must be "
+ "evenly divisible by PeriodicTask::interval_gran (" INTX_FORMAT ")\n",
+ value, PeriodicTask::interval_gran);
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+}
+
+JVMFlag::Error ThreadLocalHandshakesConstraintFunc(bool value, bool verbose) {
+ if (value) {
+ if (!SafepointMechanism::supports_thread_local_poll()) {
+ CommandLineError::print(verbose, "ThreadLocalHandshakes not yet supported on this platform\n");
+ return JVMFlag::VIOLATES_CONSTRAINT;
+ }
+ }
+ return JVMFlag::SUCCESS;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/flags/jvmFlagConstraintsRuntime.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_JVMFLAGCONSTRAINTSRUNTIME_HPP
+#define SHARE_VM_RUNTIME_JVMFLAGCONSTRAINTSRUNTIME_HPP
+
+#include "runtime/flags/jvmFlag.hpp"
+
+/*
+ * Here we have runtime arguments constraints functions, which are called automatically
+ * whenever flag's value changes. If the constraint fails the function should return
+ * an appropriate error value.
+ */
+
+JVMFlag::Error ObjectAlignmentInBytesConstraintFunc(intx value, bool verbose);
+
+JVMFlag::Error ContendedPaddingWidthConstraintFunc(intx value, bool verbose);
+
+JVMFlag::Error BiasedLockingBulkRebiasThresholdFunc(intx value, bool verbose);
+JVMFlag::Error BiasedLockingStartupDelayFunc(intx value, bool verbose);
+JVMFlag::Error BiasedLockingBulkRevokeThresholdFunc(intx value, bool verbose);
+JVMFlag::Error BiasedLockingDecayTimeFunc(intx value, bool verbose);
+
+JVMFlag::Error PerfDataSamplingIntervalFunc(intx value, bool verbose);
+
+JVMFlag::Error ThreadLocalHandshakesConstraintFunc(bool value, bool verbose);
+
+
+#endif /* SHARE_VM_RUNTIME_JVMFLAGCONSTRAINTSRUNTIME_HPP */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/flags/jvmFlagRangeList.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "jvm.h"
+#include "classfile/stringTable.hpp"
+#include "classfile/symbolTable.hpp"
+#include "gc/shared/referenceProcessor.hpp"
+#include "oops/markOop.hpp"
+#include "runtime/arguments.hpp"
+#include "runtime/flags/jvmFlag.hpp"
+#include "runtime/flags/jvmFlagConstraintList.hpp"
+#include "runtime/flags/jvmFlagRangeList.hpp"
+#include "runtime/globals_extension.hpp"
+#include "runtime/os.hpp"
+#include "runtime/task.hpp"
+#include "utilities/defaultStream.hpp"
+#include "utilities/macros.hpp"
+
+void CommandLineError::print(bool verbose, const char* msg, ...) {
+ if (verbose) {
+ va_list listPointer;
+ va_start(listPointer, msg);
+ jio_vfprintf(defaultStream::error_stream(), msg, listPointer);
+ va_end(listPointer);
+ }
+}
+
+class JVMFlagRange_int : public JVMFlagRange {
+ int _min;
+ int _max;
+ const int* _ptr;
+
+public:
+ // the "name" argument must be a string literal
+ JVMFlagRange_int(const char* name, const int* ptr, int min, int max)
+ : JVMFlagRange(name), _min(min), _max(max), _ptr(ptr) {}
+
+ JVMFlag::Error check(bool verbose = true) {
+ return check_int(*_ptr, verbose);
+ }
+
+ JVMFlag::Error check_int(int value, bool verbose = true) {
+ if ((value < _min) || (value > _max)) {
+ CommandLineError::print(verbose,
+ "int %s=%d is outside the allowed range "
+ "[ %d ... %d ]\n",
+ name(), value, _min, _max);
+ return JVMFlag::OUT_OF_BOUNDS;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+ }
+
+ void print(outputStream* st) {
+ st->print("[ %-25d ... %25d ]", _min, _max);
+ }
+};
+
+class JVMFlagRange_intx : public JVMFlagRange {
+ intx _min;
+ intx _max;
+ const intx* _ptr;
+public:
+ // the "name" argument must be a string literal
+ JVMFlagRange_intx(const char* name, const intx* ptr, intx min, intx max)
+ : JVMFlagRange(name), _min(min), _max(max), _ptr(ptr) {}
+
+ JVMFlag::Error check(bool verbose = true) {
+ return check_intx(*_ptr, verbose);
+ }
+
+ JVMFlag::Error check_intx(intx value, bool verbose = true) {
+ if ((value < _min) || (value > _max)) {
+ CommandLineError::print(verbose,
+ "intx %s=" INTX_FORMAT " is outside the allowed range "
+ "[ " INTX_FORMAT " ... " INTX_FORMAT " ]\n",
+ name(), value, _min, _max);
+ return JVMFlag::OUT_OF_BOUNDS;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+ }
+
+ void print(outputStream* st) {
+ st->print("[ " INTX_FORMAT_W(-25) " ... " INTX_FORMAT_W(25) " ]", _min, _max);
+ }
+};
+
+class JVMFlagRange_uint : public JVMFlagRange {
+ uint _min;
+ uint _max;
+ const uint* _ptr;
+
+public:
+ // the "name" argument must be a string literal
+ JVMFlagRange_uint(const char* name, const uint* ptr, uint min, uint max)
+ : JVMFlagRange(name), _min(min), _max(max), _ptr(ptr) {}
+
+ JVMFlag::Error check(bool verbose = true) {
+ return check_uint(*_ptr, verbose);
+ }
+
+ JVMFlag::Error check_uint(uint value, bool verbose = true) {
+ if ((value < _min) || (value > _max)) {
+ CommandLineError::print(verbose,
+ "uint %s=%u is outside the allowed range "
+ "[ %u ... %u ]\n",
+ name(), value, _min, _max);
+ return JVMFlag::OUT_OF_BOUNDS;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+ }
+
+ void print(outputStream* st) {
+ st->print("[ %-25u ... %25u ]", _min, _max);
+ }
+};
+
+class JVMFlagRange_uintx : public JVMFlagRange {
+ uintx _min;
+ uintx _max;
+ const uintx* _ptr;
+
+public:
+ // the "name" argument must be a string literal
+ JVMFlagRange_uintx(const char* name, const uintx* ptr, uintx min, uintx max)
+ : JVMFlagRange(name), _min(min), _max(max), _ptr(ptr) {}
+
+ JVMFlag::Error check(bool verbose = true) {
+ return check_uintx(*_ptr, verbose);
+ }
+
+ JVMFlag::Error check_uintx(uintx value, bool verbose = true) {
+ if ((value < _min) || (value > _max)) {
+ CommandLineError::print(verbose,
+ "uintx %s=" UINTX_FORMAT " is outside the allowed range "
+ "[ " UINTX_FORMAT " ... " UINTX_FORMAT " ]\n",
+ name(), value, _min, _max);
+ return JVMFlag::OUT_OF_BOUNDS;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+ }
+
+ void print(outputStream* st) {
+ st->print("[ " UINTX_FORMAT_W(-25) " ... " UINTX_FORMAT_W(25) " ]", _min, _max);
+ }
+};
+
+class JVMFlagRange_uint64_t : public JVMFlagRange {
+ uint64_t _min;
+ uint64_t _max;
+ const uint64_t* _ptr;
+
+public:
+ // the "name" argument must be a string literal
+ JVMFlagRange_uint64_t(const char* name, const uint64_t* ptr, uint64_t min, uint64_t max)
+ : JVMFlagRange(name), _min(min), _max(max), _ptr(ptr) {}
+
+ JVMFlag::Error check(bool verbose = true) {
+ return check_uint64_t(*_ptr, verbose);
+ }
+
+ JVMFlag::Error check_uint64_t(uint64_t value, bool verbose = true) {
+ if ((value < _min) || (value > _max)) {
+ CommandLineError::print(verbose,
+ "uint64_t %s=" UINT64_FORMAT " is outside the allowed range "
+ "[ " UINT64_FORMAT " ... " UINT64_FORMAT " ]\n",
+ name(), value, _min, _max);
+ return JVMFlag::OUT_OF_BOUNDS;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+ }
+
+ void print(outputStream* st) {
+ st->print("[ " UINT64_FORMAT_W(-25) " ... " UINT64_FORMAT_W(25) " ]", _min, _max);
+ }
+};
+
+class JVMFlagRange_size_t : public JVMFlagRange {
+ size_t _min;
+ size_t _max;
+ const size_t* _ptr;
+
+public:
+ // the "name" argument must be a string literal
+ JVMFlagRange_size_t(const char* name, const size_t* ptr, size_t min, size_t max)
+ : JVMFlagRange(name), _min(min), _max(max), _ptr(ptr) {}
+
+ JVMFlag::Error check(bool verbose = true) {
+ return check_size_t(*_ptr, verbose);
+ }
+
+ JVMFlag::Error check_size_t(size_t value, bool verbose = true) {
+ if ((value < _min) || (value > _max)) {
+ CommandLineError::print(verbose,
+ "size_t %s=" SIZE_FORMAT " is outside the allowed range "
+ "[ " SIZE_FORMAT " ... " SIZE_FORMAT " ]\n",
+ name(), value, _min, _max);
+ return JVMFlag::OUT_OF_BOUNDS;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+ }
+
+ void print(outputStream* st) {
+ st->print("[ " SIZE_FORMAT_W(-25) " ... " SIZE_FORMAT_W(25) " ]", _min, _max);
+ }
+};
+
+class JVMFlagRange_double : public JVMFlagRange {
+ double _min;
+ double _max;
+ const double* _ptr;
+
+public:
+ // the "name" argument must be a string literal
+ JVMFlagRange_double(const char* name, const double* ptr, double min, double max)
+ : JVMFlagRange(name), _min(min), _max(max), _ptr(ptr) {}
+
+ JVMFlag::Error check(bool verbose = true) {
+ return check_double(*_ptr, verbose);
+ }
+
+ JVMFlag::Error check_double(double value, bool verbose = true) {
+ if ((value < _min) || (value > _max)) {
+ CommandLineError::print(verbose,
+ "double %s=%f is outside the allowed range "
+ "[ %f ... %f ]\n",
+ name(), value, _min, _max);
+ return JVMFlag::OUT_OF_BOUNDS;
+ } else {
+ return JVMFlag::SUCCESS;
+ }
+ }
+
+ void print(outputStream* st) {
+ st->print("[ %-25.3f ... %25.3f ]", _min, _max);
+ }
+};
+
+// No constraint emitting
+void emit_range_no(...) { /* NOP */ }
+
+// No constraint emitting if function argument is NOT provided
+void emit_range_bool(const char* /*name*/, const bool* /*value*/) { /* NOP */ }
+void emit_range_ccstr(const char* /*name*/, const ccstr* /*value*/) { /* NOP */ }
+void emit_range_ccstrlist(const char* /*name*/, const ccstrlist* /*value*/) { /* NOP */ }
+void emit_range_int(const char* /*name*/, const int* /*value*/) { /* NOP */ }
+void emit_range_intx(const char* /*name*/, const intx* /*value*/) { /* NOP */ }
+void emit_range_uint(const char* /*name*/, const uint* /*value*/) { /* NOP */ }
+void emit_range_uintx(const char* /*name*/, const uintx* /*value*/) { /* NOP */ }
+void emit_range_uint64_t(const char* /*name*/, const uint64_t* /*value*/) { /* NOP */ }
+void emit_range_size_t(const char* /*name*/, const size_t* /*value*/) { /* NOP */ }
+void emit_range_double(const char* /*name*/, const double* /*value*/) { /* NOP */ }
+
+// JVMFlagRange emitting code functions if range arguments are provided
+void emit_range_int(const char* name, const int* ptr, int min, int max) {
+ JVMFlagRangeList::add(new JVMFlagRange_int(name, ptr, min, max));
+}
+void emit_range_intx(const char* name, const intx* ptr, intx min, intx max) {
+ JVMFlagRangeList::add(new JVMFlagRange_intx(name, ptr, min, max));
+}
+void emit_range_uint(const char* name, const uint* ptr, uint min, uint max) {
+ JVMFlagRangeList::add(new JVMFlagRange_uint(name, ptr, min, max));
+}
+void emit_range_uintx(const char* name, const uintx* ptr, uintx min, uintx max) {
+ JVMFlagRangeList::add(new JVMFlagRange_uintx(name, ptr, min, max));
+}
+void emit_range_uint64_t(const char* name, const uint64_t* ptr, uint64_t min, uint64_t max) {
+ JVMFlagRangeList::add(new JVMFlagRange_uint64_t(name, ptr, min, max));
+}
+void emit_range_size_t(const char* name, const size_t* ptr, size_t min, size_t max) {
+ JVMFlagRangeList::add(new JVMFlagRange_size_t(name, ptr, min, max));
+}
+void emit_range_double(const char* name, const double* ptr, double min, double max) {
+ JVMFlagRangeList::add(new JVMFlagRange_double(name, ptr, min, max));
+}
+
+// Generate code to call emit_range_xxx function
+#define EMIT_RANGE_PRODUCT_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
+#define EMIT_RANGE_COMMERCIAL_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
+#define EMIT_RANGE_DIAGNOSTIC_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
+#define EMIT_RANGE_EXPERIMENTAL_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
+#define EMIT_RANGE_MANAGEABLE_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
+#define EMIT_RANGE_PRODUCT_RW_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
+#define EMIT_RANGE_PD_PRODUCT_FLAG(type, name, doc) ); emit_range_##type(#name,&name
+#define EMIT_RANGE_PD_DIAGNOSTIC_FLAG(type, name, doc) ); emit_range_##type(#name,&name
+#ifndef PRODUCT
+#define EMIT_RANGE_DEVELOPER_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
+#define EMIT_RANGE_PD_DEVELOPER_FLAG(type, name, doc) ); emit_range_##type(#name,&name
+#define EMIT_RANGE_NOTPRODUCT_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
+#else
+#define EMIT_RANGE_DEVELOPER_FLAG(type, name, value, doc) ); emit_range_no(#name,&name
+#define EMIT_RANGE_PD_DEVELOPER_FLAG(type, name, doc) ); emit_range_no(#name,&name
+#define EMIT_RANGE_NOTPRODUCT_FLAG(type, name, value, doc) ); emit_range_no(#name,&name
+#endif
+#ifdef _LP64
+#define EMIT_RANGE_LP64_PRODUCT_FLAG(type, name, value, doc) ); emit_range_##type(#name,&name
+#else
+#define EMIT_RANGE_LP64_PRODUCT_FLAG(type, name, value, doc) ); emit_range_no(#name,&name
+#endif
+
+// Generate func argument to pass into emit_range_xxx functions
+#define EMIT_RANGE_CHECK(a, b) , a, b
+
+#define INITIAL_RANGES_SIZE 379
+GrowableArray<JVMFlagRange*>* JVMFlagRangeList::_ranges = NULL;
+
+// Check the ranges of all flags that have them
+void JVMFlagRangeList::init(void) {
+
+ _ranges = new (ResourceObj::C_HEAP, mtArguments) GrowableArray<JVMFlagRange*>(INITIAL_RANGES_SIZE, true);
+
+ emit_range_no(NULL VM_FLAGS(EMIT_RANGE_DEVELOPER_FLAG,
+ EMIT_RANGE_PD_DEVELOPER_FLAG,
+ EMIT_RANGE_PRODUCT_FLAG,
+ EMIT_RANGE_PD_PRODUCT_FLAG,
+ EMIT_RANGE_DIAGNOSTIC_FLAG,
+ EMIT_RANGE_PD_DIAGNOSTIC_FLAG,
+ EMIT_RANGE_EXPERIMENTAL_FLAG,
+ EMIT_RANGE_NOTPRODUCT_FLAG,
+ EMIT_RANGE_MANAGEABLE_FLAG,
+ EMIT_RANGE_PRODUCT_RW_FLAG,
+ EMIT_RANGE_LP64_PRODUCT_FLAG,
+ EMIT_RANGE_CHECK,
+ IGNORE_CONSTRAINT,
+ IGNORE_WRITEABLE));
+
+ EMIT_RANGES_FOR_GLOBALS_EXT
+
+ emit_range_no(NULL ARCH_FLAGS(EMIT_RANGE_DEVELOPER_FLAG,
+ EMIT_RANGE_PRODUCT_FLAG,
+ EMIT_RANGE_DIAGNOSTIC_FLAG,
+ EMIT_RANGE_EXPERIMENTAL_FLAG,
+ EMIT_RANGE_NOTPRODUCT_FLAG,
+ EMIT_RANGE_CHECK,
+ IGNORE_CONSTRAINT,
+ IGNORE_WRITEABLE));
+
+#if INCLUDE_JVMCI
+ emit_range_no(NULL JVMCI_FLAGS(EMIT_RANGE_DEVELOPER_FLAG,
+ EMIT_RANGE_PD_DEVELOPER_FLAG,
+ EMIT_RANGE_PRODUCT_FLAG,
+ EMIT_RANGE_PD_PRODUCT_FLAG,
+ EMIT_RANGE_DIAGNOSTIC_FLAG,
+ EMIT_RANGE_PD_DIAGNOSTIC_FLAG,
+ EMIT_RANGE_EXPERIMENTAL_FLAG,
+ EMIT_RANGE_NOTPRODUCT_FLAG,
+ EMIT_RANGE_CHECK,
+ IGNORE_CONSTRAINT,
+ IGNORE_WRITEABLE));
+#endif // INCLUDE_JVMCI
+
+#ifdef COMPILER1
+ emit_range_no(NULL C1_FLAGS(EMIT_RANGE_DEVELOPER_FLAG,
+ EMIT_RANGE_PD_DEVELOPER_FLAG,
+ EMIT_RANGE_PRODUCT_FLAG,
+ EMIT_RANGE_PD_PRODUCT_FLAG,
+ EMIT_RANGE_DIAGNOSTIC_FLAG,
+ EMIT_RANGE_PD_DIAGNOSTIC_FLAG,
+ EMIT_RANGE_NOTPRODUCT_FLAG,
+ EMIT_RANGE_CHECK,
+ IGNORE_CONSTRAINT,
+ IGNORE_WRITEABLE));
+#endif // COMPILER1
+
+#ifdef COMPILER2
+ emit_range_no(NULL C2_FLAGS(EMIT_RANGE_DEVELOPER_FLAG,
+ EMIT_RANGE_PD_DEVELOPER_FLAG,
+ EMIT_RANGE_PRODUCT_FLAG,
+ EMIT_RANGE_PD_PRODUCT_FLAG,
+ EMIT_RANGE_DIAGNOSTIC_FLAG,
+ EMIT_RANGE_PD_DIAGNOSTIC_FLAG,
+ EMIT_RANGE_EXPERIMENTAL_FLAG,
+ EMIT_RANGE_NOTPRODUCT_FLAG,
+ EMIT_RANGE_CHECK,
+ IGNORE_CONSTRAINT,
+ IGNORE_WRITEABLE));
+#endif // COMPILER2
+}
+
+JVMFlagRange* JVMFlagRangeList::find(const char* name) {
+ JVMFlagRange* found = NULL;
+ for (int i=0; i<length(); i++) {
+ JVMFlagRange* range = at(i);
+ if (strcmp(range->name(), name) == 0) {
+ found = range;
+ break;
+ }
+ }
+ return found;
+}
+
+void JVMFlagRangeList::print(outputStream* st, const char* name, RangeStrFunc default_range_str_func) {
+ JVMFlagRange* range = JVMFlagRangeList::find(name);
+ if (range != NULL) {
+ range->print(st);
+ } else {
+ JVMFlagConstraint* constraint = JVMFlagConstraintList::find(name);
+ if (constraint != NULL) {
+ assert(default_range_str_func!=NULL, "default_range_str_func must be provided");
+ st->print("%s", default_range_str_func());
+ } else {
+ st->print("[ ... ]");
+ }
+ }
+}
+
+bool JVMFlagRangeList::check_ranges() {
+ // Check ranges.
+ bool status = true;
+ for (int i=0; i<length(); i++) {
+ JVMFlagRange* range = at(i);
+ if (range->check(true) != JVMFlag::SUCCESS) status = false;
+ }
+ return status;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/flags/jvmFlagRangeList.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_JVMFLAGRANGELIST_HPP
+#define SHARE_VM_RUNTIME_JVMFLAGRANGELIST_HPP
+
+#include "memory/metaspaceShared.hpp"
+#include "runtime/flags/jvmFlag.hpp"
+#include "utilities/growableArray.hpp"
+
+/*
+ * Here we have a mechanism for extracting ranges specified in flag macro tables.
+ *
+ * The specified ranges are used to verify that flags have valid values.
+ *
+ * An example of a range is "min <= flag <= max". Both "min" and "max" must be
+ * constant and can not change. If either "min" or "max" can change,
+ * then we need to use constraint instead.
+ */
+
+class CommandLineError : public AllStatic {
+public:
+ static void print(bool verbose, const char* msg, ...);
+};
+
+class JVMFlagRange : public CHeapObj<mtArguments> {
+private:
+ const char* _name;
+public:
+ // the "name" argument must be a string literal
+ JVMFlagRange(const char* name) { _name=name; }
+ ~JVMFlagRange() {}
+ const char* name() { return _name; }
+ virtual JVMFlag::Error check(bool verbose = true) { ShouldNotReachHere(); return JVMFlag::ERR_OTHER; }
+ virtual JVMFlag::Error check_int(int value, bool verbose = true) { ShouldNotReachHere(); return JVMFlag::ERR_OTHER; }
+ virtual JVMFlag::Error check_intx(intx value, bool verbose = true) { ShouldNotReachHere(); return JVMFlag::ERR_OTHER; }
+ virtual JVMFlag::Error check_uint(uint value, bool verbose = true) { ShouldNotReachHere(); return JVMFlag::ERR_OTHER; }
+ virtual JVMFlag::Error check_uintx(uintx value, bool verbose = true) { ShouldNotReachHere(); return JVMFlag::ERR_OTHER; }
+ virtual JVMFlag::Error check_uint64_t(uint64_t value, bool verbose = true) { ShouldNotReachHere(); return JVMFlag::ERR_OTHER; }
+ virtual JVMFlag::Error check_size_t(size_t value, bool verbose = true) { ShouldNotReachHere(); return JVMFlag::ERR_OTHER; }
+ virtual JVMFlag::Error check_double(double value, bool verbose = true) { ShouldNotReachHere(); return JVMFlag::ERR_OTHER; }
+ virtual void print(outputStream* st) { ; }
+};
+
+class JVMFlagRangeList : public AllStatic {
+ static GrowableArray<JVMFlagRange*>* _ranges;
+public:
+ static void init();
+ static int length() { return (_ranges != NULL) ? _ranges->length() : 0; }
+ static JVMFlagRange* at(int i) { return (_ranges != NULL) ? _ranges->at(i) : NULL; }
+ static JVMFlagRange* find(const char* name);
+ static void add(JVMFlagRange* range) { _ranges->append(range); }
+ static void print(outputStream* st, const char* name, RangeStrFunc default_range_str_func);
+ // Check the final values of all flags for ranges.
+ static bool check_ranges();
+};
+
+#endif // SHARE_VM_RUNTIME_JVMFLAGRANGELIST_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/flags/jvmFlagWriteableList.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/plab.hpp"
+#include "runtime/flags/jvmFlagWriteableList.hpp"
+#include "runtime/os.hpp"
+#ifdef COMPILER1
+#include "c1/c1_globals.hpp"
+#endif // COMPILER1
+#ifdef COMPILER2
+#include "opto/c2_globals.hpp"
+#endif // COMPILER2
+#if INCLUDE_JVMCI
+#include "jvmci/jvmci_globals.hpp"
+#endif
+
+bool JVMFlagWriteable::is_writeable(void) {
+ return _writeable;
+}
+
+void JVMFlagWriteable::mark_once(void) {
+ if (_type == Once) {
+ _writeable = false;
+ }
+}
+
+void JVMFlagWriteable::mark_startup(void) {
+ if (_type == JVMFlagWriteable::CommandLineOnly) {
+ _writeable = false;
+ }
+}
+
+// No control emitting
+void emit_writeable_no(...) { /* NOP */ }
+
+// No control emitting if type argument is NOT provided
+void emit_writeable_bool(const char* /*name*/) { /* NOP */ }
+void emit_writeable_ccstr(const char* /*name*/) { /* NOP */ }
+void emit_writeable_ccstrlist(const char* /*name*/) { /* NOP */ }
+void emit_writeable_int(const char* /*name*/) { /* NOP */ }
+void emit_writeable_intx(const char* /*name*/) { /* NOP */ }
+void emit_writeable_uint(const char* /*name*/) { /* NOP */ }
+void emit_writeable_uintx(const char* /*name*/) { /* NOP */ }
+void emit_writeable_uint64_t(const char* /*name*/) { /* NOP */ }
+void emit_writeable_size_t(const char* /*name*/) { /* NOP */ }
+void emit_writeable_double(const char* /*name*/) { /* NOP */ }
+
+// JVMFlagWriteable emitting code functions if range arguments are provided
+void emit_writeable_bool(const char* name, JVMFlagWriteable::WriteableType type) {
+ JVMFlagWriteableList::add(new JVMFlagWriteable(name, type));
+}
+void emit_writeable_int(const char* name, JVMFlagWriteable::WriteableType type) {
+ JVMFlagWriteableList::add(new JVMFlagWriteable(name, type));
+}
+void emit_writeable_intx(const char* name, JVMFlagWriteable::WriteableType type) {
+ JVMFlagWriteableList::add(new JVMFlagWriteable(name, type));
+}
+void emit_writeable_uint(const char* name, JVMFlagWriteable::WriteableType type) {
+ JVMFlagWriteableList::add(new JVMFlagWriteable(name, type));
+}
+void emit_writeable_uintx(const char* name, JVMFlagWriteable::WriteableType type) {
+ JVMFlagWriteableList::add(new JVMFlagWriteable(name, type));
+}
+void emit_writeable_uint64_t(const char* name, JVMFlagWriteable::WriteableType type) {
+ JVMFlagWriteableList::add(new JVMFlagWriteable(name, type));
+}
+void emit_writeable_size_t(const char* name, JVMFlagWriteable::WriteableType type) {
+ JVMFlagWriteableList::add(new JVMFlagWriteable(name, type));
+}
+void emit_writeable_double(const char* name, JVMFlagWriteable::WriteableType type) {
+ JVMFlagWriteableList::add(new JVMFlagWriteable(name, type));
+}
+
+// Generate code to call emit_writeable_xxx function
+#define EMIT_WRITEABLE_PRODUCT_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
+#define EMIT_WRITEABLE_COMMERCIAL_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
+#define EMIT_WRITEABLE_DIAGNOSTIC_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
+#define EMIT_WRITEABLE_EXPERIMENTAL_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
+#define EMIT_WRITEABLE_MANAGEABLE_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
+#define EMIT_WRITEABLE_PRODUCT_RW_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
+#define EMIT_WRITEABLE_PD_PRODUCT_FLAG(type, name, doc) ); emit_writeable_##type(#name
+#define EMIT_WRITEABLE_DEVELOPER_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
+#define EMIT_WRITEABLE_PD_DEVELOPER_FLAG(type, name, doc) ); emit_writeable_##type(#name
+#define EMIT_WRITEABLE_PD_DIAGNOSTIC_FLAG(type, name, doc) ); emit_writeable_##type(#name
+#define EMIT_WRITEABLE_NOTPRODUCT_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
+#define EMIT_WRITEABLE_LP64_PRODUCT_FLAG(type, name, value, doc) ); emit_writeable_##type(#name
+
+// Generate type argument to pass into emit_writeable_xxx functions
+#define EMIT_WRITEABLE(a) , JVMFlagWriteable::a
+
+#define INITIAL_WRITEABLES_SIZE 2
+GrowableArray<JVMFlagWriteable*>* JVMFlagWriteableList::_controls = NULL;
+
+void JVMFlagWriteableList::init(void) {
+
+ _controls = new (ResourceObj::C_HEAP, mtArguments) GrowableArray<JVMFlagWriteable*>(INITIAL_WRITEABLES_SIZE, true);
+
+ emit_writeable_no(NULL VM_FLAGS(EMIT_WRITEABLE_DEVELOPER_FLAG,
+ EMIT_WRITEABLE_PD_DEVELOPER_FLAG,
+ EMIT_WRITEABLE_PRODUCT_FLAG,
+ EMIT_WRITEABLE_PD_PRODUCT_FLAG,
+ EMIT_WRITEABLE_DIAGNOSTIC_FLAG,
+ EMIT_WRITEABLE_PD_DIAGNOSTIC_FLAG,
+ EMIT_WRITEABLE_EXPERIMENTAL_FLAG,
+ EMIT_WRITEABLE_NOTPRODUCT_FLAG,
+ EMIT_WRITEABLE_MANAGEABLE_FLAG,
+ EMIT_WRITEABLE_PRODUCT_RW_FLAG,
+ EMIT_WRITEABLE_LP64_PRODUCT_FLAG,
+ IGNORE_RANGE,
+ IGNORE_CONSTRAINT,
+ EMIT_WRITEABLE));
+
+ EMIT_WRITEABLES_FOR_GLOBALS_EXT
+
+ emit_writeable_no(NULL ARCH_FLAGS(EMIT_WRITEABLE_DEVELOPER_FLAG,
+ EMIT_WRITEABLE_PRODUCT_FLAG,
+ EMIT_WRITEABLE_DIAGNOSTIC_FLAG,
+ EMIT_WRITEABLE_EXPERIMENTAL_FLAG,
+ EMIT_WRITEABLE_NOTPRODUCT_FLAG,
+ IGNORE_RANGE,
+ IGNORE_CONSTRAINT,
+ EMIT_WRITEABLE));
+
+#if INCLUDE_JVMCI
+ emit_writeable_no(NULL JVMCI_FLAGS(EMIT_WRITEABLE_DEVELOPER_FLAG,
+ EMIT_WRITEABLE_PD_DEVELOPER_FLAG,
+ EMIT_WRITEABLE_PRODUCT_FLAG,
+ EMIT_WRITEABLE_PD_PRODUCT_FLAG,
+ EMIT_WRITEABLE_DIAGNOSTIC_FLAG,
+ EMIT_WRITEABLE_PD_DIAGNOSTIC_FLAG,
+ EMIT_WRITEABLE_EXPERIMENTAL_FLAG,
+ EMIT_WRITEABLE_NOTPRODUCT_FLAG,
+ IGNORE_RANGE,
+ IGNORE_CONSTRAINT,
+ EMIT_WRITEABLE));
+#endif // INCLUDE_JVMCI
+
+#ifdef COMPILER1
+ emit_writeable_no(NULL C1_FLAGS(EMIT_WRITEABLE_DEVELOPER_FLAG,
+ EMIT_WRITEABLE_PD_DEVELOPER_FLAG,
+ EMIT_WRITEABLE_PRODUCT_FLAG,
+ EMIT_WRITEABLE_PD_PRODUCT_FLAG,
+ EMIT_WRITEABLE_DIAGNOSTIC_FLAG,
+ EMIT_WRITEABLE_PD_DIAGNOSTIC_FLAG,
+ EMIT_WRITEABLE_NOTPRODUCT_FLAG,
+ IGNORE_RANGE,
+ IGNORE_CONSTRAINT,
+ EMIT_WRITEABLE));
+#endif // COMPILER1
+
+#ifdef COMPILER2
+ emit_writeable_no(NULL C2_FLAGS(EMIT_WRITEABLE_DEVELOPER_FLAG,
+ EMIT_WRITEABLE_PD_DEVELOPER_FLAG,
+ EMIT_WRITEABLE_PRODUCT_FLAG,
+ EMIT_WRITEABLE_PD_PRODUCT_FLAG,
+ EMIT_WRITEABLE_DIAGNOSTIC_FLAG,
+ EMIT_WRITEABLE_PD_DIAGNOSTIC_FLAG,
+ EMIT_WRITEABLE_EXPERIMENTAL_FLAG,
+ EMIT_WRITEABLE_NOTPRODUCT_FLAG,
+ IGNORE_RANGE,
+ IGNORE_CONSTRAINT,
+ EMIT_WRITEABLE));
+#endif // COMPILER2
+}
+
+JVMFlagWriteable* JVMFlagWriteableList::find(const char* name) {
+ JVMFlagWriteable* found = NULL;
+ for (int i=0; i<length(); i++) {
+ JVMFlagWriteable* writeable = at(i);
+ if (strcmp(writeable->name(), name) == 0) {
+ found = writeable;
+ break;
+ }
+ }
+ return found;
+}
+
+void JVMFlagWriteableList::mark_startup(void) {
+ for (int i=0; i<length(); i++) {
+ JVMFlagWriteable* writeable = at(i);
+ writeable->mark_startup();
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/flags/jvmFlagWriteableList.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_JVMFLAGWRITEABLE_HPP
+#define SHARE_VM_RUNTIME_JVMFLAGWRITEABLE_HPP
+
+#include "utilities/growableArray.hpp"
+
+class JVMFlagWriteable : public CHeapObj<mtArguments> {
+public:
+ enum WriteableType {
+ // can be set without any limits
+ Always = 0,
+ // can only be set once, either via command lines or during runtime
+ Once = 1,
+ // can only be set on command line (multiple times allowed)
+ CommandLineOnly = 2
+ };
+private:
+ const char* _name;
+ WriteableType _type;
+ bool _writeable;
+ bool _startup_done;
+public:
+ // the "name" argument must be a string literal
+ JVMFlagWriteable(const char* name, WriteableType type) { _name=name; _type=type; _writeable=true; _startup_done=false; }
+ ~JVMFlagWriteable() {}
+ const char* name() { return _name; }
+ const WriteableType type() { return _type; }
+ bool is_writeable(void);
+ void mark_once(void);
+ void mark_startup(void);
+};
+
+class JVMFlagWriteableList : public AllStatic {
+ static GrowableArray<JVMFlagWriteable*>* _controls;
+public:
+ static void init();
+ static int length() { return (_controls != NULL) ? _controls->length() : 0; }
+ static JVMFlagWriteable* at(int i) { return (_controls != NULL) ? _controls->at(i) : NULL; }
+ static JVMFlagWriteable* find(const char* name);
+ static void add(JVMFlagWriteable* range) { _controls->append(range); }
+ static void mark_startup(void);
+};
+
+#endif // SHARE_VM_RUNTIME_JVMFLAGWRITEABLE_HPP
--- a/src/hotspot/share/runtime/globals.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/runtime/globals.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -29,9 +29,9 @@
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
-#include "runtime/commandLineFlagConstraintList.hpp"
-#include "runtime/commandLineFlagWriteableList.hpp"
-#include "runtime/commandLineFlagRangeList.hpp"
+#include "runtime/flags/jvmFlagConstraintList.hpp"
+#include "runtime/flags/jvmFlagWriteableList.hpp"
+#include "runtime/flags/jvmFlagRangeList.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "trace/tracing.hpp"
@@ -85,1473 +85,3 @@
IGNORE_WRITEABLE)
MATERIALIZE_FLAGS_EXT
-
-#define DEFAULT_RANGE_STR_CHUNK_SIZE 64
-static char* create_range_str(const char *fmt, ...) {
- static size_t string_length = DEFAULT_RANGE_STR_CHUNK_SIZE;
- static char* range_string = NEW_C_HEAP_ARRAY(char, string_length, mtLogging);
-
- int size_needed = 0;
- do {
- va_list args;
- va_start(args, fmt);
- size_needed = jio_vsnprintf(range_string, string_length, fmt, args);
- va_end(args);
-
- if (size_needed < 0) {
- string_length += DEFAULT_RANGE_STR_CHUNK_SIZE;
- range_string = REALLOC_C_HEAP_ARRAY(char, range_string, string_length, mtLogging);
- guarantee(range_string != NULL, "create_range_str string should not be NULL");
- }
- } while (size_needed < 0);
-
- return range_string;
-}
-
-const char* Flag::get_int_default_range_str() {
- return create_range_str("[ " INT32_FORMAT_W(-25) " ... " INT32_FORMAT_W(25) " ]", INT_MIN, INT_MAX);
-}
-
-const char* Flag::get_uint_default_range_str() {
- return create_range_str("[ " UINT32_FORMAT_W(-25) " ... " UINT32_FORMAT_W(25) " ]", 0, UINT_MAX);
-}
-
-const char* Flag::get_intx_default_range_str() {
- return create_range_str("[ " INTX_FORMAT_W(-25) " ... " INTX_FORMAT_W(25) " ]", min_intx, max_intx);
-}
-
-const char* Flag::get_uintx_default_range_str() {
- return create_range_str("[ " UINTX_FORMAT_W(-25) " ... " UINTX_FORMAT_W(25) " ]", 0, max_uintx);
-}
-
-const char* Flag::get_uint64_t_default_range_str() {
- return create_range_str("[ " UINT64_FORMAT_W(-25) " ... " UINT64_FORMAT_W(25) " ]", 0, uint64_t(max_juint));
-}
-
-const char* Flag::get_size_t_default_range_str() {
- return create_range_str("[ " SIZE_FORMAT_W(-25) " ... " SIZE_FORMAT_W(25) " ]", 0, SIZE_MAX);
-}
-
-const char* Flag::get_double_default_range_str() {
- return create_range_str("[ %-25.3f ... %25.3f ]", DBL_MIN, DBL_MAX);
-}
-
-static bool is_product_build() {
-#ifdef PRODUCT
- return true;
-#else
- return false;
-#endif
-}
-
-Flag::Error Flag::check_writable(bool changed) {
- if (is_constant_in_binary()) {
- fatal("flag is constant: %s", _name);
- }
-
- Flag::Error error = Flag::SUCCESS;
- if (changed) {
- CommandLineFlagWriteable* writeable = CommandLineFlagWriteableList::find(_name);
- if (writeable) {
- if (writeable->is_writeable() == false) {
- switch (writeable->type())
- {
- case CommandLineFlagWriteable::Once:
- error = Flag::SET_ONLY_ONCE;
- jio_fprintf(defaultStream::error_stream(), "Error: %s may not be set more than once\n", _name);
- break;
- case CommandLineFlagWriteable::CommandLineOnly:
- error = Flag::COMMAND_LINE_ONLY;
- jio_fprintf(defaultStream::error_stream(), "Error: %s may be modified only from commad line\n", _name);
- break;
- default:
- ShouldNotReachHere();
- break;
- }
- }
- writeable->mark_once();
- }
- }
- return error;
-}
-
-bool Flag::is_bool() const {
- return strcmp(_type, "bool") == 0;
-}
-
-bool Flag::get_bool() const {
- return *((bool*) _addr);
-}
-
-Flag::Error Flag::set_bool(bool value) {
- Flag::Error error = check_writable(value!=get_bool());
- if (error == Flag::SUCCESS) {
- *((bool*) _addr) = value;
- }
- return error;
-}
-
-bool Flag::is_int() const {
- return strcmp(_type, "int") == 0;
-}
-
-int Flag::get_int() const {
- return *((int*) _addr);
-}
-
-Flag::Error Flag::set_int(int value) {
- Flag::Error error = check_writable(value!=get_int());
- if (error == Flag::SUCCESS) {
- *((int*) _addr) = value;
- }
- return error;
-}
-
-bool Flag::is_uint() const {
- return strcmp(_type, "uint") == 0;
-}
-
-uint Flag::get_uint() const {
- return *((uint*) _addr);
-}
-
-Flag::Error Flag::set_uint(uint value) {
- Flag::Error error = check_writable(value!=get_uint());
- if (error == Flag::SUCCESS) {
- *((uint*) _addr) = value;
- }
- return error;
-}
-
-bool Flag::is_intx() const {
- return strcmp(_type, "intx") == 0;
-}
-
-intx Flag::get_intx() const {
- return *((intx*) _addr);
-}
-
-Flag::Error Flag::set_intx(intx value) {
- Flag::Error error = check_writable(value!=get_intx());
- if (error == Flag::SUCCESS) {
- *((intx*) _addr) = value;
- }
- return error;
-}
-
-bool Flag::is_uintx() const {
- return strcmp(_type, "uintx") == 0;
-}
-
-uintx Flag::get_uintx() const {
- return *((uintx*) _addr);
-}
-
-Flag::Error Flag::set_uintx(uintx value) {
- Flag::Error error = check_writable(value!=get_uintx());
- if (error == Flag::SUCCESS) {
- *((uintx*) _addr) = value;
- }
- return error;
-}
-
-bool Flag::is_uint64_t() const {
- return strcmp(_type, "uint64_t") == 0;
-}
-
-uint64_t Flag::get_uint64_t() const {
- return *((uint64_t*) _addr);
-}
-
-Flag::Error Flag::set_uint64_t(uint64_t value) {
- Flag::Error error = check_writable(value!=get_uint64_t());
- if (error == Flag::SUCCESS) {
- *((uint64_t*) _addr) = value;
- }
- return error;
-}
-
-bool Flag::is_size_t() const {
- return strcmp(_type, "size_t") == 0;
-}
-
-size_t Flag::get_size_t() const {
- return *((size_t*) _addr);
-}
-
-Flag::Error Flag::set_size_t(size_t value) {
- Flag::Error error = check_writable(value!=get_size_t());
- if (error == Flag::SUCCESS) {
- *((size_t*) _addr) = value;
- }
- return error;
-}
-
-bool Flag::is_double() const {
- return strcmp(_type, "double") == 0;
-}
-
-double Flag::get_double() const {
- return *((double*) _addr);
-}
-
-Flag::Error Flag::set_double(double value) {
- Flag::Error error = check_writable(value!=get_double());
- if (error == Flag::SUCCESS) {
- *((double*) _addr) = value;
- }
- return error;
-}
-
-bool Flag::is_ccstr() const {
- return strcmp(_type, "ccstr") == 0 || strcmp(_type, "ccstrlist") == 0;
-}
-
-bool Flag::ccstr_accumulates() const {
- return strcmp(_type, "ccstrlist") == 0;
-}
-
-ccstr Flag::get_ccstr() const {
- return *((ccstr*) _addr);
-}
-
-Flag::Error Flag::set_ccstr(ccstr value) {
- Flag::Error error = check_writable(value!=get_ccstr());
- if (error == Flag::SUCCESS) {
- *((ccstr*) _addr) = value;
- }
- return error;
-}
-
-
-Flag::Flags Flag::get_origin() {
- return Flags(_flags & VALUE_ORIGIN_MASK);
-}
-
-void Flag::set_origin(Flags origin) {
- assert((origin & VALUE_ORIGIN_MASK) == origin, "sanity");
- Flags new_origin = Flags((origin == COMMAND_LINE) ? Flags(origin | ORIG_COMMAND_LINE) : origin);
- _flags = Flags((_flags & ~VALUE_ORIGIN_MASK) | new_origin);
-}
-
-bool Flag::is_default() {
- return (get_origin() == DEFAULT);
-}
-
-bool Flag::is_ergonomic() {
- return (get_origin() == ERGONOMIC);
-}
-
-bool Flag::is_command_line() {
- return (_flags & ORIG_COMMAND_LINE) != 0;
-}
-
-void Flag::set_command_line() {
- _flags = Flags(_flags | ORIG_COMMAND_LINE);
-}
-
-bool Flag::is_product() const {
- return (_flags & KIND_PRODUCT) != 0;
-}
-
-bool Flag::is_manageable() const {
- return (_flags & KIND_MANAGEABLE) != 0;
-}
-
-bool Flag::is_diagnostic() const {
- return (_flags & KIND_DIAGNOSTIC) != 0;
-}
-
-bool Flag::is_experimental() const {
- return (_flags & KIND_EXPERIMENTAL) != 0;
-}
-
-bool Flag::is_notproduct() const {
- return (_flags & KIND_NOT_PRODUCT) != 0;
-}
-
-bool Flag::is_develop() const {
- return (_flags & KIND_DEVELOP) != 0;
-}
-
-bool Flag::is_read_write() const {
- return (_flags & KIND_READ_WRITE) != 0;
-}
-
-bool Flag::is_commercial() const {
- return (_flags & KIND_COMMERCIAL) != 0;
-}
-
-/**
- * Returns if this flag is a constant in the binary. Right now this is
- * true for notproduct and develop flags in product builds.
- */
-bool Flag::is_constant_in_binary() const {
-#ifdef PRODUCT
- return is_notproduct() || is_develop();
-#else
- return false;
-#endif
-}
-
-bool Flag::is_unlocker() const {
- return strcmp(_name, "UnlockDiagnosticVMOptions") == 0 ||
- strcmp(_name, "UnlockExperimentalVMOptions") == 0 ||
- is_unlocker_ext();
-}
-
-bool Flag::is_unlocked() const {
- if (is_diagnostic()) {
- return UnlockDiagnosticVMOptions;
- }
- if (is_experimental()) {
- return UnlockExperimentalVMOptions;
- }
- return is_unlocked_ext();
-}
-
-void Flag::clear_diagnostic() {
- assert(is_diagnostic(), "sanity");
- _flags = Flags(_flags & ~KIND_DIAGNOSTIC);
- assert(!is_diagnostic(), "sanity");
-}
-
-// Get custom message for this locked flag, or NULL if
-// none is available. Returns message type produced.
-Flag::MsgType Flag::get_locked_message(char* buf, int buflen) const {
- buf[0] = '\0';
- if (is_diagnostic() && !is_unlocked()) {
- jio_snprintf(buf, buflen,
- "Error: VM option '%s' is diagnostic and must be enabled via -XX:+UnlockDiagnosticVMOptions.\n"
- "Error: The unlock option must precede '%s'.\n",
- _name, _name);
- return Flag::DIAGNOSTIC_FLAG_BUT_LOCKED;
- }
- if (is_experimental() && !is_unlocked()) {
- jio_snprintf(buf, buflen,
- "Error: VM option '%s' is experimental and must be enabled via -XX:+UnlockExperimentalVMOptions.\n"
- "Error: The unlock option must precede '%s'.\n",
- _name, _name);
- return Flag::EXPERIMENTAL_FLAG_BUT_LOCKED;
- }
- if (is_develop() && is_product_build()) {
- jio_snprintf(buf, buflen, "Error: VM option '%s' is develop and is available only in debug version of VM.\n",
- _name);
- return Flag::DEVELOPER_FLAG_BUT_PRODUCT_BUILD;
- }
- if (is_notproduct() && is_product_build()) {
- jio_snprintf(buf, buflen, "Error: VM option '%s' is notproduct and is available only in debug version of VM.\n",
- _name);
- return Flag::NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD;
- }
- return get_locked_message_ext(buf, buflen);
-}
-
-bool Flag::is_writeable() const {
- return is_manageable() || (is_product() && is_read_write()) || is_writeable_ext();
-}
-
-// All flags except "manageable" are assumed to be internal flags.
-// Long term, we need to define a mechanism to specify which flags
-// are external/stable and change this function accordingly.
-bool Flag::is_external() const {
- return is_manageable() || is_external_ext();
-}
-
-// Helper function for Flag::print_on().
-// Fills current line up to requested position.
-// Should the current position already be past the requested position,
-// one separator blank is enforced.
-void fill_to_pos(outputStream* st, unsigned int req_pos) {
- if ((unsigned int)st->position() < req_pos) {
- st->fill_to(req_pos); // need to fill with blanks to reach req_pos
- } else {
- st->print(" "); // enforce blank separation. Previous field too long.
- }
-}
-
-void Flag::print_on(outputStream* st, bool withComments, bool printRanges) {
- // Don't print notproduct and develop flags in a product build.
- if (is_constant_in_binary()) {
- return;
- }
-
- if (!printRanges) {
- // The command line options -XX:+PrintFlags* cause this function to be called
- // for each existing flag to print information pertinent to this flag. The data
- // is displayed in columnar form, with the following layout:
- // col1 - data type, right-justified
- // col2 - name, left-justified
- // col3 - ' =' double-char, leading space to align with possible '+='
- // col4 - value left-justified
- // col5 - kind right-justified
- // col6 - origin left-justified
- // col7 - comments left-justified
- //
- // The column widths are fixed. They are defined such that, for most cases,
- // an eye-pleasing tabular output is created.
- //
- // Sample output:
- // bool CMSScavengeBeforeRemark = false {product} {default}
- // uintx CMSScheduleRemarkEdenPenetration = 50 {product} {default}
- // size_t CMSScheduleRemarkEdenSizeThreshold = 2097152 {product} {default}
- // uintx CMSScheduleRemarkSamplingRatio = 5 {product} {default}
- // double CMSSmallCoalSurplusPercent = 1.050000 {product} {default}
- // ccstr CompileCommandFile = MyFile.cmd {product} {command line}
- // ccstrlist CompileOnly = Method1
- // CompileOnly += Method2 {product} {command line}
- // | | | | | | |
- // | | | | | | +-- col7
- // | | | | | +-- col6
- // | | | | +-- col5
- // | | | +-- col4
- // | | +-- col3
- // | +-- col2
- // +-- col1
-
- const unsigned int col_spacing = 1;
- const unsigned int col1_pos = 0;
- const unsigned int col1_width = 9;
- const unsigned int col2_pos = col1_pos + col1_width + col_spacing;
- const unsigned int col2_width = 39;
- const unsigned int col3_pos = col2_pos + col2_width + col_spacing;
- const unsigned int col3_width = 2;
- const unsigned int col4_pos = col3_pos + col3_width + col_spacing;
- const unsigned int col4_width = 30;
- const unsigned int col5_pos = col4_pos + col4_width + col_spacing;
- const unsigned int col5_width = 20;
- const unsigned int col6_pos = col5_pos + col5_width + col_spacing;
- const unsigned int col6_width = 15;
- const unsigned int col7_pos = col6_pos + col6_width + col_spacing;
- const unsigned int col7_width = 1;
-
- st->fill_to(col1_pos);
- st->print("%*s", col1_width, _type); // right-justified, therefore width is required.
-
- fill_to_pos(st, col2_pos);
- st->print("%s", _name);
-
- fill_to_pos(st, col3_pos);
- st->print(" ="); // use " =" for proper alignment with multiline ccstr output.
-
- fill_to_pos(st, col4_pos);
- if (is_bool()) {
- st->print("%s", get_bool() ? "true" : "false");
- } else if (is_int()) {
- st->print("%d", get_int());
- } else if (is_uint()) {
- st->print("%u", get_uint());
- } else if (is_intx()) {
- st->print(INTX_FORMAT, get_intx());
- } else if (is_uintx()) {
- st->print(UINTX_FORMAT, get_uintx());
- } else if (is_uint64_t()) {
- st->print(UINT64_FORMAT, get_uint64_t());
- } else if (is_size_t()) {
- st->print(SIZE_FORMAT, get_size_t());
- } else if (is_double()) {
- st->print("%f", get_double());
- } else if (is_ccstr()) {
- // Honor <newline> characters in ccstr: print multiple lines.
- const char* cp = get_ccstr();
- if (cp != NULL) {
- const char* eol;
- while ((eol = strchr(cp, '\n')) != NULL) {
- size_t llen = pointer_delta(eol, cp, sizeof(char));
- st->print("%.*s", (int)llen, cp);
- st->cr();
- cp = eol+1;
- fill_to_pos(st, col2_pos);
- st->print("%s", _name);
- fill_to_pos(st, col3_pos);
- st->print("+=");
- fill_to_pos(st, col4_pos);
- }
- st->print("%s", cp);
- }
- } else {
- st->print("unhandled type %s", _type);
- st->cr();
- return;
- }
-
- fill_to_pos(st, col5_pos);
- print_kind(st, col5_width);
-
- fill_to_pos(st, col6_pos);
- print_origin(st, col6_width);
-
-#ifndef PRODUCT
- if (withComments) {
- fill_to_pos(st, col7_pos);
- st->print("%s", _doc);
- }
-#endif
- st->cr();
- } else if (!is_bool() && !is_ccstr()) {
- // The command line options -XX:+PrintFlags* cause this function to be called
- // for each existing flag to print information pertinent to this flag. The data
- // is displayed in columnar form, with the following layout:
- // col1 - data type, right-justified
- // col2 - name, left-justified
- // col4 - range [ min ... max]
- // col5 - kind right-justified
- // col6 - origin left-justified
- // col7 - comments left-justified
- //
- // The column widths are fixed. They are defined such that, for most cases,
- // an eye-pleasing tabular output is created.
- //
- // Sample output:
- // intx MinPassesBeforeFlush [ 0 ... 9223372036854775807 ] {diagnostic} {default}
- // uintx MinRAMFraction [ 1 ... 18446744073709551615 ] {product} {default}
- // double MinRAMPercentage [ 0.000 ... 100.000 ] {product} {default}
- // uintx MinSurvivorRatio [ 3 ... 18446744073709551615 ] {product} {default}
- // size_t MinTLABSize [ 1 ... 9223372036854775807 ] {product} {default}
- // intx MonitorBound [ 0 ... 2147483647 ] {product} {default}
- // | | | | | |
- // | | | | | +-- col7
- // | | | | +-- col6
- // | | | +-- col5
- // | | +-- col4
- // | +-- col2
- // +-- col1
-
- const unsigned int col_spacing = 1;
- const unsigned int col1_pos = 0;
- const unsigned int col1_width = 9;
- const unsigned int col2_pos = col1_pos + col1_width + col_spacing;
- const unsigned int col2_width = 49;
- const unsigned int col3_pos = col2_pos + col2_width + col_spacing;
- const unsigned int col3_width = 0;
- const unsigned int col4_pos = col3_pos + col3_width + col_spacing;
- const unsigned int col4_width = 60;
- const unsigned int col5_pos = col4_pos + col4_width + col_spacing;
- const unsigned int col5_width = 35;
- const unsigned int col6_pos = col5_pos + col5_width + col_spacing;
- const unsigned int col6_width = 15;
- const unsigned int col7_pos = col6_pos + col6_width + col_spacing;
- const unsigned int col7_width = 1;
-
- st->fill_to(col1_pos);
- st->print("%*s", col1_width, _type); // right-justified, therefore width is required.
-
- fill_to_pos(st, col2_pos);
- st->print("%s", _name);
-
- fill_to_pos(st, col4_pos);
- RangeStrFunc func = NULL;
- if (is_int()) {
- func = Flag::get_int_default_range_str;
- } else if (is_uint()) {
- func = Flag::get_uint_default_range_str;
- } else if (is_intx()) {
- func = Flag::get_intx_default_range_str;
- } else if (is_uintx()) {
- func = Flag::get_uintx_default_range_str;
- } else if (is_uint64_t()) {
- func = Flag::get_uint64_t_default_range_str;
- } else if (is_size_t()) {
- func = Flag::get_size_t_default_range_str;
- } else if (is_double()) {
- func = Flag::get_double_default_range_str;
- } else {
- st->print("unhandled type %s", _type);
- st->cr();
- return;
- }
- CommandLineFlagRangeList::print(st, _name, func);
-
- fill_to_pos(st, col5_pos);
- print_kind(st, col5_width);
-
- fill_to_pos(st, col6_pos);
- print_origin(st, col6_width);
-
-#ifndef PRODUCT
- if (withComments) {
- fill_to_pos(st, col7_pos);
- st->print("%s", _doc);
- }
-#endif
- st->cr();
- }
-}
-
-void Flag::print_kind(outputStream* st, unsigned int width) {
- struct Data {
- int flag;
- const char* name;
- };
-
- Data data[] = {
- { KIND_JVMCI, "JVMCI" },
- { KIND_C1, "C1" },
- { KIND_C2, "C2" },
- { KIND_ARCH, "ARCH" },
- { KIND_PLATFORM_DEPENDENT, "pd" },
- { KIND_PRODUCT, "product" },
- { KIND_MANAGEABLE, "manageable" },
- { KIND_DIAGNOSTIC, "diagnostic" },
- { KIND_EXPERIMENTAL, "experimental" },
- { KIND_COMMERCIAL, "commercial" },
- { KIND_NOT_PRODUCT, "notproduct" },
- { KIND_DEVELOP, "develop" },
- { KIND_LP64_PRODUCT, "lp64_product" },
- { KIND_READ_WRITE, "rw" },
- { -1, "" }
- };
-
- if ((_flags & KIND_MASK) != 0) {
- bool is_first = true;
- const size_t buffer_size = 64;
- size_t buffer_used = 0;
- char kind[buffer_size];
-
- jio_snprintf(kind, buffer_size, "{");
- buffer_used++;
- for (int i = 0; data[i].flag != -1; i++) {
- Data d = data[i];
- if ((_flags & d.flag) != 0) {
- if (is_first) {
- is_first = false;
- } else {
- assert(buffer_used + 1 < buffer_size, "Too small buffer");
- jio_snprintf(kind + buffer_used, buffer_size - buffer_used, " ");
- buffer_used++;
- }
- size_t length = strlen(d.name);
- assert(buffer_used + length < buffer_size, "Too small buffer");
- jio_snprintf(kind + buffer_used, buffer_size - buffer_used, "%s", d.name);
- buffer_used += length;
- }
- }
- assert(buffer_used + 2 <= buffer_size, "Too small buffer");
- jio_snprintf(kind + buffer_used, buffer_size - buffer_used, "}");
- st->print("%*s", width, kind);
- }
-}
-
-void Flag::print_origin(outputStream* st, unsigned int width) {
- int origin = _flags & VALUE_ORIGIN_MASK;
- st->print("{");
- switch(origin) {
- case DEFAULT:
- st->print("default"); break;
- case COMMAND_LINE:
- st->print("command line"); break;
- case ENVIRON_VAR:
- st->print("environment"); break;
- case CONFIG_FILE:
- st->print("config file"); break;
- case MANAGEMENT:
- st->print("management"); break;
- case ERGONOMIC:
- if (_flags & ORIG_COMMAND_LINE) {
- st->print("command line, ");
- }
- st->print("ergonomic"); break;
- case ATTACH_ON_DEMAND:
- st->print("attach"); break;
- case INTERNAL:
- st->print("internal"); break;
- }
- st->print("}");
-}
-
-void Flag::print_as_flag(outputStream* st) {
- if (is_bool()) {
- st->print("-XX:%s%s", get_bool() ? "+" : "-", _name);
- } else if (is_int()) {
- st->print("-XX:%s=%d", _name, get_int());
- } else if (is_uint()) {
- st->print("-XX:%s=%u", _name, get_uint());
- } else if (is_intx()) {
- st->print("-XX:%s=" INTX_FORMAT, _name, get_intx());
- } else if (is_uintx()) {
- st->print("-XX:%s=" UINTX_FORMAT, _name, get_uintx());
- } else if (is_uint64_t()) {
- st->print("-XX:%s=" UINT64_FORMAT, _name, get_uint64_t());
- } else if (is_size_t()) {
- st->print("-XX:%s=" SIZE_FORMAT, _name, get_size_t());
- } else if (is_double()) {
- st->print("-XX:%s=%f", _name, get_double());
- } else if (is_ccstr()) {
- st->print("-XX:%s=", _name);
- const char* cp = get_ccstr();
- if (cp != NULL) {
- // Need to turn embedded '\n's back into separate arguments
- // Not so efficient to print one character at a time,
- // but the choice is to do the transformation to a buffer
- // and print that. And this need not be efficient.
- for (; *cp != '\0'; cp += 1) {
- switch (*cp) {
- default:
- st->print("%c", *cp);
- break;
- case '\n':
- st->print(" -XX:%s=", _name);
- break;
- }
- }
- }
- } else {
- ShouldNotReachHere();
- }
-}
-
-const char* Flag::flag_error_str(Flag::Error error) {
- switch (error) {
- case Flag::MISSING_NAME: return "MISSING_NAME";
- case Flag::MISSING_VALUE: return "MISSING_VALUE";
- case Flag::NON_WRITABLE: return "NON_WRITABLE";
- case Flag::OUT_OF_BOUNDS: return "OUT_OF_BOUNDS";
- case Flag::VIOLATES_CONSTRAINT: return "VIOLATES_CONSTRAINT";
- case Flag::INVALID_FLAG: return "INVALID_FLAG";
- case Flag::ERR_OTHER: return "ERR_OTHER";
- case Flag::SUCCESS: return "SUCCESS";
- default: ShouldNotReachHere(); return "NULL";
- }
-}
-
-// 4991491 do not "optimize out" the was_set false values: omitting them
-// tickles a Microsoft compiler bug causing flagTable to be malformed
-
-#define RUNTIME_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_PRODUCT) },
-#define RUNTIME_PD_PRODUCT_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_PRODUCT | Flag::KIND_PLATFORM_DEPENDENT) },
-#define RUNTIME_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_DIAGNOSTIC) },
-#define RUNTIME_PD_DIAGNOSTIC_FLAG_STRUCT(type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_DIAGNOSTIC | Flag::KIND_PLATFORM_DEPENDENT) },
-#define RUNTIME_EXPERIMENTAL_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_EXPERIMENTAL) },
-#define RUNTIME_MANAGEABLE_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_MANAGEABLE) },
-#define RUNTIME_PRODUCT_RW_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_PRODUCT | Flag::KIND_READ_WRITE) },
-#define RUNTIME_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_DEVELOP) },
-#define RUNTIME_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_DEVELOP | Flag::KIND_PLATFORM_DEPENDENT) },
-#define RUNTIME_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_NOT_PRODUCT) },
-
-#define JVMCI_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_JVMCI | Flag::KIND_PRODUCT) },
-#define JVMCI_PD_PRODUCT_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_JVMCI | Flag::KIND_PRODUCT | Flag::KIND_PLATFORM_DEPENDENT) },
-#define JVMCI_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_JVMCI | Flag::KIND_DIAGNOSTIC) },
-#define JVMCI_PD_DIAGNOSTIC_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_JVMCI | Flag::KIND_DIAGNOSTIC | Flag::KIND_PLATFORM_DEPENDENT) },
-#define JVMCI_EXPERIMENTAL_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_JVMCI | Flag::KIND_EXPERIMENTAL) },
-#define JVMCI_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_JVMCI | Flag::KIND_DEVELOP) },
-#define JVMCI_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_JVMCI | Flag::KIND_DEVELOP | Flag::KIND_PLATFORM_DEPENDENT) },
-#define JVMCI_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_JVMCI | Flag::KIND_NOT_PRODUCT) },
-
-#ifdef _LP64
-#define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_LP64_PRODUCT) },
-#else
-#define RUNTIME_LP64_PRODUCT_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
-#endif // _LP64
-
-#define C1_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_PRODUCT) },
-#define C1_PD_PRODUCT_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_PRODUCT | Flag::KIND_PLATFORM_DEPENDENT) },
-#define C1_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_DIAGNOSTIC) },
-#define C1_PD_DIAGNOSTIC_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_DIAGNOSTIC | Flag::KIND_PLATFORM_DEPENDENT) },
-#define C1_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_DEVELOP) },
-#define C1_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_DEVELOP | Flag::KIND_PLATFORM_DEPENDENT) },
-#define C1_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C1 | Flag::KIND_NOT_PRODUCT) },
-
-#define C2_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_PRODUCT) },
-#define C2_PD_PRODUCT_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_PRODUCT | Flag::KIND_PLATFORM_DEPENDENT) },
-#define C2_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_DIAGNOSTIC) },
-#define C2_PD_DIAGNOSTIC_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_DIAGNOSTIC | Flag::KIND_PLATFORM_DEPENDENT) },
-#define C2_EXPERIMENTAL_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_EXPERIMENTAL) },
-#define C2_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_DEVELOP) },
-#define C2_PD_DEVELOP_FLAG_STRUCT( type, name, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_DEVELOP | Flag::KIND_PLATFORM_DEPENDENT) },
-#define C2_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_C2 | Flag::KIND_NOT_PRODUCT) },
-
-#define ARCH_PRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_PRODUCT) },
-#define ARCH_DIAGNOSTIC_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_DIAGNOSTIC) },
-#define ARCH_EXPERIMENTAL_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_EXPERIMENTAL) },
-#define ARCH_DEVELOP_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_DEVELOP) },
-#define ARCH_NOTPRODUCT_FLAG_STRUCT( type, name, value, doc) { #type, XSTR(name), (void*) &name, NOT_PRODUCT_ARG(doc) Flag::Flags(Flag::DEFAULT | Flag::KIND_ARCH | Flag::KIND_NOT_PRODUCT) },
-
-static Flag flagTable[] = {
- VM_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, \
- RUNTIME_PD_DEVELOP_FLAG_STRUCT, \
- RUNTIME_PRODUCT_FLAG_STRUCT, \
- RUNTIME_PD_PRODUCT_FLAG_STRUCT, \
- RUNTIME_DIAGNOSTIC_FLAG_STRUCT, \
- RUNTIME_PD_DIAGNOSTIC_FLAG_STRUCT, \
- RUNTIME_EXPERIMENTAL_FLAG_STRUCT, \
- RUNTIME_NOTPRODUCT_FLAG_STRUCT, \
- RUNTIME_MANAGEABLE_FLAG_STRUCT, \
- RUNTIME_PRODUCT_RW_FLAG_STRUCT, \
- RUNTIME_LP64_PRODUCT_FLAG_STRUCT, \
- IGNORE_RANGE, \
- IGNORE_CONSTRAINT, \
- IGNORE_WRITEABLE)
-
- RUNTIME_OS_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, \
- RUNTIME_PD_DEVELOP_FLAG_STRUCT, \
- RUNTIME_PRODUCT_FLAG_STRUCT, \
- RUNTIME_PD_PRODUCT_FLAG_STRUCT, \
- RUNTIME_DIAGNOSTIC_FLAG_STRUCT, \
- RUNTIME_PD_DIAGNOSTIC_FLAG_STRUCT, \
- RUNTIME_NOTPRODUCT_FLAG_STRUCT, \
- IGNORE_RANGE, \
- IGNORE_CONSTRAINT, \
- IGNORE_WRITEABLE)
-#if INCLUDE_JVMCI
- JVMCI_FLAGS(JVMCI_DEVELOP_FLAG_STRUCT, \
- JVMCI_PD_DEVELOP_FLAG_STRUCT, \
- JVMCI_PRODUCT_FLAG_STRUCT, \
- JVMCI_PD_PRODUCT_FLAG_STRUCT, \
- JVMCI_DIAGNOSTIC_FLAG_STRUCT, \
- JVMCI_PD_DIAGNOSTIC_FLAG_STRUCT, \
- JVMCI_EXPERIMENTAL_FLAG_STRUCT, \
- JVMCI_NOTPRODUCT_FLAG_STRUCT, \
- IGNORE_RANGE, \
- IGNORE_CONSTRAINT, \
- IGNORE_WRITEABLE)
-#endif // INCLUDE_JVMCI
-#ifdef COMPILER1
- C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, \
- C1_PD_DEVELOP_FLAG_STRUCT, \
- C1_PRODUCT_FLAG_STRUCT, \
- C1_PD_PRODUCT_FLAG_STRUCT, \
- C1_DIAGNOSTIC_FLAG_STRUCT, \
- C1_PD_DIAGNOSTIC_FLAG_STRUCT, \
- C1_NOTPRODUCT_FLAG_STRUCT, \
- IGNORE_RANGE, \
- IGNORE_CONSTRAINT, \
- IGNORE_WRITEABLE)
-#endif // COMPILER1
-#ifdef COMPILER2
- C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, \
- C2_PD_DEVELOP_FLAG_STRUCT, \
- C2_PRODUCT_FLAG_STRUCT, \
- C2_PD_PRODUCT_FLAG_STRUCT, \
- C2_DIAGNOSTIC_FLAG_STRUCT, \
- C2_PD_DIAGNOSTIC_FLAG_STRUCT, \
- C2_EXPERIMENTAL_FLAG_STRUCT, \
- C2_NOTPRODUCT_FLAG_STRUCT, \
- IGNORE_RANGE, \
- IGNORE_CONSTRAINT, \
- IGNORE_WRITEABLE)
-#endif // COMPILER2
- ARCH_FLAGS(ARCH_DEVELOP_FLAG_STRUCT, \
- ARCH_PRODUCT_FLAG_STRUCT, \
- ARCH_DIAGNOSTIC_FLAG_STRUCT, \
- ARCH_EXPERIMENTAL_FLAG_STRUCT, \
- ARCH_NOTPRODUCT_FLAG_STRUCT, \
- IGNORE_RANGE, \
- IGNORE_CONSTRAINT, \
- IGNORE_WRITEABLE)
- FLAGTABLE_EXT
- {0, NULL, NULL}
-};
-
-Flag* Flag::flags = flagTable;
-size_t Flag::numFlags = (sizeof(flagTable) / sizeof(Flag));
-
-inline bool str_equal(const char* s, size_t s_len, const char* q, size_t q_len) {
- if (s_len != q_len) return false;
- return memcmp(s, q, q_len) == 0;
-}
-
-// Search the flag table for a named flag
-Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked, bool return_flag) {
- for (Flag* current = &flagTable[0]; current->_name != NULL; current++) {
- if (str_equal(current->_name, current->get_name_length(), name, length)) {
- // Found a matching entry.
- // Don't report notproduct and develop flags in product builds.
- if (current->is_constant_in_binary()) {
- return (return_flag ? current : NULL);
- }
- // Report locked flags only if allowed.
- if (!(current->is_unlocked() || current->is_unlocker())) {
- if (!allow_locked) {
- // disable use of locked flags, e.g. diagnostic, experimental,
- // commercial... until they are explicitly unlocked
- return NULL;
- }
- }
- return current;
- }
- }
- // Flag name is not in the flag table
- return NULL;
-}
-
-// Get or compute the flag name length
-size_t Flag::get_name_length() {
- if (_name_len == 0) {
- _name_len = strlen(_name);
- }
- return _name_len;
-}
-
-Flag* Flag::fuzzy_match(const char* name, size_t length, bool allow_locked) {
- float VMOptionsFuzzyMatchSimilarity = 0.7f;
- Flag* match = NULL;
- float score;
- float max_score = -1;
-
- for (Flag* current = &flagTable[0]; current->_name != NULL; current++) {
- score = StringUtils::similarity(current->_name, strlen(current->_name), name, length);
- if (score > max_score) {
- max_score = score;
- match = current;
- }
- }
-
- if (!(match->is_unlocked() || match->is_unlocker())) {
- if (!allow_locked) {
- return NULL;
- }
- }
-
- if (max_score < VMOptionsFuzzyMatchSimilarity) {
- return NULL;
- }
-
- return match;
-}
-
-// Returns the address of the index'th element
-static Flag* address_of_flag(CommandLineFlagWithType flag) {
- assert((size_t)flag < Flag::numFlags, "bad command line flag index");
- return &Flag::flags[flag];
-}
-
-bool CommandLineFlagsEx::is_default(CommandLineFlag flag) {
- assert((size_t)flag < Flag::numFlags, "bad command line flag index");
- Flag* f = &Flag::flags[flag];
- return f->is_default();
-}
-
-bool CommandLineFlagsEx::is_ergo(CommandLineFlag flag) {
- assert((size_t)flag < Flag::numFlags, "bad command line flag index");
- Flag* f = &Flag::flags[flag];
- return f->is_ergonomic();
-}
-
-bool CommandLineFlagsEx::is_cmdline(CommandLineFlag flag) {
- assert((size_t)flag < Flag::numFlags, "bad command line flag index");
- Flag* f = &Flag::flags[flag];
- return f->is_command_line();
-}
-
-bool CommandLineFlags::wasSetOnCmdline(const char* name, bool* value) {
- Flag* result = Flag::find_flag((char*)name, strlen(name));
- if (result == NULL) return false;
- *value = result->is_command_line();
- return true;
-}
-
-void CommandLineFlagsEx::setOnCmdLine(CommandLineFlagWithType flag) {
- Flag* faddr = address_of_flag(flag);
- assert(faddr != NULL, "Unknown flag");
- faddr->set_command_line();
-}
-
-template<class E, class T>
-static void trace_flag_changed(const char* name, const T old_value, const T new_value, const Flag::Flags origin) {
- E e;
- e.set_name(name);
- e.set_oldValue(old_value);
- e.set_newValue(new_value);
- e.set_origin(origin);
- e.commit();
-}
-
-static Flag::Error apply_constraint_and_check_range_bool(const char* name, bool new_value, bool verbose) {
- Flag::Error status = Flag::SUCCESS;
- CommandLineFlagConstraint* constraint = CommandLineFlagConstraintList::find_if_needs_check(name);
- if (constraint != NULL) {
- status = constraint->apply_bool(new_value, verbose);
- }
- return status;
-}
-
-Flag::Error CommandLineFlags::boolAt(const char* name, size_t len, bool* value, bool allow_locked, bool return_flag) {
- Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
- if (result == NULL) return Flag::INVALID_FLAG;
- if (!result->is_bool()) return Flag::WRONG_FORMAT;
- *value = result->get_bool();
- return Flag::SUCCESS;
-}
-
-Flag::Error CommandLineFlags::boolAtPut(Flag* flag, bool* value, Flag::Flags origin) {
- const char* name;
- if (flag == NULL) return Flag::INVALID_FLAG;
- if (!flag->is_bool()) return Flag::WRONG_FORMAT;
- name = flag->_name;
- Flag::Error check = apply_constraint_and_check_range_bool(name, *value, !CommandLineFlagConstraintList::validated_after_ergo());
- if (check != Flag::SUCCESS) return check;
- bool old_value = flag->get_bool();
- trace_flag_changed<EventBooleanFlagChanged, bool>(name, old_value, *value, origin);
- check = flag->set_bool(*value);
- *value = old_value;
- flag->set_origin(origin);
- return check;
-}
-
-Flag::Error CommandLineFlags::boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin) {
- Flag* result = Flag::find_flag(name, len);
- return boolAtPut(result, value, origin);
-}
-
-Flag::Error CommandLineFlagsEx::boolAtPut(CommandLineFlagWithType flag, bool value, Flag::Flags origin) {
- Flag* faddr = address_of_flag(flag);
- guarantee(faddr != NULL && faddr->is_bool(), "wrong flag type");
- return CommandLineFlags::boolAtPut(faddr, &value, origin);
-}
-
-static Flag::Error apply_constraint_and_check_range_int(const char* name, int new_value, bool verbose) {
- Flag::Error status = Flag::SUCCESS;
- CommandLineFlagRange* range = CommandLineFlagRangeList::find(name);
- if (range != NULL) {
- status = range->check_int(new_value, verbose);
- }
- if (status == Flag::SUCCESS) {
- CommandLineFlagConstraint* constraint = CommandLineFlagConstraintList::find_if_needs_check(name);
- if (constraint != NULL) {
- status = constraint->apply_int(new_value, verbose);
- }
- }
- return status;
-}
-
-Flag::Error CommandLineFlags::intAt(const char* name, size_t len, int* value, bool allow_locked, bool return_flag) {
- Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
- if (result == NULL) return Flag::INVALID_FLAG;
- if (!result->is_int()) return Flag::WRONG_FORMAT;
- *value = result->get_int();
- return Flag::SUCCESS;
-}
-
-Flag::Error CommandLineFlags::intAtPut(Flag* flag, int* value, Flag::Flags origin) {
- const char* name;
- if (flag == NULL) return Flag::INVALID_FLAG;
- if (!flag->is_int()) return Flag::WRONG_FORMAT;
- name = flag->_name;
- Flag::Error check = apply_constraint_and_check_range_int(name, *value, !CommandLineFlagConstraintList::validated_after_ergo());
- if (check != Flag::SUCCESS) return check;
- int old_value = flag->get_int();
- trace_flag_changed<EventIntFlagChanged, s4>(name, old_value, *value, origin);
- check = flag->set_int(*value);
- *value = old_value;
- flag->set_origin(origin);
- return check;
-}
-
-Flag::Error CommandLineFlags::intAtPut(const char* name, size_t len, int* value, Flag::Flags origin) {
- Flag* result = Flag::find_flag(name, len);
- return intAtPut(result, value, origin);
-}
-
-Flag::Error CommandLineFlagsEx::intAtPut(CommandLineFlagWithType flag, int value, Flag::Flags origin) {
- Flag* faddr = address_of_flag(flag);
- guarantee(faddr != NULL && faddr->is_int(), "wrong flag type");
- return CommandLineFlags::intAtPut(faddr, &value, origin);
-}
-
-static Flag::Error apply_constraint_and_check_range_uint(const char* name, uint new_value, bool verbose) {
- Flag::Error status = Flag::SUCCESS;
- CommandLineFlagRange* range = CommandLineFlagRangeList::find(name);
- if (range != NULL) {
- status = range->check_uint(new_value, verbose);
- }
- if (status == Flag::SUCCESS) {
- CommandLineFlagConstraint* constraint = CommandLineFlagConstraintList::find_if_needs_check(name);
- if (constraint != NULL) {
- status = constraint->apply_uint(new_value, verbose);
- }
- }
- return status;
-}
-
-Flag::Error CommandLineFlags::uintAt(const char* name, size_t len, uint* value, bool allow_locked, bool return_flag) {
- Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
- if (result == NULL) return Flag::INVALID_FLAG;
- if (!result->is_uint()) return Flag::WRONG_FORMAT;
- *value = result->get_uint();
- return Flag::SUCCESS;
-}
-
-Flag::Error CommandLineFlags::uintAtPut(Flag* flag, uint* value, Flag::Flags origin) {
- const char* name;
- if (flag == NULL) return Flag::INVALID_FLAG;
- if (!flag->is_uint()) return Flag::WRONG_FORMAT;
- name = flag->_name;
- Flag::Error check = apply_constraint_and_check_range_uint(name, *value, !CommandLineFlagConstraintList::validated_after_ergo());
- if (check != Flag::SUCCESS) return check;
- uint old_value = flag->get_uint();
- trace_flag_changed<EventUnsignedIntFlagChanged, u4>(name, old_value, *value, origin);
- check = flag->set_uint(*value);
- *value = old_value;
- flag->set_origin(origin);
- return check;
-}
-
-Flag::Error CommandLineFlags::uintAtPut(const char* name, size_t len, uint* value, Flag::Flags origin) {
- Flag* result = Flag::find_flag(name, len);
- return uintAtPut(result, value, origin);
-}
-
-Flag::Error CommandLineFlagsEx::uintAtPut(CommandLineFlagWithType flag, uint value, Flag::Flags origin) {
- Flag* faddr = address_of_flag(flag);
- guarantee(faddr != NULL && faddr->is_uint(), "wrong flag type");
- return CommandLineFlags::uintAtPut(faddr, &value, origin);
-}
-
-Flag::Error CommandLineFlags::intxAt(const char* name, size_t len, intx* value, bool allow_locked, bool return_flag) {
- Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
- if (result == NULL) return Flag::INVALID_FLAG;
- if (!result->is_intx()) return Flag::WRONG_FORMAT;
- *value = result->get_intx();
- return Flag::SUCCESS;
-}
-
-static Flag::Error apply_constraint_and_check_range_intx(const char* name, intx new_value, bool verbose) {
- Flag::Error status = Flag::SUCCESS;
- CommandLineFlagRange* range = CommandLineFlagRangeList::find(name);
- if (range != NULL) {
- status = range->check_intx(new_value, verbose);
- }
- if (status == Flag::SUCCESS) {
- CommandLineFlagConstraint* constraint = CommandLineFlagConstraintList::find_if_needs_check(name);
- if (constraint != NULL) {
- status = constraint->apply_intx(new_value, verbose);
- }
- }
- return status;
-}
-
-Flag::Error CommandLineFlags::intxAtPut(Flag* flag, intx* value, Flag::Flags origin) {
- const char* name;
- if (flag == NULL) return Flag::INVALID_FLAG;
- if (!flag->is_intx()) return Flag::WRONG_FORMAT;
- name = flag->_name;
- Flag::Error check = apply_constraint_and_check_range_intx(name, *value, !CommandLineFlagConstraintList::validated_after_ergo());
- if (check != Flag::SUCCESS) return check;
- intx old_value = flag->get_intx();
- trace_flag_changed<EventLongFlagChanged, intx>(name, old_value, *value, origin);
- check = flag->set_intx(*value);
- *value = old_value;
- flag->set_origin(origin);
- return check;
-}
-
-Flag::Error CommandLineFlags::intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin) {
- Flag* result = Flag::find_flag(name, len);
- return intxAtPut(result, value, origin);
-}
-
-Flag::Error CommandLineFlagsEx::intxAtPut(CommandLineFlagWithType flag, intx value, Flag::Flags origin) {
- Flag* faddr = address_of_flag(flag);
- guarantee(faddr != NULL && faddr->is_intx(), "wrong flag type");
- return CommandLineFlags::intxAtPut(faddr, &value, origin);
-}
-
-Flag::Error CommandLineFlags::uintxAt(const char* name, size_t len, uintx* value, bool allow_locked, bool return_flag) {
- Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
- if (result == NULL) return Flag::INVALID_FLAG;
- if (!result->is_uintx()) return Flag::WRONG_FORMAT;
- *value = result->get_uintx();
- return Flag::SUCCESS;
-}
-
-static Flag::Error apply_constraint_and_check_range_uintx(const char* name, uintx new_value, bool verbose) {
- Flag::Error status = Flag::SUCCESS;
- CommandLineFlagRange* range = CommandLineFlagRangeList::find(name);
- if (range != NULL) {
- status = range->check_uintx(new_value, verbose);
- }
- if (status == Flag::SUCCESS) {
- CommandLineFlagConstraint* constraint = CommandLineFlagConstraintList::find_if_needs_check(name);
- if (constraint != NULL) {
- status = constraint->apply_uintx(new_value, verbose);
- }
- }
- return status;
-}
-
-Flag::Error CommandLineFlags::uintxAtPut(Flag* flag, uintx* value, Flag::Flags origin) {
- const char* name;
- if (flag == NULL) return Flag::INVALID_FLAG;
- if (!flag->is_uintx()) return Flag::WRONG_FORMAT;
- name = flag->_name;
- Flag::Error check = apply_constraint_and_check_range_uintx(name, *value, !CommandLineFlagConstraintList::validated_after_ergo());
- if (check != Flag::SUCCESS) return check;
- uintx old_value = flag->get_uintx();
- trace_flag_changed<EventUnsignedLongFlagChanged, u8>(name, old_value, *value, origin);
- check = flag->set_uintx(*value);
- *value = old_value;
- flag->set_origin(origin);
- return check;
-}
-
-Flag::Error CommandLineFlags::uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin) {
- Flag* result = Flag::find_flag(name, len);
- return uintxAtPut(result, value, origin);
-}
-
-Flag::Error CommandLineFlagsEx::uintxAtPut(CommandLineFlagWithType flag, uintx value, Flag::Flags origin) {
- Flag* faddr = address_of_flag(flag);
- guarantee(faddr != NULL && faddr->is_uintx(), "wrong flag type");
- return CommandLineFlags::uintxAtPut(faddr, &value, origin);
-}
-
-Flag::Error CommandLineFlags::uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked, bool return_flag) {
- Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
- if (result == NULL) return Flag::INVALID_FLAG;
- if (!result->is_uint64_t()) return Flag::WRONG_FORMAT;
- *value = result->get_uint64_t();
- return Flag::SUCCESS;
-}
-
-static Flag::Error apply_constraint_and_check_range_uint64_t(const char* name, uint64_t new_value, bool verbose) {
- Flag::Error status = Flag::SUCCESS;
- CommandLineFlagRange* range = CommandLineFlagRangeList::find(name);
- if (range != NULL) {
- status = range->check_uint64_t(new_value, verbose);
- }
- if (status == Flag::SUCCESS) {
- CommandLineFlagConstraint* constraint = CommandLineFlagConstraintList::find_if_needs_check(name);
- if (constraint != NULL) {
- status = constraint->apply_uint64_t(new_value, verbose);
- }
- }
- return status;
-}
-
-Flag::Error CommandLineFlags::uint64_tAtPut(Flag* flag, uint64_t* value, Flag::Flags origin) {
- const char* name;
- if (flag == NULL) return Flag::INVALID_FLAG;
- if (!flag->is_uint64_t()) return Flag::WRONG_FORMAT;
- name = flag->_name;
- Flag::Error check = apply_constraint_and_check_range_uint64_t(name, *value, !CommandLineFlagConstraintList::validated_after_ergo());
- if (check != Flag::SUCCESS) return check;
- uint64_t old_value = flag->get_uint64_t();
- trace_flag_changed<EventUnsignedLongFlagChanged, u8>(name, old_value, *value, origin);
- check = flag->set_uint64_t(*value);
- *value = old_value;
- flag->set_origin(origin);
- return check;
-}
-
-Flag::Error CommandLineFlags::uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin) {
- Flag* result = Flag::find_flag(name, len);
- return uint64_tAtPut(result, value, origin);
-}
-
-Flag::Error CommandLineFlagsEx::uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, Flag::Flags origin) {
- Flag* faddr = address_of_flag(flag);
- guarantee(faddr != NULL && faddr->is_uint64_t(), "wrong flag type");
- return CommandLineFlags::uint64_tAtPut(faddr, &value, origin);
-}
-
-Flag::Error CommandLineFlags::size_tAt(const char* name, size_t len, size_t* value, bool allow_locked, bool return_flag) {
- Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
- if (result == NULL) return Flag::INVALID_FLAG;
- if (!result->is_size_t()) return Flag::WRONG_FORMAT;
- *value = result->get_size_t();
- return Flag::SUCCESS;
-}
-
-static Flag::Error apply_constraint_and_check_range_size_t(const char* name, size_t new_value, bool verbose) {
- Flag::Error status = Flag::SUCCESS;
- CommandLineFlagRange* range = CommandLineFlagRangeList::find(name);
- if (range != NULL) {
- status = range->check_size_t(new_value, verbose);
- }
- if (status == Flag::SUCCESS) {
- CommandLineFlagConstraint* constraint = CommandLineFlagConstraintList::find_if_needs_check(name);
- if (constraint != NULL) {
- status = constraint->apply_size_t(new_value, verbose);
- }
- }
- return status;
-}
-
-
-Flag::Error CommandLineFlags::size_tAtPut(Flag* flag, size_t* value, Flag::Flags origin) {
- const char* name;
- if (flag == NULL) return Flag::INVALID_FLAG;
- if (!flag->is_size_t()) return Flag::WRONG_FORMAT;
- name = flag->_name;
- Flag::Error check = apply_constraint_and_check_range_size_t(name, *value, !CommandLineFlagConstraintList::validated_after_ergo());
- if (check != Flag::SUCCESS) return check;
- size_t old_value = flag->get_size_t();
- trace_flag_changed<EventUnsignedLongFlagChanged, u8>(name, old_value, *value, origin);
- check = flag->set_size_t(*value);
- *value = old_value;
- flag->set_origin(origin);
- return check;
-}
-
-Flag::Error CommandLineFlags::size_tAtPut(const char* name, size_t len, size_t* value, Flag::Flags origin) {
- Flag* result = Flag::find_flag(name, len);
- return size_tAtPut(result, value, origin);
-}
-
-Flag::Error CommandLineFlagsEx::size_tAtPut(CommandLineFlagWithType flag, size_t value, Flag::Flags origin) {
- Flag* faddr = address_of_flag(flag);
- guarantee(faddr != NULL && faddr->is_size_t(), "wrong flag type");
- return CommandLineFlags::size_tAtPut(faddr, &value, origin);
-}
-
-Flag::Error CommandLineFlags::doubleAt(const char* name, size_t len, double* value, bool allow_locked, bool return_flag) {
- Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
- if (result == NULL) return Flag::INVALID_FLAG;
- if (!result->is_double()) return Flag::WRONG_FORMAT;
- *value = result->get_double();
- return Flag::SUCCESS;
-}
-
-static Flag::Error apply_constraint_and_check_range_double(const char* name, double new_value, bool verbose) {
- Flag::Error status = Flag::SUCCESS;
- CommandLineFlagRange* range = CommandLineFlagRangeList::find(name);
- if (range != NULL) {
- status = range->check_double(new_value, verbose);
- }
- if (status == Flag::SUCCESS) {
- CommandLineFlagConstraint* constraint = CommandLineFlagConstraintList::find_if_needs_check(name);
- if (constraint != NULL) {
- status = constraint->apply_double(new_value, verbose);
- }
- }
- return status;
-}
-
-Flag::Error CommandLineFlags::doubleAtPut(Flag* flag, double* value, Flag::Flags origin) {
- const char* name;
- if (flag == NULL) return Flag::INVALID_FLAG;
- if (!flag->is_double()) return Flag::WRONG_FORMAT;
- name = flag->_name;
- Flag::Error check = apply_constraint_and_check_range_double(name, *value, !CommandLineFlagConstraintList::validated_after_ergo());
- if (check != Flag::SUCCESS) return check;
- double old_value = flag->get_double();
- trace_flag_changed<EventDoubleFlagChanged, double>(name, old_value, *value, origin);
- check = flag->set_double(*value);
- *value = old_value;
- flag->set_origin(origin);
- return check;
-}
-
-Flag::Error CommandLineFlags::doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin) {
- Flag* result = Flag::find_flag(name, len);
- return doubleAtPut(result, value, origin);
-}
-
-Flag::Error CommandLineFlagsEx::doubleAtPut(CommandLineFlagWithType flag, double value, Flag::Flags origin) {
- Flag* faddr = address_of_flag(flag);
- guarantee(faddr != NULL && faddr->is_double(), "wrong flag type");
- return CommandLineFlags::doubleAtPut(faddr, &value, origin);
-}
-
-Flag::Error CommandLineFlags::ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked, bool return_flag) {
- Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
- if (result == NULL) return Flag::INVALID_FLAG;
- if (!result->is_ccstr()) return Flag::WRONG_FORMAT;
- *value = result->get_ccstr();
- return Flag::SUCCESS;
-}
-
-Flag::Error CommandLineFlags::ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin) {
- Flag* result = Flag::find_flag(name, len);
- if (result == NULL) return Flag::INVALID_FLAG;
- if (!result->is_ccstr()) return Flag::WRONG_FORMAT;
- ccstr old_value = result->get_ccstr();
- trace_flag_changed<EventStringFlagChanged, const char*>(name, old_value, *value, origin);
- char* new_value = NULL;
- if (*value != NULL) {
- new_value = os::strdup_check_oom(*value);
- }
- Flag::Error check = result->set_ccstr(new_value);
- if (result->is_default() && old_value != NULL) {
- // Prior value is NOT heap allocated, but was a literal constant.
- old_value = os::strdup_check_oom(old_value);
- }
- *value = old_value;
- result->set_origin(origin);
- return check;
-}
-
-Flag::Error CommandLineFlagsEx::ccstrAtPut(CommandLineFlagWithType flag, ccstr value, Flag::Flags origin) {
- Flag* faddr = address_of_flag(flag);
- guarantee(faddr != NULL && faddr->is_ccstr(), "wrong flag type");
- ccstr old_value = faddr->get_ccstr();
- trace_flag_changed<EventStringFlagChanged, const char*>(faddr->_name, old_value, value, origin);
- char* new_value = os::strdup_check_oom(value);
- Flag::Error check = faddr->set_ccstr(new_value);
- if (!faddr->is_default() && old_value != NULL) {
- // Prior value is heap allocated so free it.
- FREE_C_HEAP_ARRAY(char, old_value);
- }
- faddr->set_origin(origin);
- return check;
-}
-
-extern "C" {
- static int compare_flags(const void* void_a, const void* void_b) {
- return strcmp((*((Flag**) void_a))->_name, (*((Flag**) void_b))->_name);
- }
-}
-
-void CommandLineFlags::printSetFlags(outputStream* out) {
- // Print which flags were set on the command line
- // note: this method is called before the thread structure is in place
- // which means resource allocation cannot be used.
-
- // The last entry is the null entry.
- const size_t length = Flag::numFlags - 1;
-
- // Sort
- Flag** array = NEW_C_HEAP_ARRAY(Flag*, length, mtArguments);
- for (size_t i = 0; i < length; i++) {
- array[i] = &flagTable[i];
- }
- qsort(array, length, sizeof(Flag*), compare_flags);
-
- // Print
- for (size_t i = 0; i < length; i++) {
- if (array[i]->get_origin() /* naked field! */) {
- array[i]->print_as_flag(out);
- out->print(" ");
- }
- }
- out->cr();
- FREE_C_HEAP_ARRAY(Flag*, array);
-}
-
-#ifndef PRODUCT
-
-void CommandLineFlags::verify() {
- assert(Arguments::check_vm_args_consistency(), "Some flag settings conflict");
-}
-
-#endif // PRODUCT
-
-void CommandLineFlags::printFlags(outputStream* out, bool withComments, bool printRanges) {
- // Print the flags sorted by name
- // note: this method is called before the thread structure is in place
- // which means resource allocation cannot be used.
-
- // The last entry is the null entry.
- const size_t length = Flag::numFlags - 1;
-
- // Sort
- Flag** array = NEW_C_HEAP_ARRAY(Flag*, length, mtArguments);
- for (size_t i = 0; i < length; i++) {
- array[i] = &flagTable[i];
- }
- qsort(array, length, sizeof(Flag*), compare_flags);
-
- // Print
- if (!printRanges) {
- out->print_cr("[Global flags]");
- } else {
- out->print_cr("[Global flags ranges]");
- }
-
- for (size_t i = 0; i < length; i++) {
- if (array[i]->is_unlocked()) {
- array[i]->print_on(out, withComments, printRanges);
- }
- }
- FREE_C_HEAP_ARRAY(Flag*, array);
-}
--- a/src/hotspot/share/runtime/globals.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/runtime/globals.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -108,349 +108,6 @@
#endif // no compilers
-// string type aliases used only in this file
-typedef const char* ccstr;
-typedef const char* ccstrlist; // represents string arguments which accumulate
-
-// function type that will construct default range string
-typedef const char* (*RangeStrFunc)(void);
-
-struct Flag {
- enum Flags {
- // latest value origin
- DEFAULT = 0,
- COMMAND_LINE = 1,
- ENVIRON_VAR = 2,
- CONFIG_FILE = 3,
- MANAGEMENT = 4,
- ERGONOMIC = 5,
- ATTACH_ON_DEMAND = 6,
- INTERNAL = 7,
-
- LAST_VALUE_ORIGIN = INTERNAL,
- VALUE_ORIGIN_BITS = 4,
- VALUE_ORIGIN_MASK = right_n_bits(VALUE_ORIGIN_BITS),
-
- // flag kind
- KIND_PRODUCT = 1 << 4,
- KIND_MANAGEABLE = 1 << 5,
- KIND_DIAGNOSTIC = 1 << 6,
- KIND_EXPERIMENTAL = 1 << 7,
- KIND_NOT_PRODUCT = 1 << 8,
- KIND_DEVELOP = 1 << 9,
- KIND_PLATFORM_DEPENDENT = 1 << 10,
- KIND_READ_WRITE = 1 << 11,
- KIND_C1 = 1 << 12,
- KIND_C2 = 1 << 13,
- KIND_ARCH = 1 << 14,
- KIND_LP64_PRODUCT = 1 << 15,
- KIND_COMMERCIAL = 1 << 16,
- KIND_JVMCI = 1 << 17,
-
- // set this bit if the flag was set on the command line
- ORIG_COMMAND_LINE = 1 << 18,
-
- KIND_MASK = ~(VALUE_ORIGIN_MASK | ORIG_COMMAND_LINE)
- };
-
- enum Error {
- // no error
- SUCCESS = 0,
- // flag name is missing
- MISSING_NAME,
- // flag value is missing
- MISSING_VALUE,
- // error parsing the textual form of the value
- WRONG_FORMAT,
- // flag is not writable
- NON_WRITABLE,
- // flag value is outside of its bounds
- OUT_OF_BOUNDS,
- // flag value violates its constraint
- VIOLATES_CONSTRAINT,
- // there is no flag with the given name
- INVALID_FLAG,
- // the flag can only be set only on command line during invocation of the VM
- COMMAND_LINE_ONLY,
- // the flag may only be set once
- SET_ONLY_ONCE,
- // the flag is not writable in this combination of product/debug build
- CONSTANT,
- // other, unspecified error related to setting the flag
- ERR_OTHER
- };
-
- enum MsgType {
- NONE = 0,
- DIAGNOSTIC_FLAG_BUT_LOCKED,
- EXPERIMENTAL_FLAG_BUT_LOCKED,
- DEVELOPER_FLAG_BUT_PRODUCT_BUILD,
- NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD,
- COMMERCIAL_FLAG_BUT_DISABLED,
- COMMERCIAL_FLAG_BUT_LOCKED
- };
-
- const char* _type;
- const char* _name;
- void* _addr;
- NOT_PRODUCT(const char* _doc;)
- Flags _flags;
- size_t _name_len;
-
- // points to all Flags static array
- static Flag* flags;
-
- // number of flags
- static size_t numFlags;
-
- static Flag* find_flag(const char* name) { return find_flag(name, strlen(name), true, true); };
- static Flag* find_flag(const char* name, size_t length, bool allow_locked = false, bool return_flag = false);
- static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false);
-
- static const char* get_int_default_range_str();
- static const char* get_uint_default_range_str();
- static const char* get_intx_default_range_str();
- static const char* get_uintx_default_range_str();
- static const char* get_uint64_t_default_range_str();
- static const char* get_size_t_default_range_str();
- static const char* get_double_default_range_str();
-
- Flag::Error check_writable(bool changed);
-
- bool is_bool() const;
- bool get_bool() const;
- Flag::Error set_bool(bool value);
-
- bool is_int() const;
- int get_int() const;
- Flag::Error set_int(int value);
-
- bool is_uint() const;
- uint get_uint() const;
- Flag::Error set_uint(uint value);
-
- bool is_intx() const;
- intx get_intx() const;
- Flag::Error set_intx(intx value);
-
- bool is_uintx() const;
- uintx get_uintx() const;
- Flag::Error set_uintx(uintx value);
-
- bool is_uint64_t() const;
- uint64_t get_uint64_t() const;
- Flag::Error set_uint64_t(uint64_t value);
-
- bool is_size_t() const;
- size_t get_size_t() const;
- Flag::Error set_size_t(size_t value);
-
- bool is_double() const;
- double get_double() const;
- Flag::Error set_double(double value);
-
- bool is_ccstr() const;
- bool ccstr_accumulates() const;
- ccstr get_ccstr() const;
- Flag::Error set_ccstr(ccstr value);
-
- Flags get_origin();
- void set_origin(Flags origin);
-
- size_t get_name_length();
-
- bool is_default();
- bool is_ergonomic();
- bool is_command_line();
- void set_command_line();
-
- bool is_product() const;
- bool is_manageable() const;
- bool is_diagnostic() const;
- bool is_experimental() const;
- bool is_notproduct() const;
- bool is_develop() const;
- bool is_read_write() const;
- bool is_commercial() const;
-
- bool is_constant_in_binary() const;
-
- bool is_unlocker() const;
- bool is_unlocked() const;
- bool is_writeable() const;
- bool is_external() const;
-
- bool is_unlocker_ext() const;
- bool is_unlocked_ext() const;
- bool is_writeable_ext() const;
- bool is_external_ext() const;
-
- void clear_diagnostic();
-
- Flag::MsgType get_locked_message(char*, int) const;
- Flag::MsgType get_locked_message_ext(char*, int) const;
-
- // printRanges will print out flags type, name and range values as expected by -XX:+PrintFlagsRanges
- void print_on(outputStream* st, bool withComments = false, bool printRanges = false);
- void print_kind(outputStream* st, unsigned int width);
- void print_origin(outputStream* st, unsigned int width);
- void print_as_flag(outputStream* st);
-
- static const char* flag_error_str(Flag::Error error);
-};
-
-// debug flags control various aspects of the VM and are global accessible
-
-// use FlagSetting to temporarily change some debug flag
-// e.g. FlagSetting fs(DebugThisAndThat, true);
-// restored to previous value upon leaving scope
-class FlagSetting {
- bool val;
- bool* flag;
- public:
- FlagSetting(bool& fl, bool newValue) { flag = &fl; val = fl; fl = newValue; }
- ~FlagSetting() { *flag = val; }
-};
-
-
-class CounterSetting {
- intx* counter;
- public:
- CounterSetting(intx* cnt) { counter = cnt; (*counter)++; }
- ~CounterSetting() { (*counter)--; }
-};
-
-class IntFlagSetting {
- int val;
- int* flag;
- public:
- IntFlagSetting(int& fl, int newValue) { flag = &fl; val = fl; fl = newValue; }
- ~IntFlagSetting() { *flag = val; }
-};
-
-class UIntFlagSetting {
- uint val;
- uint* flag;
- public:
- UIntFlagSetting(uint& fl, uint newValue) { flag = &fl; val = fl; fl = newValue; }
- ~UIntFlagSetting() { *flag = val; }
-};
-
-class UIntXFlagSetting {
- uintx val;
- uintx* flag;
- public:
- UIntXFlagSetting(uintx& fl, uintx newValue) { flag = &fl; val = fl; fl = newValue; }
- ~UIntXFlagSetting() { *flag = val; }
-};
-
-class DoubleFlagSetting {
- double val;
- double* flag;
- public:
- DoubleFlagSetting(double& fl, double newValue) { flag = &fl; val = fl; fl = newValue; }
- ~DoubleFlagSetting() { *flag = val; }
-};
-
-class SizeTFlagSetting {
- size_t val;
- size_t* flag;
- public:
- SizeTFlagSetting(size_t& fl, size_t newValue) { flag = &fl; val = fl; fl = newValue; }
- ~SizeTFlagSetting() { *flag = val; }
-};
-
-// Helper class for temporarily saving the value of a flag during a scope.
-template <size_t SIZE>
-class FlagGuard {
- unsigned char _value[SIZE];
- void* const _addr;
-
- // Hide operator new, this class should only be allocated on the stack.
- // NOTE: Cannot include memory/allocation.hpp here due to circular
- // dependencies.
- void* operator new(size_t size) throw();
- void* operator new [](size_t size) throw();
-
- public:
- FlagGuard(void* flag_addr) : _addr(flag_addr) {
- memcpy(_value, _addr, SIZE);
- }
-
- ~FlagGuard() {
- memcpy(_addr, _value, SIZE);
- }
-};
-
-#define FLAG_GUARD(f) FlagGuard<sizeof(f)> f ## _guard(&f)
-
-class CommandLineFlags {
-public:
- static Flag::Error boolAt(const char* name, size_t len, bool* value, bool allow_locked = false, bool return_flag = false);
- static Flag::Error boolAt(const char* name, bool* value, bool allow_locked = false, bool return_flag = false) { return boolAt(name, strlen(name), value, allow_locked, return_flag); }
- static Flag::Error boolAtPut(Flag* flag, bool* value, Flag::Flags origin);
- static Flag::Error boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin);
- static Flag::Error boolAtPut(const char* name, bool* value, Flag::Flags origin) { return boolAtPut(name, strlen(name), value, origin); }
-
- static Flag::Error intAt(const char* name, size_t len, int* value, bool allow_locked = false, bool return_flag = false);
- static Flag::Error intAt(const char* name, int* value, bool allow_locked = false, bool return_flag = false) { return intAt(name, strlen(name), value, allow_locked, return_flag); }
- static Flag::Error intAtPut(Flag* flag, int* value, Flag::Flags origin);
- static Flag::Error intAtPut(const char* name, size_t len, int* value, Flag::Flags origin);
- static Flag::Error intAtPut(const char* name, int* value, Flag::Flags origin) { return intAtPut(name, strlen(name), value, origin); }
-
- static Flag::Error uintAt(const char* name, size_t len, uint* value, bool allow_locked = false, bool return_flag = false);
- static Flag::Error uintAt(const char* name, uint* value, bool allow_locked = false, bool return_flag = false) { return uintAt(name, strlen(name), value, allow_locked, return_flag); }
- static Flag::Error uintAtPut(Flag* flag, uint* value, Flag::Flags origin);
- static Flag::Error uintAtPut(const char* name, size_t len, uint* value, Flag::Flags origin);
- static Flag::Error uintAtPut(const char* name, uint* value, Flag::Flags origin) { return uintAtPut(name, strlen(name), value, origin); }
-
- static Flag::Error intxAt(const char* name, size_t len, intx* value, bool allow_locked = false, bool return_flag = false);
- static Flag::Error intxAt(const char* name, intx* value, bool allow_locked = false, bool return_flag = false) { return intxAt(name, strlen(name), value, allow_locked, return_flag); }
- static Flag::Error intxAtPut(Flag* flag, intx* value, Flag::Flags origin);
- static Flag::Error intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin);
- static Flag::Error intxAtPut(const char* name, intx* value, Flag::Flags origin) { return intxAtPut(name, strlen(name), value, origin); }
-
- static Flag::Error uintxAt(const char* name, size_t len, uintx* value, bool allow_locked = false, bool return_flag = false);
- static Flag::Error uintxAt(const char* name, uintx* value, bool allow_locked = false, bool return_flag = false) { return uintxAt(name, strlen(name), value, allow_locked, return_flag); }
- static Flag::Error uintxAtPut(Flag* flag, uintx* value, Flag::Flags origin);
- static Flag::Error uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin);
- static Flag::Error uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); }
-
- static Flag::Error size_tAt(const char* name, size_t len, size_t* value, bool allow_locked = false, bool return_flag = false);
- static Flag::Error size_tAt(const char* name, size_t* value, bool allow_locked = false, bool return_flag = false) { return size_tAt(name, strlen(name), value, allow_locked, return_flag); }
- static Flag::Error size_tAtPut(Flag* flag, size_t* value, Flag::Flags origin);
- static Flag::Error size_tAtPut(const char* name, size_t len, size_t* value, Flag::Flags origin);
- static Flag::Error size_tAtPut(const char* name, size_t* value, Flag::Flags origin) { return size_tAtPut(name, strlen(name), value, origin); }
-
- static Flag::Error uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked = false, bool return_flag = false);
- static Flag::Error uint64_tAt(const char* name, uint64_t* value, bool allow_locked = false, bool return_flag = false) { return uint64_tAt(name, strlen(name), value, allow_locked, return_flag); }
- static Flag::Error uint64_tAtPut(Flag* flag, uint64_t* value, Flag::Flags origin);
- static Flag::Error uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin);
- static Flag::Error uint64_tAtPut(const char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
-
- static Flag::Error doubleAt(const char* name, size_t len, double* value, bool allow_locked = false, bool return_flag = false);
- static Flag::Error doubleAt(const char* name, double* value, bool allow_locked = false, bool return_flag = false) { return doubleAt(name, strlen(name), value, allow_locked, return_flag); }
- static Flag::Error doubleAtPut(Flag* flag, double* value, Flag::Flags origin);
- static Flag::Error doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin);
- static Flag::Error doubleAtPut(const char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); }
-
- static Flag::Error ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked = false, bool return_flag = false);
- static Flag::Error ccstrAt(const char* name, ccstr* value, bool allow_locked = false, bool return_flag = false) { return ccstrAt(name, strlen(name), value, allow_locked, return_flag); }
- // Contract: Flag will make private copy of the incoming value.
- // Outgoing value is always malloc-ed, and caller MUST call free.
- static Flag::Error ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin);
- static Flag::Error ccstrAtPut(const char* name, ccstr* value, Flag::Flags origin) { return ccstrAtPut(name, strlen(name), value, origin); }
-
- // Returns false if name is not a command line flag.
- static bool wasSetOnCmdline(const char* name, bool* value);
- static void printSetFlags(outputStream* out);
-
- // printRanges will print out flags type, name and range values as expected by -XX:+PrintFlagsRanges
- static void printFlags(outputStream* out, bool withComments, bool printRanges = false);
-
- static void verify() PRODUCT_RETURN;
-};
-
// use this for flags that are true by default in the debug version but
// false in the optimized version, and vice versa
#ifdef ASSERT
@@ -536,10 +193,10 @@
// it can be done in the same way as product_rw.
//
// range is a macro that will expand to min and max arguments for range
-// checking code if provided - see commandLineFlagRangeList.hpp
+// checking code if provided - see jvmFlagRangeList.hpp
//
// constraint is a macro that will expand to custom function call
-// for constraint checking if provided - see commandLineFlagConstraintList.hpp
+// for constraint checking if provided - see jvmFlagConstraintList.hpp
//
// writeable is a macro that controls if and how the value can change during the runtime
//
--- a/src/hotspot/share/runtime/globals_ext.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/runtime/globals_ext.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -25,13 +25,15 @@
#ifndef SHARE_VM_RUNTIME_GLOBALS_EXT_HPP
#define SHARE_VM_RUNTIME_GLOBALS_EXT_HPP
+#include "runtime/flags/jvmFlag.hpp"
+
// globals_extension.hpp extension
-// Additional CommandLineFlags enum values
-#define COMMANDLINEFLAG_EXT
+// Additional JVMFlags enum values
+#define JVMFLAGS_EXT
-// Additional CommandLineFlagsWithType enum values
-#define COMMANDLINEFLAGWITHTYPE_EXT
+// Additional JVMFlagsWithType enum values
+#define JVMFLAGSWITHTYPE_EXT
// globals.cpp extension
@@ -45,26 +47,26 @@
// Default method implementations
-inline bool Flag::is_unlocker_ext() const {
+inline bool JVMFlag::is_unlocker_ext() const {
return false;
}
-inline bool Flag::is_unlocked_ext() const {
+inline bool JVMFlag::is_unlocked_ext() const {
return true;
}
-inline bool Flag::is_writeable_ext() const {
+inline bool JVMFlag::is_writeable_ext() const {
return false;
}
-inline bool Flag::is_external_ext() const {
+inline bool JVMFlag::is_external_ext() const {
return false;
}
-inline Flag::MsgType Flag::get_locked_message_ext(char* buf, int buflen) const {
+inline JVMFlag::MsgType JVMFlag::get_locked_message_ext(char* buf, int buflen) const {
assert(buf != NULL, "Buffer cannot be NULL");
buf[0] = '\0';
- return Flag::NONE;
+ return JVMFlag::NONE;
}
#endif // SHARE_VM_RUNTIME_GLOBALS_EXT_HPP
--- a/src/hotspot/share/runtime/globals_extension.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/runtime/globals_extension.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -27,7 +27,6 @@
#include "runtime/globals.hpp"
#include "utilities/macros.hpp"
-#include "utilities/macros.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci_globals.hpp"
#endif
@@ -164,9 +163,9 @@
IGNORE_RANGE, \
IGNORE_CONSTRAINT, \
IGNORE_WRITEABLE)
- COMMANDLINEFLAG_EXT
- NUM_CommandLineFlag
-} CommandLineFlag;
+ JVMFLAGS_EXT
+ NUM_JVMFlags
+} JVMFlags;
// Construct enum of Flag_<cmdline-arg>_<type> constants.
@@ -293,19 +292,19 @@
IGNORE_RANGE,
IGNORE_CONSTRAINT,
IGNORE_WRITEABLE)
- COMMANDLINEFLAGWITHTYPE_EXT
- NUM_CommandLineFlagWithType
-} CommandLineFlagWithType;
+ JVMFLAGSWITHTYPE_EXT
+ NUM_JVMFlagsWithType
+} JVMFlagsWithType;
-#define FLAG_IS_DEFAULT(name) (CommandLineFlagsEx::is_default(FLAG_MEMBER(name)))
-#define FLAG_IS_ERGO(name) (CommandLineFlagsEx::is_ergo(FLAG_MEMBER(name)))
-#define FLAG_IS_CMDLINE(name) (CommandLineFlagsEx::is_cmdline(FLAG_MEMBER(name)))
+#define FLAG_IS_DEFAULT(name) (JVMFlagEx::is_default(FLAG_MEMBER(name)))
+#define FLAG_IS_ERGO(name) (JVMFlagEx::is_ergo(FLAG_MEMBER(name)))
+#define FLAG_IS_CMDLINE(name) (JVMFlagEx::is_cmdline(FLAG_MEMBER(name)))
#define FLAG_SET_DEFAULT(name, value) ((name) = (value))
-#define FLAG_SET_CMDLINE(type, name, value) (CommandLineFlagsEx::setOnCmdLine(FLAG_MEMBER_WITH_TYPE(name, type)), \
- CommandLineFlagsEx::type##AtPut(FLAG_MEMBER_WITH_TYPE(name, type), (type)(value), Flag::COMMAND_LINE))
-#define FLAG_SET_ERGO(type, name, value) (CommandLineFlagsEx::type##AtPut(FLAG_MEMBER_WITH_TYPE(name, type), (type)(value), Flag::ERGONOMIC))
+#define FLAG_SET_CMDLINE(type, name, value) (JVMFlagEx::setOnCmdLine(FLAG_MEMBER_WITH_TYPE(name, type)), \
+ JVMFlagEx::type##AtPut(FLAG_MEMBER_WITH_TYPE(name, type), (type)(value), JVMFlag::COMMAND_LINE))
+#define FLAG_SET_ERGO(type, name, value) (JVMFlagEx::type##AtPut(FLAG_MEMBER_WITH_TYPE(name, type), (type)(value), JVMFlag::ERGONOMIC))
#define FLAG_SET_ERGO_IF_DEFAULT(type, name, value) \
do { \
if (FLAG_IS_DEFAULT(name)) { \
@@ -313,26 +312,26 @@
} \
} while (0)
-// Can't put the following in CommandLineFlags because
+// Can't put the following in JVMFlags because
// of a circular dependency on the enum definition.
-class CommandLineFlagsEx : CommandLineFlags {
+class JVMFlagEx : JVMFlag {
public:
- static Flag::Error boolAtPut(CommandLineFlagWithType flag, bool value, Flag::Flags origin);
- static Flag::Error intAtPut(CommandLineFlagWithType flag, int value, Flag::Flags origin);
- static Flag::Error uintAtPut(CommandLineFlagWithType flag, uint value, Flag::Flags origin);
- static Flag::Error intxAtPut(CommandLineFlagWithType flag, intx value, Flag::Flags origin);
- static Flag::Error uintxAtPut(CommandLineFlagWithType flag, uintx value, Flag::Flags origin);
- static Flag::Error uint64_tAtPut(CommandLineFlagWithType flag, uint64_t value, Flag::Flags origin);
- static Flag::Error size_tAtPut(CommandLineFlagWithType flag, size_t value, Flag::Flags origin);
- static Flag::Error doubleAtPut(CommandLineFlagWithType flag, double value, Flag::Flags origin);
+ static JVMFlag::Error boolAtPut(JVMFlagsWithType flag, bool value, JVMFlag::Flags origin);
+ static JVMFlag::Error intAtPut(JVMFlagsWithType flag, int value, JVMFlag::Flags origin);
+ static JVMFlag::Error uintAtPut(JVMFlagsWithType flag, uint value, JVMFlag::Flags origin);
+ static JVMFlag::Error intxAtPut(JVMFlagsWithType flag, intx value, JVMFlag::Flags origin);
+ static JVMFlag::Error uintxAtPut(JVMFlagsWithType flag, uintx value, JVMFlag::Flags origin);
+ static JVMFlag::Error uint64_tAtPut(JVMFlagsWithType flag, uint64_t value, JVMFlag::Flags origin);
+ static JVMFlag::Error size_tAtPut(JVMFlagsWithType flag, size_t value, JVMFlag::Flags origin);
+ static JVMFlag::Error doubleAtPut(JVMFlagsWithType flag, double value, JVMFlag::Flags origin);
// Contract: Flag will make private copy of the incoming value
- static Flag::Error ccstrAtPut(CommandLineFlagWithType flag, ccstr value, Flag::Flags origin);
+ static JVMFlag::Error ccstrAtPut(JVMFlagsWithType flag, ccstr value, JVMFlag::Flags origin);
- static bool is_default(CommandLineFlag flag);
- static bool is_ergo(CommandLineFlag flag);
- static bool is_cmdline(CommandLineFlag flag);
+ static bool is_default(JVMFlags flag);
+ static bool is_ergo(JVMFlags flag);
+ static bool is_cmdline(JVMFlags flag);
- static void setOnCmdLine(CommandLineFlagWithType flag);
+ static void setOnCmdLine(JVMFlagsWithType flag);
};
#endif // SHARE_VM_RUNTIME_GLOBALS_EXTENSION_HPP
--- a/src/hotspot/share/runtime/handshake.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/runtime/handshake.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -26,6 +26,7 @@
#define SHARE_VM_RUNTIME_HANDSHAKE_HPP
#include "memory/allocation.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/semaphore.hpp"
class ThreadClosure;
--- a/src/hotspot/share/runtime/init.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/runtime/init.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -30,7 +30,7 @@
#include "interpreter/bytecodes.hpp"
#include "memory/universe.hpp"
#include "prims/methodHandles.hpp"
-#include "runtime/globals.hpp"
+#include "runtime/flags/jvmFlag.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/init.hpp"
@@ -155,7 +155,7 @@
// All the flags that get adjusted by VM_Version_init and os::init_2
// have been set so dump the flags now.
if (PrintFlagsFinal || PrintFlagsRanges) {
- CommandLineFlags::printFlags(tty, false, PrintFlagsRanges);
+ JVMFlag::printFlags(tty, false, PrintFlagsRanges);
}
return JNI_OK;
--- a/src/hotspot/share/runtime/java.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/runtime/java.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -54,6 +54,7 @@
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
--- a/src/hotspot/share/runtime/jniHandles.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/runtime/jniHandles.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -39,6 +39,16 @@
OopStorage* JNIHandles::_global_handles = NULL;
OopStorage* JNIHandles::_weak_global_handles = NULL;
+OopStorage* JNIHandles::global_handles() {
+ assert(_global_handles != NULL, "Uninitialized JNI global handles");
+ return _global_handles;
+}
+
+OopStorage* JNIHandles::weak_global_handles() {
+ assert(_weak_global_handles != NULL, "Uninitialized JNI weak global handles");
+ return _weak_global_handles;
+}
+
jobject JNIHandles::make_local(oop obj) {
if (obj == NULL) {
@@ -96,7 +106,7 @@
if (!obj.is_null()) {
// ignore null handles
assert(oopDesc::is_oop(obj()), "not an oop");
- oop* ptr = _global_handles->allocate();
+ oop* ptr = global_handles()->allocate();
// Return NULL on allocation failure.
if (ptr != NULL) {
assert(*ptr == NULL, "invariant");
@@ -120,7 +130,7 @@
if (!obj.is_null()) {
// ignore null handles
assert(oopDesc::is_oop(obj()), "not an oop");
- oop* ptr = _weak_global_handles->allocate();
+ oop* ptr = weak_global_handles()->allocate();
// Return NULL on allocation failure.
if (ptr != NULL) {
assert(*ptr == NULL, "invariant");
@@ -167,7 +177,7 @@
assert(!is_jweak(handle), "wrong method for detroying jweak");
oop* oop_ptr = jobject_ptr(handle);
RootAccess<IN_CONCURRENT_ROOT>::oop_store(oop_ptr, (oop)NULL);
- _global_handles->release(oop_ptr);
+ global_handles()->release(oop_ptr);
}
}
@@ -177,23 +187,23 @@
assert(is_jweak(handle), "JNI handle not jweak");
oop* oop_ptr = jweak_ptr(handle);
RootAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)NULL);
- _weak_global_handles->release(oop_ptr);
+ weak_global_handles()->release(oop_ptr);
}
}
void JNIHandles::oops_do(OopClosure* f) {
- _global_handles->oops_do(f);
+ global_handles()->oops_do(f);
}
void JNIHandles::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
- _weak_global_handles->weak_oops_do(is_alive, f);
+ weak_global_handles()->weak_oops_do(is_alive, f);
}
void JNIHandles::weak_oops_do(OopClosure* f) {
- _weak_global_handles->weak_oops_do(f);
+ weak_global_handles()->weak_oops_do(f);
}
@@ -216,11 +226,11 @@
assert(handle != NULL, "precondition");
jobjectRefType result = JNIInvalidRefType;
if (is_jweak(handle)) {
- if (is_storage_handle(_weak_global_handles, jweak_ptr(handle))) {
+ if (is_storage_handle(weak_global_handles(), jweak_ptr(handle))) {
result = JNIWeakGlobalRefType;
}
} else {
- switch (_global_handles->allocation_status(jobject_ptr(handle))) {
+ switch (global_handles()->allocation_status(jobject_ptr(handle))) {
case OopStorage::ALLOCATED_ENTRY:
result = JNIGlobalRefType;
break;
@@ -277,33 +287,31 @@
bool JNIHandles::is_global_handle(jobject handle) {
assert(handle != NULL, "precondition");
- return !is_jweak(handle) && is_storage_handle(_global_handles, jobject_ptr(handle));
+ return !is_jweak(handle) && is_storage_handle(global_handles(), jobject_ptr(handle));
}
bool JNIHandles::is_weak_global_handle(jobject handle) {
assert(handle != NULL, "precondition");
- return is_jweak(handle) && is_storage_handle(_weak_global_handles, jweak_ptr(handle));
+ return is_jweak(handle) && is_storage_handle(weak_global_handles(), jweak_ptr(handle));
}
size_t JNIHandles::global_handle_memory_usage() {
- return _global_handles->total_memory_usage();
+ return global_handles()->total_memory_usage();
}
size_t JNIHandles::weak_global_handle_memory_usage() {
- return _weak_global_handles->total_memory_usage();
+ return weak_global_handles()->total_memory_usage();
}
// We assume this is called at a safepoint: no lock is needed.
void JNIHandles::print_on(outputStream* st) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
- assert(_global_handles != NULL && _weak_global_handles != NULL,
- "JNIHandles not initialized");
st->print_cr("JNI global refs: " SIZE_FORMAT ", weak refs: " SIZE_FORMAT,
- _global_handles->allocation_count(),
- _weak_global_handles->allocation_count());
+ global_handles()->allocation_count(),
+ weak_global_handles()->allocation_count());
st->cr();
st->flush();
}
--- a/src/hotspot/share/runtime/jniHandles.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/runtime/jniHandles.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -115,6 +115,9 @@
static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
// Traversal of weak global handles.
static void weak_oops_do(OopClosure* f);
+
+ static OopStorage* global_handles();
+ static OopStorage* weak_global_handles();
};
--- a/src/hotspot/share/runtime/mutexLocker.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/runtime/mutexLocker.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -26,6 +26,7 @@
#define SHARE_VM_RUNTIME_MUTEXLOCKER_HPP
#include "memory/allocation.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/mutex.hpp"
// Mutexes used in the VM.
--- a/src/hotspot/share/runtime/thread.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/runtime/thread.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -63,12 +63,11 @@
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
-#include "runtime/commandLineFlagConstraintList.hpp"
-#include "runtime/commandLineFlagWriteableList.hpp"
-#include "runtime/commandLineFlagRangeList.hpp"
+#include "runtime/flags/jvmFlagConstraintList.hpp"
+#include "runtime/flags/jvmFlagRangeList.hpp"
+#include "runtime/flags/jvmFlagWriteableList.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/globals.hpp"
#include "runtime/handshake.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@@ -3663,17 +3662,17 @@
if (ergo_result != JNI_OK) return ergo_result;
// Final check of all ranges after ergonomics which may change values.
- if (!CommandLineFlagRangeList::check_ranges()) {
+ if (!JVMFlagRangeList::check_ranges()) {
return JNI_EINVAL;
}
// Final check of all 'AfterErgo' constraints after ergonomics which may change values.
- bool constraint_result = CommandLineFlagConstraintList::check_constraints(CommandLineFlagConstraint::AfterErgo);
+ bool constraint_result = JVMFlagConstraintList::check_constraints(JVMFlagConstraint::AfterErgo);
if (!constraint_result) {
return JNI_EINVAL;
}
- CommandLineFlagWriteableList::mark_startup();
+ JVMFlagWriteableList::mark_startup();
if (PauseAtStartup) {
os::pause();
--- a/src/hotspot/share/runtime/vmStructs.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/runtime/vmStructs.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -83,6 +83,7 @@
#include "prims/jvmtiAgentThread.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
+#include "runtime/flags/jvmFlag.hpp"
#include "runtime/globals.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
@@ -718,7 +719,7 @@
nonstatic_field(nmethod, _osr_link, nmethod*) \
nonstatic_field(nmethod, _scavenge_root_link, nmethod*) \
nonstatic_field(nmethod, _scavenge_root_state, jbyte) \
- nonstatic_field(nmethod, _state, volatile signed char) \
+ nonstatic_field(nmethod, _state, volatile signed char) \
nonstatic_field(nmethod, _exception_offset, int) \
nonstatic_field(nmethod, _orig_pc_offset, int) \
nonstatic_field(nmethod, _stub_offset, int) \
@@ -1062,12 +1063,12 @@
/* -XX flags */ \
/*********************/ \
\
- nonstatic_field(Flag, _type, const char*) \
- nonstatic_field(Flag, _name, const char*) \
- unchecked_nonstatic_field(Flag, _addr, sizeof(void*)) /* NOTE: no type */ \
- nonstatic_field(Flag, _flags, Flag::Flags) \
- static_field(Flag, flags, Flag*) \
- static_field(Flag, numFlags, size_t) \
+ nonstatic_field(JVMFlag, _type, const char*) \
+ nonstatic_field(JVMFlag, _name, const char*) \
+ unchecked_nonstatic_field(JVMFlag, _addr, sizeof(void*)) /* NOTE: no type */ \
+ nonstatic_field(JVMFlag, _flags, JVMFlag::Flags) \
+ static_field(JVMFlag, flags, JVMFlag*) \
+ static_field(JVMFlag, numFlags, size_t) \
\
/*************************/ \
/* JDK / VM version info */ \
@@ -1444,14 +1445,14 @@
declare_toplevel_type(SharedRuntime) \
\
declare_toplevel_type(CodeBlob) \
- declare_type(RuntimeBlob, CodeBlob) \
- declare_type(BufferBlob, RuntimeBlob) \
+ declare_type(RuntimeBlob, CodeBlob) \
+ declare_type(BufferBlob, RuntimeBlob) \
declare_type(AdapterBlob, BufferBlob) \
declare_type(MethodHandlesAdapterBlob, BufferBlob) \
declare_type(CompiledMethod, CodeBlob) \
declare_type(nmethod, CompiledMethod) \
- declare_type(RuntimeStub, RuntimeBlob) \
- declare_type(SingletonBlob, RuntimeBlob) \
+ declare_type(RuntimeStub, RuntimeBlob) \
+ declare_type(SingletonBlob, RuntimeBlob) \
declare_type(SafepointBlob, SingletonBlob) \
declare_type(DeoptimizationBlob, SingletonBlob) \
declare_c2_type(ExceptionBlob, SingletonBlob) \
@@ -1910,8 +1911,8 @@
/* -XX flags */ \
/********************/ \
\
- declare_toplevel_type(Flag) \
- declare_toplevel_type(Flag*) \
+ declare_toplevel_type(JVMFlag) \
+ declare_toplevel_type(JVMFlag*) \
\
/********************/ \
/* JVMTI */ \
@@ -1951,7 +1952,7 @@
declare_integer_type(ThreadState) \
declare_integer_type(Location::Type) \
declare_integer_type(Location::Where) \
- declare_integer_type(Flag::Flags) \
+ declare_integer_type(JVMFlag::Flags) \
COMPILER2_PRESENT(declare_integer_type(OptoReg::Name)) \
\
declare_toplevel_type(CHeapObj<mtInternal>) \
--- a/src/hotspot/share/services/attachListener.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/services/attachListener.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -31,6 +31,7 @@
#include "oops/typeArrayOop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
+#include "runtime/flags/jvmFlag.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
@@ -275,9 +276,9 @@
FormatBuffer<80> err_msg("%s", "");
- int ret = WriteableFlags::set_flag(op->arg(0), op->arg(1), Flag::ATTACH_ON_DEMAND, err_msg);
- if (ret != Flag::SUCCESS) {
- if (ret == Flag::NON_WRITABLE) {
+ int ret = WriteableFlags::set_flag(op->arg(0), op->arg(1), JVMFlag::ATTACH_ON_DEMAND, err_msg);
+ if (ret != JVMFlag::SUCCESS) {
+ if (ret == JVMFlag::NON_WRITABLE) {
// if the flag is not manageable try to change it through
// the platform dependent implementation
return AttachListener::pd_set_flag(op, out);
@@ -298,7 +299,7 @@
out->print_cr("flag name is missing");
return JNI_ERR;
}
- Flag* f = Flag::find_flag((char*)name, strlen(name));
+ JVMFlag* f = JVMFlag::find_flag((char*)name, strlen(name));
if (f) {
f->print_as_flag(out);
out->cr();
--- a/src/hotspot/share/services/diagnosticCommand.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/services/diagnosticCommand.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -33,7 +33,7 @@
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
-#include "runtime/globals.hpp"
+#include "runtime/flags/jvmFlag.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/os.hpp"
@@ -231,9 +231,9 @@
void PrintVMFlagsDCmd::execute(DCmdSource source, TRAPS) {
if (_all.value()) {
- CommandLineFlags::printFlags(output(), true);
+ JVMFlag::printFlags(output(), true);
} else {
- CommandLineFlags::printSetFlags(output());
+ JVMFlag::printSetFlags(output());
}
}
@@ -264,9 +264,9 @@
}
FormatBuffer<80> err_msg("%s", "");
- int ret = WriteableFlags::set_flag(_flag.value(), val, Flag::MANAGEMENT, err_msg);
+ int ret = WriteableFlags::set_flag(_flag.value(), val, JVMFlag::MANAGEMENT, err_msg);
- if (ret != Flag::SUCCESS) {
+ if (ret != JVMFlag::SUCCESS) {
output()->print_cr("%s", err_msg.buffer());
}
}
--- a/src/hotspot/share/services/dtraceAttacher.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/services/dtraceAttacher.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "code/codeCache.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/deoptimization.hpp"
+#include "runtime/flags/jvmFlag.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/dtraceAttacher.hpp"
@@ -50,8 +51,8 @@
};
static void set_bool_flag(const char* flag, bool value) {
- CommandLineFlags::boolAtPut((char*)flag, strlen(flag), &value,
- Flag::ATTACH_ON_DEMAND);
+ JVMFlag::boolAtPut((char*)flag, strlen(flag), &value,
+ JVMFlag::ATTACH_ON_DEMAND);
}
// Enable only the "fine grained" flags. Do *not* touch
--- a/src/hotspot/share/services/management.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/services/management.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -36,6 +36,7 @@
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/arguments.hpp"
+#include "runtime/flags/jvmFlag.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@@ -866,10 +867,10 @@
static jint get_num_flags() {
// last flag entry is always NULL, so subtract 1
- int nFlags = (int) Flag::numFlags - 1;
+ int nFlags = (int) JVMFlag::numFlags - 1;
int count = 0;
for (int i = 0; i < nFlags; i++) {
- Flag* flag = &Flag::flags[i];
+ JVMFlag* flag = &JVMFlag::flags[i];
// Exclude the locked (diagnostic, experimental) flags
if (flag->is_unlocked() || flag->is_unlocker()) {
count++;
@@ -1419,14 +1420,14 @@
// Returns a String array of all VM global flag names
JVM_ENTRY(jobjectArray, jmm_GetVMGlobalNames(JNIEnv *env))
// last flag entry is always NULL, so subtract 1
- int nFlags = (int) Flag::numFlags - 1;
+ int nFlags = (int) JVMFlag::numFlags - 1;
// allocate a temp array
objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
nFlags, CHECK_0);
objArrayHandle flags_ah(THREAD, r);
int num_entries = 0;
for (int i = 0; i < nFlags; i++) {
- Flag* flag = &Flag::flags[i];
+ JVMFlag* flag = &JVMFlag::flags[i];
// Exclude notproduct and develop flags in product builds.
if (flag->is_constant_in_binary()) {
continue;
@@ -1454,7 +1455,7 @@
// Utility function used by jmm_GetVMGlobals. Returns false if flag type
// can't be determined, true otherwise. If false is returned, then *global
// will be incomplete and invalid.
-bool add_global_entry(JNIEnv* env, Handle name, jmmVMGlobal *global, Flag *flag, TRAPS) {
+bool add_global_entry(JNIEnv* env, Handle name, jmmVMGlobal *global, JVMFlag *flag, TRAPS) {
Handle flag_name;
if (name() == NULL) {
flag_name = java_lang_String::create_from_str(flag->_name, CHECK_false);
@@ -1499,25 +1500,25 @@
global->writeable = flag->is_writeable();
global->external = flag->is_external();
switch (flag->get_origin()) {
- case Flag::DEFAULT:
+ case JVMFlag::DEFAULT:
global->origin = JMM_VMGLOBAL_ORIGIN_DEFAULT;
break;
- case Flag::COMMAND_LINE:
+ case JVMFlag::COMMAND_LINE:
global->origin = JMM_VMGLOBAL_ORIGIN_COMMAND_LINE;
break;
- case Flag::ENVIRON_VAR:
+ case JVMFlag::ENVIRON_VAR:
global->origin = JMM_VMGLOBAL_ORIGIN_ENVIRON_VAR;
break;
- case Flag::CONFIG_FILE:
+ case JVMFlag::CONFIG_FILE:
global->origin = JMM_VMGLOBAL_ORIGIN_CONFIG_FILE;
break;
- case Flag::MANAGEMENT:
+ case JVMFlag::MANAGEMENT:
global->origin = JMM_VMGLOBAL_ORIGIN_MANAGEMENT;
break;
- case Flag::ERGONOMIC:
+ case JVMFlag::ERGONOMIC:
global->origin = JMM_VMGLOBAL_ORIGIN_ERGONOMIC;
break;
- case Flag::ATTACH_ON_DEMAND:
+ case JVMFlag::ATTACH_ON_DEMAND:
global->origin = JMM_VMGLOBAL_ORIGIN_ATTACH_ON_DEMAND;
break;
default:
@@ -1531,7 +1532,7 @@
// specified by names. If names == NULL, fill globals array
// with all Flags. Return value is number of entries
// created in globals.
-// If a Flag with a given name in an array element does not
+// If a JVMFlag with a given name in an array element does not
// exist, globals[i].name will be set to NULL.
JVM_ENTRY(jint, jmm_GetVMGlobals(JNIEnv *env,
jobjectArray names,
@@ -1566,7 +1567,7 @@
Handle sh(THREAD, s);
char* str = java_lang_String::as_utf8_string(s);
- Flag* flag = Flag::find_flag(str, strlen(str));
+ JVMFlag* flag = JVMFlag::find_flag(str, strlen(str));
if (flag != NULL &&
add_global_entry(env, sh, &globals[i], flag, THREAD)) {
num_entries++;
@@ -1579,11 +1580,11 @@
// return all globals if names == NULL
// last flag entry is always NULL, so subtract 1
- int nFlags = (int) Flag::numFlags - 1;
+ int nFlags = (int) JVMFlag::numFlags - 1;
Handle null_h;
int num_entries = 0;
for (int i = 0; i < nFlags && num_entries < count; i++) {
- Flag* flag = &Flag::flags[i];
+ JVMFlag* flag = &JVMFlag::flags[i];
// Exclude notproduct and develop flags in product builds.
if (flag->is_constant_in_binary()) {
continue;
@@ -1609,10 +1610,10 @@
char* name = java_lang_String::as_utf8_string(fn);
FormatBuffer<80> error_msg("%s", "");
- int succeed = WriteableFlags::set_flag(name, new_value, Flag::MANAGEMENT, error_msg);
+ int succeed = WriteableFlags::set_flag(name, new_value, JVMFlag::MANAGEMENT, error_msg);
- if (succeed != Flag::SUCCESS) {
- if (succeed == Flag::MISSING_VALUE) {
+ if (succeed != JVMFlag::SUCCESS) {
+ if (succeed == JVMFlag::MISSING_VALUE) {
// missing value causes NPE to be thrown
THROW(vmSymbols::java_lang_NullPointerException());
} else {
@@ -1621,7 +1622,7 @@
error_msg.buffer());
}
}
- assert(succeed == Flag::SUCCESS, "Setting flag should succeed");
+ assert(succeed == JVMFlag::SUCCESS, "Setting flag should succeed");
JVM_END
class ThreadTimesClosure: public ThreadClosure {
--- a/src/hotspot/share/services/writeableFlags.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/services/writeableFlags.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -26,7 +26,8 @@
#include "classfile/javaClasses.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/arguments.hpp"
-#include "runtime/commandLineFlagRangeList.hpp"
+#include "runtime/flags/jvmFlag.hpp"
+#include "runtime/flags/jvmFlagRangeList.hpp"
#include "runtime/java.hpp"
#include "runtime/jniHandles.hpp"
#include "services/writeableFlags.hpp"
@@ -38,7 +39,7 @@
}
static void print_flag_error_message_bounds(const char* name, char* buffer) {
- CommandLineFlagRange* range = CommandLineFlagRangeList::find(name);
+ JVMFlagRange* range = JVMFlagRangeList::find(name);
if (range != NULL) {
buffer_concat(buffer, "must have value in range ");
@@ -58,34 +59,34 @@
}
}
-static void print_flag_error_message_if_needed(Flag::Error error, const char* name, FormatBuffer<80>& err_msg) {
- if (error == Flag::SUCCESS) {
+static void print_flag_error_message_if_needed(JVMFlag::Error error, const char* name, FormatBuffer<80>& err_msg) {
+ if (error == JVMFlag::SUCCESS) {
return;
}
char buffer[TEMP_BUF_SIZE] = {'\0'};
- if ((error != Flag::MISSING_NAME) && (name != NULL)) {
+ if ((error != JVMFlag::MISSING_NAME) && (name != NULL)) {
buffer_concat(buffer, name);
buffer_concat(buffer, " error: ");
} else {
buffer_concat(buffer, "Error: ");
}
switch (error) {
- case Flag::MISSING_NAME:
+ case JVMFlag::MISSING_NAME:
buffer_concat(buffer, "flag name is missing."); break;
- case Flag::MISSING_VALUE:
+ case JVMFlag::MISSING_VALUE:
buffer_concat(buffer, "parsing the textual form of the value."); break;
- case Flag::NON_WRITABLE:
+ case JVMFlag::NON_WRITABLE:
buffer_concat(buffer, "flag is not writeable."); break;
- case Flag::OUT_OF_BOUNDS:
+ case JVMFlag::OUT_OF_BOUNDS:
print_flag_error_message_bounds(name, buffer); break;
- case Flag::VIOLATES_CONSTRAINT:
+ case JVMFlag::VIOLATES_CONSTRAINT:
buffer_concat(buffer, "value violates its flag's constraint."); break;
- case Flag::INVALID_FLAG:
+ case JVMFlag::INVALID_FLAG:
buffer_concat(buffer, "there is no flag with the given name."); break;
- case Flag::ERR_OTHER:
+ case JVMFlag::ERR_OTHER:
buffer_concat(buffer, "other, unspecified error related to setting the flag."); break;
- case Flag::SUCCESS:
+ case JVMFlag::SUCCESS:
break;
default:
break;
@@ -95,127 +96,127 @@
}
// set a boolean global flag
-Flag::Error WriteableFlags::set_bool_flag(const char* name, const char* arg, Flag::Flags origin, FormatBuffer<80>& err_msg) {
+JVMFlag::Error WriteableFlags::set_bool_flag(const char* name, const char* arg, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
if ((strcasecmp(arg, "true") == 0) || (*arg == '1' && *(arg + 1) == 0)) {
return set_bool_flag(name, true, origin, err_msg);
} else if ((strcasecmp(arg, "false") == 0) || (*arg == '0' && *(arg + 1) == 0)) {
return set_bool_flag(name, false, origin, err_msg);
}
err_msg.print("flag value must be a boolean (1/0 or true/false)");
- return Flag::WRONG_FORMAT;
+ return JVMFlag::WRONG_FORMAT;
}
-Flag::Error WriteableFlags::set_bool_flag(const char* name, bool value, Flag::Flags origin, FormatBuffer<80>& err_msg) {
- Flag::Error err = CommandLineFlags::boolAtPut(name, &value, origin);
+JVMFlag::Error WriteableFlags::set_bool_flag(const char* name, bool value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
+ JVMFlag::Error err = JVMFlag::boolAtPut(name, &value, origin);
print_flag_error_message_if_needed(err, name, err_msg);
return err;
}
// set a int global flag
-Flag::Error WriteableFlags::set_int_flag(const char* name, const char* arg, Flag::Flags origin, FormatBuffer<80>& err_msg) {
+JVMFlag::Error WriteableFlags::set_int_flag(const char* name, const char* arg, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
int value;
if (sscanf(arg, "%d", &value)) {
return set_int_flag(name, value, origin, err_msg);
}
err_msg.print("flag value must be an integer");
- return Flag::WRONG_FORMAT;
+ return JVMFlag::WRONG_FORMAT;
}
-Flag::Error WriteableFlags::set_int_flag(const char* name, int value, Flag::Flags origin, FormatBuffer<80>& err_msg) {
- Flag::Error err = CommandLineFlags::intAtPut(name, &value, origin);
+JVMFlag::Error WriteableFlags::set_int_flag(const char* name, int value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
+ JVMFlag::Error err = JVMFlag::intAtPut(name, &value, origin);
print_flag_error_message_if_needed(err, name, err_msg);
return err;
}
// set a uint global flag
-Flag::Error WriteableFlags::set_uint_flag(const char* name, const char* arg, Flag::Flags origin, FormatBuffer<80>& err_msg) {
+JVMFlag::Error WriteableFlags::set_uint_flag(const char* name, const char* arg, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
uint value;
if (sscanf(arg, "%u", &value)) {
return set_uint_flag(name, value, origin, err_msg);
}
err_msg.print("flag value must be an unsigned integer");
- return Flag::WRONG_FORMAT;
+ return JVMFlag::WRONG_FORMAT;
}
-Flag::Error WriteableFlags::set_uint_flag(const char* name, uint value, Flag::Flags origin, FormatBuffer<80>& err_msg) {
- Flag::Error err = CommandLineFlags::uintAtPut(name, &value, origin);
+JVMFlag::Error WriteableFlags::set_uint_flag(const char* name, uint value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
+ JVMFlag::Error err = JVMFlag::uintAtPut(name, &value, origin);
print_flag_error_message_if_needed(err, name, err_msg);
return err;
}
// set a intx global flag
-Flag::Error WriteableFlags::set_intx_flag(const char* name, const char* arg, Flag::Flags origin, FormatBuffer<80>& err_msg) {
+JVMFlag::Error WriteableFlags::set_intx_flag(const char* name, const char* arg, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
intx value;
if (sscanf(arg, INTX_FORMAT, &value)) {
return set_intx_flag(name, value, origin, err_msg);
}
err_msg.print("flag value must be an integer");
- return Flag::WRONG_FORMAT;
+ return JVMFlag::WRONG_FORMAT;
}
-Flag::Error WriteableFlags::set_intx_flag(const char* name, intx value, Flag::Flags origin, FormatBuffer<80>& err_msg) {
- Flag::Error err = CommandLineFlags::intxAtPut(name, &value, origin);
+JVMFlag::Error WriteableFlags::set_intx_flag(const char* name, intx value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
+ JVMFlag::Error err = JVMFlag::intxAtPut(name, &value, origin);
print_flag_error_message_if_needed(err, name, err_msg);
return err;
}
// set a uintx global flag
-Flag::Error WriteableFlags::set_uintx_flag(const char* name, const char* arg, Flag::Flags origin, FormatBuffer<80>& err_msg) {
+JVMFlag::Error WriteableFlags::set_uintx_flag(const char* name, const char* arg, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
uintx value;
if (sscanf(arg, UINTX_FORMAT, &value)) {
return set_uintx_flag(name, value, origin, err_msg);
}
err_msg.print("flag value must be an unsigned integer");
- return Flag::WRONG_FORMAT;
+ return JVMFlag::WRONG_FORMAT;
}
-Flag::Error WriteableFlags::set_uintx_flag(const char* name, uintx value, Flag::Flags origin, FormatBuffer<80>& err_msg) {
- Flag::Error err = CommandLineFlags::uintxAtPut(name, &value, origin);
+JVMFlag::Error WriteableFlags::set_uintx_flag(const char* name, uintx value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
+ JVMFlag::Error err = JVMFlag::uintxAtPut(name, &value, origin);
print_flag_error_message_if_needed(err, name, err_msg);
return err;
}
// set a uint64_t global flag
-Flag::Error WriteableFlags::set_uint64_t_flag(const char* name, const char* arg, Flag::Flags origin, FormatBuffer<80>& err_msg) {
+JVMFlag::Error WriteableFlags::set_uint64_t_flag(const char* name, const char* arg, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
uint64_t value;
if (sscanf(arg, UINT64_FORMAT, &value)) {
return set_uint64_t_flag(name, value, origin, err_msg);
}
err_msg.print("flag value must be an unsigned 64-bit integer");
- return Flag::WRONG_FORMAT;
+ return JVMFlag::WRONG_FORMAT;
}
-Flag::Error WriteableFlags::set_uint64_t_flag(const char* name, uint64_t value, Flag::Flags origin, FormatBuffer<80>& err_msg) {
- Flag::Error err = CommandLineFlags::uint64_tAtPut(name, &value, origin);
+JVMFlag::Error WriteableFlags::set_uint64_t_flag(const char* name, uint64_t value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
+ JVMFlag::Error err = JVMFlag::uint64_tAtPut(name, &value, origin);
print_flag_error_message_if_needed(err, name, err_msg);
return err;
}
// set a size_t global flag
-Flag::Error WriteableFlags::set_size_t_flag(const char* name, const char* arg, Flag::Flags origin, FormatBuffer<80>& err_msg) {
+JVMFlag::Error WriteableFlags::set_size_t_flag(const char* name, const char* arg, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
size_t value;
if (sscanf(arg, SIZE_FORMAT, &value)) {
return set_size_t_flag(name, value, origin, err_msg);
}
err_msg.print("flag value must be an unsigned integer");
- return Flag::WRONG_FORMAT;
+ return JVMFlag::WRONG_FORMAT;
}
-Flag::Error WriteableFlags::set_size_t_flag(const char* name, size_t value, Flag::Flags origin, FormatBuffer<80>& err_msg) {
- Flag::Error err = CommandLineFlags::size_tAtPut(name, &value, origin);
+JVMFlag::Error WriteableFlags::set_size_t_flag(const char* name, size_t value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
+ JVMFlag::Error err = JVMFlag::size_tAtPut(name, &value, origin);
print_flag_error_message_if_needed(err, name, err_msg);
return err;
}
// set a string global flag using value from AttachOperation
-Flag::Error WriteableFlags::set_ccstr_flag(const char* name, const char* value, Flag::Flags origin, FormatBuffer<80>& err_msg) {
- Flag::Error err = CommandLineFlags::ccstrAtPut((char*)name, &value, origin);
+JVMFlag::Error WriteableFlags::set_ccstr_flag(const char* name, const char* value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
+ JVMFlag::Error err = JVMFlag::ccstrAtPut((char*)name, &value, origin);
print_flag_error_message_if_needed(err, name, err_msg);
return err;
}
@@ -225,7 +226,7 @@
* - return status is one of the WriteableFlags::err enum values
* - an eventual error message will be generated to the provided err_msg buffer
*/
-Flag::Error WriteableFlags::set_flag(const char* flag_name, const char* flag_value, Flag::Flags origin, FormatBuffer<80>& err_msg) {
+JVMFlag::Error WriteableFlags::set_flag(const char* flag_name, const char* flag_value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
return set_flag(flag_name, &flag_value, set_flag_from_char, origin, err_msg);
}
@@ -234,42 +235,42 @@
* - return status is one of the WriteableFlags::err enum values
* - an eventual error message will be generated to the provided err_msg buffer
*/
-Flag::Error WriteableFlags::set_flag(const char* flag_name, jvalue flag_value, Flag::Flags origin, FormatBuffer<80>& err_msg) {
+JVMFlag::Error WriteableFlags::set_flag(const char* flag_name, jvalue flag_value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
return set_flag(flag_name, &flag_value, set_flag_from_jvalue, origin, err_msg);
}
// a writeable flag setter accepting either 'jvalue' or 'char *' values
-Flag::Error WriteableFlags::set_flag(const char* name, const void* value, Flag::Error(*setter)(Flag*,const void*,Flag::Flags,FormatBuffer<80>&), Flag::Flags origin, FormatBuffer<80>& err_msg) {
+JVMFlag::Error WriteableFlags::set_flag(const char* name, const void* value, JVMFlag::Error(*setter)(JVMFlag*,const void*,JVMFlag::Flags,FormatBuffer<80>&), JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
if (name == NULL) {
err_msg.print("flag name is missing");
- return Flag::MISSING_NAME;
+ return JVMFlag::MISSING_NAME;
}
if (value == NULL) {
err_msg.print("flag value is missing");
- return Flag::MISSING_VALUE;
+ return JVMFlag::MISSING_VALUE;
}
- Flag* f = Flag::find_flag((char*)name, strlen(name));
+ JVMFlag* f = JVMFlag::find_flag((char*)name, strlen(name));
if (f) {
// only writeable flags are allowed to be set
if (f->is_writeable()) {
return setter(f, value, origin, err_msg);
} else {
err_msg.print("only 'writeable' flags can be set");
- return Flag::NON_WRITABLE;
+ return JVMFlag::NON_WRITABLE;
}
}
err_msg.print("flag %s does not exist", name);
- return Flag::INVALID_FLAG;
+ return JVMFlag::INVALID_FLAG;
}
// a writeable flag setter accepting 'char *' values
-Flag::Error WriteableFlags::set_flag_from_char(Flag* f, const void* value, Flag::Flags origin, FormatBuffer<80>& err_msg) {
+JVMFlag::Error WriteableFlags::set_flag_from_char(JVMFlag* f, const void* value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg) {
char* flag_value = *(char**)value;
if (flag_value == NULL) {
err_msg.print("flag value is missing");
- return Flag::MISSING_VALUE;
+ return JVMFlag::MISSING_VALUE;
}
if (f->is_bool()) {
return set_bool_flag(f->_name, flag_value, origin, err_msg);
@@ -290,11 +291,11 @@
} else {
ShouldNotReachHere();
}
- return Flag::ERR_OTHER;
+ return JVMFlag::ERR_OTHER;
}
// a writeable flag setter accepting 'jvalue' values
-Flag::Error WriteableFlags::set_flag_from_jvalue(Flag* f, const void* value, Flag::Flags origin,
+JVMFlag::Error WriteableFlags::set_flag_from_jvalue(JVMFlag* f, const void* value, JVMFlag::Flags origin,
FormatBuffer<80>& err_msg) {
jvalue new_value = *(jvalue*)value;
if (f->is_bool()) {
@@ -322,16 +323,16 @@
oop str = JNIHandles::resolve_external_guard(new_value.l);
if (str == NULL) {
err_msg.print("flag value is missing");
- return Flag::MISSING_VALUE;
+ return JVMFlag::MISSING_VALUE;
}
ccstr svalue = java_lang_String::as_utf8_string(str);
- Flag::Error ret = WriteableFlags::set_ccstr_flag(f->_name, svalue, origin, err_msg);
- if (ret != Flag::SUCCESS) {
+ JVMFlag::Error ret = WriteableFlags::set_ccstr_flag(f->_name, svalue, origin, err_msg);
+ if (ret != JVMFlag::SUCCESS) {
FREE_C_HEAP_ARRAY(char, svalue);
}
return ret;
} else {
ShouldNotReachHere();
}
- return Flag::ERR_OTHER;
+ return JVMFlag::ERR_OTHER;
}
--- a/src/hotspot/share/services/writeableFlags.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/services/writeableFlags.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,48 +25,49 @@
#ifndef SHARE_VM_SERVICES_WRITEABLEFLAG_HPP
#define SHARE_VM_SERVICES_WRITEABLEFLAG_HPP
+#include "runtime/flags/jvmFlag.hpp"
#include "runtime/globals.hpp"
#include "utilities/formatBuffer.hpp"
class WriteableFlags : AllStatic {
private:
// a writeable flag setter accepting either 'jvalue' or 'char *' values
- static Flag::Error set_flag(const char* name, const void* value, Flag::Error(*setter)(Flag*, const void*, Flag::Flags, FormatBuffer<80>&), Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_flag(const char* name, const void* value, JVMFlag::Error(*setter)(JVMFlag*, const void*, JVMFlag::Flags, FormatBuffer<80>&), JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
// a writeable flag setter accepting 'char *' values
- static Flag::Error set_flag_from_char(Flag* f, const void* value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_flag_from_char(JVMFlag* f, const void* value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
// a writeable flag setter accepting 'jvalue' values
- static Flag::Error set_flag_from_jvalue(Flag* f, const void* value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_flag_from_jvalue(JVMFlag* f, const void* value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
// set a boolean global flag
- static Flag::Error set_bool_flag(const char* name, const char* value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_bool_flag(const char* name, const char* value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
// set a int global flag
- static Flag::Error set_int_flag(const char* name, const char* value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_int_flag(const char* name, const char* value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
// set a uint global flag
- static Flag::Error set_uint_flag(const char* name, const char* value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_uint_flag(const char* name, const char* value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
// set a intx global flag
- static Flag::Error set_intx_flag(const char* name, const char* value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_intx_flag(const char* name, const char* value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
// set a uintx global flag
- static Flag::Error set_uintx_flag(const char* name, const char* value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_uintx_flag(const char* name, const char* value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
// set a uint64_t global flag
- static Flag::Error set_uint64_t_flag(const char* name, const char* value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_uint64_t_flag(const char* name, const char* value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
// set a size_t global flag using value from AttachOperation
- static Flag::Error set_size_t_flag(const char* name, const char* value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_size_t_flag(const char* name, const char* value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
// set a boolean global flag
- static Flag::Error set_bool_flag(const char* name, bool value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_bool_flag(const char* name, bool value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
// set a int global flag
- static Flag::Error set_int_flag(const char* name, int value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_int_flag(const char* name, int value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
// set a uint global flag
- static Flag::Error set_uint_flag(const char* name, uint value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_uint_flag(const char* name, uint value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
// set a intx global flag
- static Flag::Error set_intx_flag(const char* name, intx value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_intx_flag(const char* name, intx value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
// set a uintx global flag
- static Flag::Error set_uintx_flag(const char* name, uintx value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_uintx_flag(const char* name, uintx value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
// set a uint64_t global flag
- static Flag::Error set_uint64_t_flag(const char* name, uint64_t value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_uint64_t_flag(const char* name, uint64_t value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
// set a size_t global flag using value from AttachOperation
- static Flag::Error set_size_t_flag(const char* name, size_t value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_size_t_flag(const char* name, size_t value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
// set a string global flag
- static Flag::Error set_ccstr_flag(const char* name, const char* value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_ccstr_flag(const char* name, const char* value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
public:
/* sets a writeable flag to the provided value
@@ -74,14 +75,14 @@
* - return status is one of the WriteableFlags::err enum values
* - an eventual error message will be generated to the provided err_msg buffer
*/
- static Flag::Error set_flag(const char* flag_name, const char* flag_value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_flag(const char* flag_name, const char* flag_value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
/* sets a writeable flag to the provided value
*
* - return status is one of the WriteableFlags::err enum values
* - an eventual error message will be generated to the provided err_msg buffer
*/
- static Flag::Error set_flag(const char* flag_name, jvalue flag_value, Flag::Flags origin, FormatBuffer<80>& err_msg);
+ static JVMFlag::Error set_flag(const char* flag_name, jvalue flag_value, JVMFlag::Flags origin, FormatBuffer<80>& err_msg);
};
#endif /* SHARE_VM_SERVICES_WRITEABLEFLAG_HPP */
--- a/src/hotspot/share/utilities/debug.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/utilities/debug.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -40,6 +40,7 @@
#include "prims/privilegedStack.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
--- a/src/hotspot/share/utilities/globalDefinitions.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -1259,4 +1259,11 @@
return *(void**)addr;
}
+//----------------------------------------------------------------------------------------------------
+// String type aliases used by command line flag declarations and
+// processing utilities.
+
+typedef const char* ccstr;
+typedef const char* ccstrlist; // represents string arguments which accumulate
+
#endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_HPP
--- a/src/hotspot/share/utilities/macros.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/utilities/macros.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -206,8 +206,10 @@
#define TIERED
#endif
#define COMPILER1_PRESENT(code) code
+#define NOT_COMPILER1(code)
#else // COMPILER1
#define COMPILER1_PRESENT(code)
+#define NOT_COMPILER1(code) code
#endif // COMPILER1
// COMPILER2 variant
--- a/src/hotspot/share/utilities/ticks.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/utilities/ticks.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -47,6 +47,11 @@
return *this;
}
+ Tickspan& operator-=(const Tickspan& rhs) {
+ _span_ticks -= rhs._span_ticks;
+ return *this;
+ }
+
jlong value() const {
return _span_ticks;
}
--- a/src/hotspot/share/utilities/ticks.inline.hpp Fri Apr 27 11:33:22 2018 +0100
+++ b/src/hotspot/share/utilities/ticks.inline.hpp Fri Apr 27 12:29:49 2018 +0100
@@ -32,6 +32,11 @@
return lhs;
}
+inline Tickspan operator-(Tickspan lhs, const Tickspan& rhs) {
+ lhs -= rhs;
+ return lhs;
+}
+
inline bool operator==(const Tickspan& lhs, const Tickspan& rhs) {
return lhs.value() == rhs.value();
}
--- a/src/java.base/share/classes/java/io/FileInputStream.java Fri Apr 27 11:33:22 2018 +0100
+++ b/src/java.base/share/classes/java/io/FileInputStream.java Fri Apr 27 12:29:49 2018 +0100
@@ -79,7 +79,7 @@
private volatile boolean closed;
- private final AltFinalizer altFinalizer;
+ private final Object altFinalizer;
/**
* Creates a <code>FileInputStream</code> by
@@ -155,7 +155,7 @@
fd.attach(this);
path = name;
open(name);
- altFinalizer = AltFinalizer.get(this);
+ altFinalizer = getFinalizer(this);
if (altFinalizer == null) {
FileCleanable.register(fd); // open set the fd, register the cleanup
}
@@ -471,6 +471,23 @@
protected void finalize() throws IOException {
}
+ /*
+ * Returns a finalizer object if the FIS needs a finalizer; otherwise null.
+ * If the FIS has a close method; it needs an AltFinalizer.
+ */
+ private static Object getFinalizer(FileInputStream fis) {
+ Class<?> clazz = fis.getClass();
+ while (clazz != FileInputStream.class) {
+ try {
+ clazz.getDeclaredMethod("close");
+ return new AltFinalizer(fis);
+ } catch (NoSuchMethodException nsme) {
+ // ignore
+ }
+ clazz = clazz.getSuperclass();
+ }
+ return null;
+ }
/**
* Class to call {@code FileInputStream.close} when finalized.
* If finalization of the stream is needed, an instance is created
@@ -481,25 +498,7 @@
static class AltFinalizer {
private final FileInputStream fis;
- /*
- * Returns a finalizer object if the FIS needs a finalizer; otherwise null.
- * If the FIS has a close method; it needs an AltFinalizer.
- */
- static AltFinalizer get(FileInputStream fis) {
- Class<?> clazz = fis.getClass();
- while (clazz != FileInputStream.class) {
- try {
- clazz.getDeclaredMethod("close");
- return new AltFinalizer(fis);
- } catch (NoSuchMethodException nsme) {
- // ignore
- }
- clazz = clazz.getSuperclass();
- }
- return null;
- }
-
- private AltFinalizer(FileInputStream fis) {
+ AltFinalizer(FileInputStream fis) {
this.fis = fis;
}
--- a/src/java.base/share/classes/java/io/FileOutputStream.java Fri Apr 27 11:33:22 2018 +0100
+++ b/src/java.base/share/classes/java/io/FileOutputStream.java Fri Apr 27 12:29:49 2018 +0100
@@ -95,7 +95,7 @@
private volatile boolean closed;
- private final AltFinalizer altFinalizer;
+ private final Object altFinalizer;
/**
* Creates a file output stream to write to the file with the
@@ -235,7 +235,7 @@
this.path = name;
open(name, append);
- altFinalizer = AltFinalizer.get(this);
+ altFinalizer = getFinalizer(this);
if (altFinalizer == null) {
FileCleanable.register(fd); // open sets the fd, register the cleanup
}
@@ -496,6 +496,24 @@
initIDs();
}
+ /*
+ * Returns a finalizer object if the FOS needs a finalizer; otherwise null.
+ * If the FOS has a close method; it needs an AltFinalizer.
+ */
+ private static Object getFinalizer(FileOutputStream fos) {
+ Class<?> clazz = fos.getClass();
+ while (clazz != FileOutputStream.class) {
+ try {
+ clazz.getDeclaredMethod("close");
+ return new AltFinalizer(fos);
+ } catch (NoSuchMethodException nsme) {
+ // ignore
+ }
+ clazz = clazz.getSuperclass();
+ }
+ return null;
+ }
+
/**
* Class to call {@code FileOutputStream.close} when finalized.
* If finalization of the stream is needed, an instance is created
@@ -506,25 +524,7 @@
static class AltFinalizer {
private final FileOutputStream fos;
- /*
- * Returns a finalizer object if the FOS needs a finalizer; otherwise null.
- * If the FOS has a close method; it needs an AltFinalizer.
- */
- static AltFinalizer get(FileOutputStream fos) {
- Class<?> clazz = fos.getClass();
- while (clazz != FileOutputStream.class) {
- try {
- clazz.getDeclaredMethod("close");
- return new AltFinalizer(fos);
- } catch (NoSuchMethodException nsme) {
- // ignore
- }
- clazz = clazz.getSuperclass();
- }
- return null;
- }
-
- private AltFinalizer(FileOutputStream fos) {
+ AltFinalizer(FileOutputStream fos) {
this.fos = fos;
}
--- a/src/java.base/share/classes/java/text/SimpleDateFormat.java Fri Apr 27 11:33:22 2018 +0100
+++ b/src/java.base/share/classes/java/text/SimpleDateFormat.java Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1996, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,6 +55,7 @@
import sun.util.calendar.CalendarUtils;
import sun.util.calendar.ZoneInfoFile;
import sun.util.locale.provider.LocaleProviderAdapter;
+import sun.util.locale.provider.TimeZoneNameUtility;
/**
* <code>SimpleDateFormat</code> is a concrete class for formatting and
@@ -1691,6 +1692,12 @@
// Checking long and short zones [1 & 2],
// and long and short daylight [3 & 4].
String zoneName = zoneNames[i];
+ if (zoneName.isEmpty()) {
+ // fill in by retrieving single name
+ zoneName = TimeZoneNameUtility.retrieveDisplayName(
+ zoneNames[0], i >= 3, i % 2, locale);
+ zoneNames[i] = zoneName;
+ }
if (text.regionMatches(true, start,
zoneName, 0, zoneName.length())) {
return i;
--- a/src/java.base/share/classes/java/util/Locale.java Fri Apr 27 11:33:22 2018 +0100
+++ b/src/java.base/share/classes/java/util/Locale.java Fri Apr 27 12:29:49 2018 +0100
@@ -2189,9 +2189,9 @@
}
break;
case "tz":
- displayType = TimeZoneNameUtility.retrieveGenericDisplayName(
- TimeZoneNameUtility.convertLDMLShortID(type).orElse(type),
- TimeZone.LONG, inLocale);
+ displayType = TimeZoneNameUtility.convertLDMLShortID(type)
+ .map(id -> TimeZoneNameUtility.retrieveGenericDisplayName(id, TimeZone.LONG, inLocale))
+ .orElse(type);
break;
}
ret = MessageFormat.format(lr.getLocaleName("ListKeyTypePattern"),
--- a/src/java.base/share/classes/sun/util/cldr/CLDRLocaleProviderAdapter.java Fri Apr 27 11:33:22 2018 +0100
+++ b/src/java.base/share/classes/sun/util/cldr/CLDRLocaleProviderAdapter.java Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,7 @@
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentHashMap;
import java.util.spi.CalendarDataProvider;
+import java.util.spi.TimeZoneNameProvider;
import sun.util.locale.provider.JRELocaleProviderAdapter;
import sun.util.locale.provider.LocaleDataMetaInfo;
import sun.util.locale.provider.LocaleProviderAdapter;
@@ -131,6 +132,24 @@
}
@Override
+ public TimeZoneNameProvider getTimeZoneNameProvider() {
+ if (timeZoneNameProvider == null) {
+ TimeZoneNameProvider provider = AccessController.doPrivileged(
+ (PrivilegedAction<TimeZoneNameProvider>) () ->
+ new CLDRTimeZoneNameProviderImpl(
+ getAdapterType(),
+ getLanguageTagSet("TimeZoneNames")));
+
+ synchronized (this) {
+ if (timeZoneNameProvider == null) {
+ timeZoneNameProvider = provider;
+ }
+ }
+ }
+ return timeZoneNameProvider;
+ }
+
+ @Override
public Locale[] getAvailableLocales() {
Set<String> all = createLanguageTagSet("AvailableLocales");
Locale[] locs = new Locale[all.size()];
@@ -246,9 +265,9 @@
}
/**
- * Returns the time zone ID from an LDML's short ID
+ * Returns the canonical ID for the given ID
*/
- public Optional<String> getTimeZoneID(String shortID) {
- return Optional.ofNullable(baseMetaInfo.tzShortIDs().get(shortID));
+ public Optional<String> canonicalTZID(String id) {
+ return Optional.ofNullable(baseMetaInfo.tzCanonicalIDs().get(id));
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/java.base/share/classes/sun/util/cldr/CLDRTimeZoneNameProviderImpl.java Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package sun.util.cldr;
+
+import static sun.util.locale.provider.LocaleProviderAdapter.Type;
+
+import java.text.MessageFormat;
+import java.util.Arrays;
+import java.util.Locale;
+import java.util.Objects;
+import java.util.ResourceBundle;
+import java.util.Set;
+import java.util.TimeZone;
+import java.util.stream.Collectors;
+import sun.util.calendar.ZoneInfoFile;
+import sun.util.locale.provider.LocaleProviderAdapter;
+import sun.util.locale.provider.LocaleResources;
+import sun.util.locale.provider.TimeZoneNameProviderImpl;
+import sun.util.locale.provider.TimeZoneNameUtility;
+
+/**
+ * Concrete implementation of the
+ * {@link java.util.spi.TimeZoneNameProvider TimeZoneNameProvider} class
+ * for the CLDR LocaleProviderAdapter.
+ *
+ * @author Naoto Sato
+ */
+public class CLDRTimeZoneNameProviderImpl extends TimeZoneNameProviderImpl {
+
+ private static final String NO_INHERITANCE_MARKER = "\u2205\u2205\u2205";
+ private static class AVAILABLE_IDS {
+ static final String[] INSTANCE =
+ Arrays.stream(ZoneInfoFile.getZoneIds())
+ .sorted()
+ .toArray(String[]::new);
+ }
+
+ // display name array indexes
+ private static final int INDEX_TZID = 0;
+ private static final int INDEX_STD_LONG = 1;
+ private static final int INDEX_STD_SHORT = 2;
+ private static final int INDEX_DST_LONG = 3;
+ private static final int INDEX_DST_SHORT = 4;
+ private static final int INDEX_GEN_LONG = 5;
+ private static final int INDEX_GEN_SHORT = 6;
+
+ public CLDRTimeZoneNameProviderImpl(Type type, Set<String> langtags) {
+ super(type, langtags);
+ }
+
+ @Override
+ protected String[] getDisplayNameArray(String id, Locale locale) {
+ String tzid = TimeZoneNameUtility.canonicalTZID(id).orElse(id);
+ String[] namesSuper = super.getDisplayNameArray(tzid, locale);
+
+ if (Objects.nonNull(namesSuper)) {
+ // CLDR's resource bundle has an translated entry for this id.
+ // Fix up names if needed, either missing or no-inheritance
+ namesSuper[INDEX_TZID] = id;
+
+ // Check if standard long name exists. If not, try to retrieve the name
+ // from language only locale resources. E.g., "Europe/London"
+ // for en-GB only contains DST names
+ if (!exists(namesSuper, INDEX_STD_LONG) && !locale.getCountry().isEmpty()) {
+ String[] names =
+ getDisplayNameArray(id, Locale.forLanguageTag(locale.getLanguage()));
+ if (exists(names, INDEX_STD_LONG)) {
+ namesSuper[INDEX_STD_LONG] = names[INDEX_STD_LONG];
+ }
+ }
+
+ for(int i = INDEX_STD_LONG; i < namesSuper.length; i++) { // index 0 is the 'id' itself
+ switch (namesSuper[i]) {
+ case "":
+ // Fill in empty elements
+ deriveFallbackName(namesSuper, i, locale,
+ namesSuper[INDEX_DST_LONG].isEmpty());
+ break;
+ case NO_INHERITANCE_MARKER:
+ // CLDR's "no inheritance marker"
+ namesSuper[i] = toGMTFormat(id, i == INDEX_DST_LONG || i == INDEX_DST_SHORT,
+ i % 2 != 0, locale);
+ break;
+ default:
+ break;
+ }
+ }
+ return namesSuper;
+ } else {
+ // Derive the names for this id. Validate the id first.
+ if (Arrays.binarySearch(AVAILABLE_IDS.INSTANCE, id) >= 0) {
+ String[] names = new String[INDEX_GEN_SHORT + 1];
+ names[INDEX_TZID] = id;
+ deriveFallbackNames(names, locale);
+ return names;
+ }
+ }
+
+ return null;
+ }
+
+ @Override
+ protected String[][] getZoneStrings(Locale locale) {
+ // Use English for the ROOT locale
+ locale = locale.equals(Locale.ROOT) ? Locale.ENGLISH : locale;
+ String[][] ret = super.getZoneStrings(locale);
+
+ // Fill in for the empty names.
+ // English names are prefilled for performance.
+ if (locale.getLanguage() != "en") {
+ for (int zoneIndex = 0; zoneIndex < ret.length; zoneIndex++) {
+ deriveFallbackNames(ret[zoneIndex], locale);
+ }
+ }
+ return ret;
+ }
+
+ // Derive fallback time zone name according to LDML's logic
+ private void deriveFallbackNames(String[] names, Locale locale) {
+ for (int i = INDEX_STD_LONG; i <= INDEX_GEN_SHORT; i++) {
+ deriveFallbackName(names, i, locale, false);
+ }
+ }
+
+ private void deriveFallbackName(String[] names, int index, Locale locale, boolean noDST) {
+ if (exists(names, index)) {
+ return;
+ }
+
+ // Check if COMPAT can substitute the name
+ if (LocaleProviderAdapter.getAdapterPreference().contains(Type.JRE)) {
+ String[] compatNames = (String[])LocaleProviderAdapter.forJRE()
+ .getLocaleResources(locale)
+ .getTimeZoneNames(names[INDEX_TZID]);
+ if (compatNames != null) {
+ for (int i = INDEX_STD_LONG; i <= INDEX_GEN_SHORT; i++) {
+ // Assumes COMPAT has no empty slots
+ if (i == index || !exists(names, i)) {
+ names[i] = compatNames[i];
+ }
+ }
+ return;
+ }
+ }
+
+ // Type Fallback
+ if (noDST && typeFallback(names, index)) {
+ return;
+ }
+
+ // Region Fallback
+ if (regionFormatFallback(names, index, locale)) {
+ return;
+ }
+
+ // last resort
+ String id = names[INDEX_TZID].toUpperCase(Locale.ROOT);
+ if (!id.startsWith("ETC/GMT") &&
+ !id.startsWith("GMT") &&
+ !id.startsWith("UT")) {
+ names[index] = toGMTFormat(names[INDEX_TZID],
+ index == INDEX_DST_LONG || index == INDEX_DST_SHORT,
+ index % 2 != 0,
+ locale);
+ }
+ }
+
+ private boolean exists(String[] names, int index) {
+ return Objects.nonNull(names)
+ && Objects.nonNull(names[index])
+ && !names[index].isEmpty();
+ }
+
+ private boolean typeFallback(String[] names, int index) {
+ // check generic
+ int genIndex = INDEX_GEN_SHORT - index % 2;
+ if (!exists(names, index) && exists(names, genIndex)) {
+ names[index] = names[genIndex];
+ } else {
+ // check standard
+ int stdIndex = INDEX_STD_SHORT - index % 2;
+ if (!exists(names, index) && exists(names, stdIndex)) {
+ names[index] = names[stdIndex];
+ }
+ }
+
+ return exists(names, index);
+ }
+
+ private boolean regionFormatFallback(String[] names, int index, Locale l) {
+ String id = names[INDEX_TZID];
+ LocaleResources lr = LocaleProviderAdapter.forType(Type.CLDR).getLocaleResources(l);
+ ResourceBundle fd = lr.getJavaTimeFormatData();
+
+ String rgn = (String) lr.getTimeZoneNames("timezone.excity." + id);
+ if (rgn == null && !id.startsWith("Etc") && !id.startsWith("SystemV")) {
+ int slash = id.lastIndexOf('/');
+ if (slash > 0) {
+ rgn = id.substring(slash + 1).replaceAll("_", " ");
+ }
+ }
+
+ if (rgn != null) {
+ String fmt = "";
+ switch (index) {
+ case INDEX_STD_LONG:
+ fmt = fd.getString("timezone.regionFormat.standard");
+ break;
+ case INDEX_DST_LONG:
+ fmt = fd.getString("timezone.regionFormat.daylight");
+ break;
+ case INDEX_GEN_LONG:
+ fmt = fd.getString("timezone.regionFormat");
+ break;
+ }
+ if (!fmt.isEmpty()) {
+ names[index] = MessageFormat.format(fmt, rgn);
+ }
+ }
+
+ return exists(names, index);
+ }
+
+ private String toGMTFormat(String id, boolean daylight, boolean isShort, Locale l) {
+ TimeZone tz = ZoneInfoFile.getZoneInfo(id);
+ int offset = (tz.getRawOffset() + (daylight ? tz.getDSTSavings() : 0)) / 60000;
+ LocaleResources lr = LocaleProviderAdapter.forType(Type.CLDR).getLocaleResources(l);
+ ResourceBundle fd = lr.getJavaTimeFormatData();
+
+ if (offset == 0) {
+ return fd.getString("timezone.gmtZeroFormat");
+ } else {
+ String gmtFormat = fd.getString("timezone.gmtFormat");
+ String hourFormat = fd.getString("timezone.hourFormat");
+
+ if (offset > 0) {
+ hourFormat = hourFormat.substring(0, hourFormat.indexOf(";"));
+ } else {
+ hourFormat = hourFormat.substring(hourFormat.indexOf(";") + 1);
+ offset = -offset;
+ }
+ hourFormat = hourFormat
+ .replaceFirst("H+", (isShort ? "\\%1\\$d" : "\\%1\\$02d"))
+ .replaceFirst("m+", "\\%2\\$02d");
+ return MessageFormat.format(gmtFormat,
+ String.format(hourFormat, offset / 60, offset % 60));
+ }
+ }
+}
--- a/src/java.base/share/classes/sun/util/locale/provider/JRELocaleProviderAdapter.java Fri Apr 27 11:33:22 2018 +0100
+++ b/src/java.base/share/classes/sun/util/locale/provider/JRELocaleProviderAdapter.java Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -129,7 +129,7 @@
private volatile CurrencyNameProvider currencyNameProvider;
private volatile LocaleNameProvider localeNameProvider;
- private volatile TimeZoneNameProvider timeZoneNameProvider;
+ protected volatile TimeZoneNameProvider timeZoneNameProvider;
protected volatile CalendarDataProvider calendarDataProvider;
private volatile CalendarNameProvider calendarNameProvider;
--- a/src/java.base/share/classes/sun/util/locale/provider/LocaleDataMetaInfo.java Fri Apr 27 11:33:22 2018 +0100
+++ b/src/java.base/share/classes/sun/util/locale/provider/LocaleDataMetaInfo.java Fri Apr 27 12:29:49 2018 +0100
@@ -50,11 +50,12 @@
public String availableLanguageTags(String category);
/**
- * Returns a map for short time zone ids in BCP47 Unicode extension and
- * the long time zone ids.
- * @return map of short id to long ids, separated by a space.
+ * Returns a map for time zone ids to their canonical ids.
+ * The map key is either an LDML's short id, or a valid
+ * TZDB zone id.
+ * @return map of ids to their canonical ids.
*/
- default public Map<String, String> tzShortIDs() {
+ default public Map<String, String> tzCanonicalIDs() {
return null;
}
}
--- a/src/java.base/share/classes/sun/util/locale/provider/LocaleResources.java Fri Apr 27 11:33:22 2018 +0100
+++ b/src/java.base/share/classes/sun/util/locale/provider/LocaleResources.java Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -52,6 +52,7 @@
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
+import sun.security.action.GetPropertyAction;
import sun.util.calendar.ZoneInfo;
import sun.util.resources.LocaleData;
import sun.util.resources.OpenListResourceBundle;
@@ -87,6 +88,9 @@
private static final String NUMBER_PATTERNS_CACHEKEY = "NP";
private static final String DATE_TIME_PATTERN = "DTP.";
+ // TimeZoneNamesBundle exemplar city prefix
+ private static final String TZNB_EXCITY_PREFIX = "timezone.excity.";
+
// null singleton cache value
private static final Object NULLOBJECT = new Object();
@@ -254,23 +258,32 @@
return (String) localeName;
}
- String[] getTimeZoneNames(String key) {
- String[] names = null;
- String cacheKey = TIME_ZONE_NAMES + '.' + key;
+ public Object getTimeZoneNames(String key) {
+ Object val = null;
+ String cacheKey = TIME_ZONE_NAMES + key;
removeEmptyReferences();
ResourceReference data = cache.get(cacheKey);
- if (Objects.isNull(data) || Objects.isNull((names = (String[]) data.get()))) {
+ if (Objects.isNull(data) || Objects.isNull(val = data.get())) {
TimeZoneNamesBundle tznb = localeData.getTimeZoneNames(locale);
if (tznb.containsKey(key)) {
- names = tznb.getStringArray(key);
+ if (key.startsWith(TZNB_EXCITY_PREFIX)) {
+ val = tznb.getString(key);
+ assert val instanceof String;
+ trace("tznb: %s key: %s, val: %s\n", tznb, key, val);
+ } else {
+ String[] names = tznb.getStringArray(key);
+ trace("tznb: %s key: %s, names: %s, %s, %s, %s, %s, %s, %s\n", tznb, key,
+ names[0], names[1], names[2], names[3], names[4], names[5], names[6]);
+ val = names;
+ }
cache.put(cacheKey,
- new ResourceReference(cacheKey, (Object) names, referenceQueue));
+ new ResourceReference(cacheKey, val, referenceQueue));
}
}
- return names;
+ return val;
}
@SuppressWarnings("unchecked")
@@ -296,7 +309,9 @@
// Use a LinkedHashSet to preseve the order
Set<String[]> value = new LinkedHashSet<>();
for (String key : keyset) {
- value.add(rb.getStringArray(key));
+ if (!key.startsWith(TZNB_EXCITY_PREFIX)) {
+ value.add(rb.getStringArray(key));
+ }
}
// Add aliases data for CLDR
@@ -514,4 +529,13 @@
return cacheKey;
}
}
+
+ private static final boolean TRACE_ON = Boolean.valueOf(
+ GetPropertyAction.privilegedGetProperty("locale.resources.debug", "false"));
+
+ public static void trace(String format, Object... params) {
+ if (TRACE_ON) {
+ System.out.format(format, params);
+ }
+ }
}
--- a/src/java.base/share/classes/sun/util/locale/provider/TimeZoneNameProviderImpl.java Fri Apr 27 11:33:22 2018 +0100
+++ b/src/java.base/share/classes/sun/util/locale/provider/TimeZoneNameProviderImpl.java Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,6 @@
import java.util.Set;
import java.util.TimeZone;
import java.util.spi.TimeZoneNameProvider;
-import sun.util.calendar.ZoneInfoFile;
/**
* Concrete implementation of the
@@ -43,9 +42,8 @@
public class TimeZoneNameProviderImpl extends TimeZoneNameProvider {
private final LocaleProviderAdapter.Type type;
private final Set<String> langtags;
- private static final String CLDR_NO_INHERITANCE_MARKER = "\u2205\u2205\u2205";
- TimeZoneNameProviderImpl(LocaleProviderAdapter.Type type, Set<String> langtags) {
+ protected TimeZoneNameProviderImpl(LocaleProviderAdapter.Type type, Set<String> langtags) {
this.type = type;
this.langtags = langtags;
}
@@ -120,41 +118,23 @@
return null;
}
- private String[] getDisplayNameArray(String id, Locale locale) {
+ protected String[] getDisplayNameArray(String id, Locale locale) {
Objects.requireNonNull(id);
Objects.requireNonNull(locale);
- String[] ret =
- LocaleProviderAdapter.forType(type).getLocaleResources(locale).getTimeZoneNames(id);
-
- if (Objects.nonNull(ret) && type == LocaleProviderAdapter.Type.CLDR) {
- // check for CLDR's "no inheritance marker"
- for (int index = 0; index < ret.length; index++) {
- TimeZone tz = null;
- if (CLDR_NO_INHERITANCE_MARKER.equals(ret[index])) {
- if (Objects.isNull(tz)) {
- tz = TimeZone.getTimeZone(id);
- }
- int offset = tz.getRawOffset();
- if (index == 3 || index == 4) { // daylight
- offset += tz.getDSTSavings();
- }
- ret[index] = ZoneInfoFile.toCustomID(offset);
- }
- }
- }
-
- return ret;
+ return (String []) LocaleProviderAdapter.forType(type)
+ .getLocaleResources(locale)
+ .getTimeZoneNames(id);
}
/**
* Returns a String[][] as the DateFormatSymbols.getZoneStrings() value for
- * the given locale. This method is package private.
+ * the given locale.
*
* @param locale a Locale for time zone names
* @return an array of time zone names arrays
*/
- String[][] getZoneStrings(Locale locale) {
+ protected String[][] getZoneStrings(Locale locale) {
return LocaleProviderAdapter.forType(type).getLocaleResources(locale).getZoneStrings();
}
}
--- a/src/java.base/share/classes/sun/util/locale/provider/TimeZoneNameUtility.java Fri Apr 27 11:33:22 2018 +0100
+++ b/src/java.base/share/classes/sun/util/locale/provider/TimeZoneNameUtility.java Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -162,9 +162,15 @@
* @return the tzdb's time zone ID
*/
public static Optional<String> convertLDMLShortID(String shortID) {
+ return canonicalTZID(shortID);
+ }
+
+ /**
+ * Returns the canonical ID for the given ID
+ */
+ public static Optional<String> canonicalTZID(String id) {
return ((CLDRLocaleProviderAdapter)LocaleProviderAdapter.forType(Type.CLDR))
- .getTimeZoneID(shortID)
- .map(id -> id.replaceAll("\\s.*", ""));
+ .canonicalTZID(id);
}
private static String[] retrieveDisplayNamesImpl(String id, Locale locale) {
--- a/src/java.base/share/classes/sun/util/resources/LocaleData.java Fri Apr 27 11:33:22 2018 +0100
+++ b/src/java.base/share/classes/sun/util/resources/LocaleData.java Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1996, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -275,11 +275,6 @@
}
}
}
- // Force fallback to Locale.ENGLISH for CLDR time zone names support
- if (locale.getLanguage() != "en"
- && type == CLDR && category.equals("TimeZoneNames")) {
- candidates.add(candidates.size() - 1, Locale.ENGLISH);
- }
CANDIDATES_MAP.putIfAbsent(key, candidates);
}
return candidates;
--- a/src/java.base/share/classes/sun/util/resources/TimeZoneNamesBundle.java Fri Apr 27 11:33:22 2018 +0100
+++ b/src/java.base/share/classes/sun/util/resources/TimeZoneNamesBundle.java Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -79,15 +79,16 @@
*/
@Override
public Object handleGetObject(String key) {
- String[] contents = (String[]) super.handleGetObject(key);
- if (Objects.isNull(contents)) {
- return null;
+ Object val = super.handleGetObject(key);
+ if (val instanceof String[]) {
+ String[] contents = (String[]) val;
+ int clen = contents.length;
+ String[] tmpobj = new String[7];
+ tmpobj[0] = key;
+ System.arraycopy(contents, 0, tmpobj, 1, clen);
+ return tmpobj;
}
- int clen = contents.length;
- String[] tmpobj = new String[7];
- tmpobj[0] = key;
- System.arraycopy(contents, 0, tmpobj, 1, clen);
- return tmpobj;
+ return val;
}
/**
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java Fri Apr 27 11:33:22 2018 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java Fri Apr 27 12:29:49 2018 +0100
@@ -137,7 +137,7 @@
private Boolean compressedOopsEnabled;
private Boolean compressedKlassPointersEnabled;
- // command line flags supplied to VM - see struct Flag in globals.hpp
+ // command line flags supplied to VM - see struct JVMFlag in jvmFlag.hpp
public static final class Flag {
private String type;
private String name;
@@ -916,7 +916,7 @@
private void readCommandLineFlags() {
// get command line flags
TypeDataBase db = getTypeDataBase();
- Type flagType = db.lookupType("Flag");
+ Type flagType = db.lookupType("JVMFlag");
int numFlags = (int) flagType.getCIntegerField("numFlags").getValue();
// NOTE: last flag contains null values.
commandLineFlags = new Flag[numFlags - 1];
--- a/test/hotspot/gtest/gc/g1/test_bufferingOopClosure.cpp Fri Apr 27 11:33:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,257 +0,0 @@
-/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/bufferingOopClosure.hpp"
-#include "memory/iterator.hpp"
-#include "unittest.hpp"
-
-class BufferingOopClosureTest : public ::testing::Test {
- public:
- // Helper class to fake a set of oop*s and narrowOop*s.
- class FakeRoots {
- public:
- // Used for sanity checking of the values passed to the do_oops functions in the test.
- static const uintptr_t NarrowOopMarker = uintptr_t(1) << (BitsPerWord -1);
-
- int _num_narrow;
- int _num_full;
- void** _narrow;
- void** _full;
-
- FakeRoots(int num_narrow, int num_full) :
- _num_narrow(num_narrow),
- _num_full(num_full),
- _narrow((void**)::malloc(sizeof(void*) * num_narrow)),
- _full((void**)::malloc(sizeof(void*) * num_full)) {
-
- for (int i = 0; i < num_narrow; i++) {
- _narrow[i] = (void*)(NarrowOopMarker + (uintptr_t)i);
- }
- for (int i = 0; i < num_full; i++) {
- _full[i] = (void*)(uintptr_t)i;
- }
- }
-
- ~FakeRoots() {
- ::free(_narrow);
- ::free(_full);
- }
-
- void oops_do_narrow_then_full(OopClosure* cl) {
- for (int i = 0; i < _num_narrow; i++) {
- cl->do_oop((narrowOop*)_narrow[i]);
- }
- for (int i = 0; i < _num_full; i++) {
- cl->do_oop((oop*)_full[i]);
- }
- }
-
- void oops_do_full_then_narrow(OopClosure* cl) {
- for (int i = 0; i < _num_full; i++) {
- cl->do_oop((oop*)_full[i]);
- }
- for (int i = 0; i < _num_narrow; i++) {
- cl->do_oop((narrowOop*)_narrow[i]);
- }
- }
-
- void oops_do_mixed(OopClosure* cl) {
- int i;
- for (i = 0; i < _num_full && i < _num_narrow; i++) {
- cl->do_oop((oop*)_full[i]);
- cl->do_oop((narrowOop*)_narrow[i]);
- }
- for (int j = i; j < _num_full; j++) {
- cl->do_oop((oop*)_full[i]);
- }
- for (int j = i; j < _num_narrow; j++) {
- cl->do_oop((narrowOop*)_narrow[i]);
- }
- }
-
- static const int MaxOrder = 2;
-
- void oops_do(OopClosure* cl, int do_oop_order) {
- switch(do_oop_order) {
- case 0:
- oops_do_narrow_then_full(cl);
- break;
- case 1:
- oops_do_full_then_narrow(cl);
- break;
- case 2:
- oops_do_mixed(cl);
- break;
- default:
- oops_do_narrow_then_full(cl);
- break;
- }
- }
- };
-
- class CountOopClosure : public OopClosure {
- int _narrow_oop_count;
- int _full_oop_count;
- public:
- CountOopClosure() : _narrow_oop_count(0), _full_oop_count(0) {}
- void do_oop(narrowOop* p) {
- EXPECT_NE(uintptr_t(0), (uintptr_t(p) & FakeRoots::NarrowOopMarker))
- << "The narrowOop was unexpectedly not marked with the NarrowOopMarker";
- _narrow_oop_count++;
- }
-
- void do_oop(oop* p){
- EXPECT_EQ(uintptr_t(0), (uintptr_t(p) & FakeRoots::NarrowOopMarker))
- << "The oop was unexpectedly marked with the NarrowOopMarker";
- _full_oop_count++;
- }
-
- int narrow_oop_count() { return _narrow_oop_count; }
- int full_oop_count() { return _full_oop_count; }
- int all_oop_count() { return _narrow_oop_count + _full_oop_count; }
- };
-
- class DoNothingOopClosure : public OopClosure {
- public:
- void do_oop(narrowOop* p) {}
- void do_oop(oop* p) {}
- };
-
- static void testCount(int num_narrow, int num_full, int do_oop_order) {
- FakeRoots fr(num_narrow, num_full);
-
- CountOopClosure coc;
- BufferingOopClosure boc(&coc);
-
- fr.oops_do(&boc, do_oop_order);
-
- boc.done();
-
- EXPECT_EQ(num_narrow, coc.narrow_oop_count()) << "when running testCount("
- << num_narrow << ", " << num_full << ", " << do_oop_order << ")";
-
- EXPECT_EQ(num_full, coc.full_oop_count()) << "when running testCount("
- << num_narrow << ", " << num_full << ", " << do_oop_order << ")";
-
- EXPECT_EQ(num_narrow + num_full, coc.all_oop_count()) << "when running testCount("
- << num_narrow << ", " << num_full << ", " << do_oop_order << ")";
- }
-
- static void testIsBufferEmptyOrFull(int num_narrow, int num_full, bool expect_empty, bool expect_full) {
- FakeRoots fr(num_narrow, num_full);
-
- DoNothingOopClosure cl;
- BufferingOopClosure boc(&cl);
-
- fr.oops_do(&boc, 0);
-
- EXPECT_EQ(expect_empty, boc.is_buffer_empty())
- << "when running testIsBufferEmptyOrFull("
- << num_narrow << ", " << num_full << ", "
- << expect_empty << ", " << expect_full << ")";
-
- EXPECT_EQ(expect_full, boc.is_buffer_full())
- << "when running testIsBufferEmptyOrFull("
- << num_narrow << ", " << num_full << ", "
- << expect_empty << ", " << expect_full << ")";
- }
-
- static void testEmptyAfterDone(int num_narrow, int num_full) {
- FakeRoots fr(num_narrow, num_full);
-
- DoNothingOopClosure cl;
- BufferingOopClosure boc(&cl);
-
- fr.oops_do(&boc, 0);
-
- // Make sure all get processed.
- boc.done();
-
- EXPECT_TRUE(boc.is_buffer_empty()) << "Should be empty after call to done()."
- << " testEmptyAfterDone(" << num_narrow << ", " << num_full << ")";
- }
-
- static int get_buffer_length() {
- return BufferingOopClosure::BufferLength;
- }
-};
-
-TEST_VM_F(BufferingOopClosureTest, count_test) {
- int bl = BufferingOopClosureTest::get_buffer_length();
-
- for (int order = 0; order < FakeRoots::MaxOrder; order++) {
- testCount(0, 0, order);
- testCount(10, 0, order);
- testCount(0, 10, order);
- testCount(10, 10, order);
- testCount(bl, 10, order);
- testCount(10, bl, order);
- testCount(bl, bl, order);
- testCount(bl + 1, 10, order);
- testCount(10, bl + 1, order);
- testCount(bl + 1, bl, order);
- testCount(bl, bl + 1, order);
- testCount(bl + 1, bl + 1, order);
- }
-}
-
-TEST_VM_F(BufferingOopClosureTest, buffer_empty_or_full) {
- int bl = BufferingOopClosureTest::get_buffer_length();
-
- testIsBufferEmptyOrFull(0, 0, true, false);
- testIsBufferEmptyOrFull(1, 0, false, false);
- testIsBufferEmptyOrFull(0, 1, false, false);
- testIsBufferEmptyOrFull(1, 1, false, false);
- testIsBufferEmptyOrFull(10, 0, false, false);
- testIsBufferEmptyOrFull(0, 10, false, false);
- testIsBufferEmptyOrFull(10, 10, false, false);
- testIsBufferEmptyOrFull(0, bl, false, true);
- testIsBufferEmptyOrFull(bl, 0, false, true);
- testIsBufferEmptyOrFull(bl / 2, bl / 2, false, true);
- testIsBufferEmptyOrFull(bl - 1, 1, false, true);
- testIsBufferEmptyOrFull(1, bl - 1, false, true);
- // Processed
- testIsBufferEmptyOrFull(bl + 1, 0, false, false);
- testIsBufferEmptyOrFull(bl * 2, 0, false, true);
-}
-
-TEST_VM_F(BufferingOopClosureTest, empty_after_done) {
- int bl = BufferingOopClosureTest::get_buffer_length();
-
- testEmptyAfterDone(0, 0);
- testEmptyAfterDone(1, 0);
- testEmptyAfterDone(0, 1);
- testEmptyAfterDone(1, 1);
- testEmptyAfterDone(10, 0);
- testEmptyAfterDone(0, 10);
- testEmptyAfterDone(10, 10);
- testEmptyAfterDone(0, bl);
- testEmptyAfterDone(bl, 0);
- testEmptyAfterDone(bl / 2, bl / 2);
- testEmptyAfterDone(bl - 1, 1);
- testEmptyAfterDone(1, bl - 1);
- // Processed
- testEmptyAfterDone(bl + 1, 0);
- testEmptyAfterDone(bl * 2, 0);
-}
--- a/test/hotspot/gtest/gc/shared/test_collectorPolicy.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/test/hotspot/gtest/gc/shared/test_collectorPolicy.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "runtime/arguments.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "runtime/globals_extension.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
--- a/test/hotspot/gtest/runtime/test_globals.cpp Fri Apr 27 11:33:22 2018 +0100
+++ b/test/hotspot/gtest/runtime/test_globals.cpp Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,17 +23,18 @@
#include "precompiled.hpp"
#include "runtime/globals.hpp"
+#include "runtime/flags/flagSetting.hpp"
#include "unittest.hpp"
-#define TEST_FLAG(f, type, value) \
- do { \
- ASSERT_TRUE(Flag::find_flag(#f)->is_ ## type()); \
- type original_value = f; \
- { \
- FLAG_GUARD(f); \
- f = value; \
- } \
- ASSERT_EQ(original_value, f); \
+#define TEST_FLAG(f, type, value) \
+ do { \
+ ASSERT_TRUE(JVMFlag::find_flag(#f)->is_ ## type()); \
+ type original_value = f; \
+ { \
+ FLAG_GUARD(f); \
+ f = value; \
+ } \
+ ASSERT_EQ(original_value, f); \
} while (0)
TEST_VM(FlagGuard, bool_flag) {
--- a/test/hotspot/jtreg/ProblemList.txt Fri Apr 27 11:33:22 2018 +0100
+++ b/test/hotspot/jtreg/ProblemList.txt Fri Apr 27 12:29:49 2018 +0100
@@ -92,6 +92,5 @@
# Java EE Module Removal
#
-runtime/modules/PatchModule/PatchModuleClassList.java 8194310 generic-all Java EE Module Removal
compiler/c2/Test8007294.java 8194310 generic-all Java EE Module Removal
compiler/c2/Test6852078.java 8194310 generic-all Java EE Module Removal
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/vectorization/TestUnexpectedLoadOrdering.java Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8201367
+ * @summary RPO walk of counted loop block doesn't properly order loads
+ *
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseCountedLoopSafepoints TestUnexpectedLoadOrdering
+ *
+ */
+
+public class TestUnexpectedLoadOrdering {
+
+ public static void main(String[] args) {
+ double[] array1 = new double[1000];
+ double[] array2 = new double[1000];
+ for (int i = 0; i < 20_000; i++) {
+ test(array1, array2);
+ }
+ }
+
+ private static double test(double[] array1, double[] array2) {
+ double res = 0;
+ for (int i = 0; i < array1.length; i++) {
+ array2[i] = i;
+ res += array1[i];
+ }
+ return res;
+ }
+}
--- a/test/hotspot/jtreg/runtime/modules/PatchModule/PatchModuleClassList.java Fri Apr 27 11:33:22 2018 +0100
+++ b/test/hotspot/jtreg/runtime/modules/PatchModule/PatchModuleClassList.java Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@
public class PatchModuleClassList {
private static final String BOOT_CLASS = "javax/naming/spi/NamingManager";
- private static final String PLATFORM_CLASS = "javax/transaction/InvalidTransactionException";
+ private static final String PLATFORM_CLASS = "java/sql/ResultSet";
public static void main(String args[]) throws Throwable {
// Case 1. A class to be loaded by the boot class loader
@@ -68,7 +68,9 @@
"-XX:DumpLoadedClassList=" + classList,
"--patch-module=java.naming=" + moduleJar,
"PatchModuleMain", BOOT_CLASS.replace('/', '.'));
- new OutputAnalyzer(pb.start()).shouldHaveExitValue(0);
+ OutputAnalyzer oa = new OutputAnalyzer(pb.start());
+ oa.shouldContain("I pass!");
+ oa.shouldHaveExitValue(0);
// check the generated classlist file
String content = new String(Files.readAllBytes(Paths.get(classList)));
@@ -78,30 +80,32 @@
// Case 2. A class to be loaded by the platform class loader
- // Create a class file in the module java.transaction. This class file
- // will be put in the javatransaction.jar file.
- source = "package javax.transaction; " +
- "public class InvalidTransactionException { " +
+ // Create a class file in the module java.sql. This class file
+ // will be put in the javasql.jar file.
+ source = "package java.sql; " +
+ "public class ResultSet { " +
" static { " +
- " System.out.println(\"I pass!\"); " +
+ " System.out.println(\"I pass too!\"); " +
" } " +
"}";
ClassFileInstaller.writeClassToDisk(PLATFORM_CLASS,
- InMemoryJavaCompiler.compile(PLATFORM_CLASS.replace('/', '.'), source, "--patch-module=java.transaction"),
+ InMemoryJavaCompiler.compile(PLATFORM_CLASS.replace('/', '.'), source, "--patch-module=java.sql"),
System.getProperty("test.classes"));
- // Build the jar file that will be used for the module "java.transaction".
- BasicJarBuilder.build("javatransaction", PLATFORM_CLASS);
- moduleJar = BasicJarBuilder.getTestJar("javatransaction.jar");
+ // Build the jar file that will be used for the module "java.sql".
+ BasicJarBuilder.build("javasql", PLATFORM_CLASS);
+ moduleJar = BasicJarBuilder.getTestJar("javasql.jar");
- classList = "javatransaction.list";
+ classList = "javasql.list";
pb = ProcessTools.createJavaProcessBuilder(
true,
"-XX:DumpLoadedClassList=" + classList,
- "--patch-module=java.naming=" + moduleJar,
+ "--patch-module=java.sql=" + moduleJar,
"PatchModuleMain", PLATFORM_CLASS.replace('/', '.'));
- new OutputAnalyzer(pb.start()).shouldHaveExitValue(0);
+ OutputAnalyzer oa2 = new OutputAnalyzer(pb.start());
+ oa2.shouldContain("I pass too!");
+ oa2.shouldHaveExitValue(0);
// check the generated classlist file
content = new String(Files.readAllBytes(Paths.get(classList)));
--- a/test/jdk/ProblemList.txt Fri Apr 27 11:33:22 2018 +0100
+++ b/test/jdk/ProblemList.txt Fri Apr 27 12:29:49 2018 +0100
@@ -513,6 +513,9 @@
java/io/FileOutputStream/AtomicAppend.java 8202062 macosx-all
java/io/pathNames/GeneralWin32.java 8180264 windows-all
+java/io/FileInputStream/UnreferencedFISClosesFd.java 8202292 linux-all
+java/io/FileOutputStream/UnreferencedFOSClosesFd.java 8202292 linux-all
+java/io/RandomAccessFile/UnreferencedRAFClosesFd.java 8202292 linux-all
############################################################################
--- a/test/jdk/java/util/TimeZone/Bug8149452.java Fri Apr 27 11:33:22 2018 +0100
+++ b/test/jdk/java/util/TimeZone/Bug8149452.java Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,7 +22,7 @@
*/
/*
* @test
- * @bug 8149452 8151876
+ * @bug 8149452 8151876 8181157
* @summary Check the missing time zone names.
*/
import java.text.DateFormatSymbols;
@@ -34,6 +34,20 @@
public class Bug8149452 {
public static void main(String[] args) {
+ // These zone ids are new in tzdb and yet to be reflected in
+ // CLDR data. Needs to be excluded from the test.
+ // This list is as of CLDR version 29, and should be examined
+ // on the CLDR data upgrade.
+ List<String> NEW_ZONEIDS = List.of(
+ "America/Punta_Arenas",
+ "Asia/Atyrau",
+ "Asia/Barnaul",
+ "Asia/Famagusta",
+ "Asia/Tomsk",
+ "Europe/Astrakhan",
+ "Europe/Kirov",
+ "Europe/Saratov",
+ "Europe/Ulyanovsk");
List<String> listNotFound = new ArrayList<>();
String[][] zoneStrings = DateFormatSymbols.getInstance()
@@ -42,10 +56,9 @@
if (!Arrays.stream(zoneStrings)
.anyMatch(zone -> tzID.equalsIgnoreCase(zone[0]))) {
// to ignore names for Etc/GMT[+-][0-9]+ which are not supported
- // Also ignore the TimeZone DisplayNames with GMT[+-]:hh:mm
if (!tzID.startsWith("Etc/GMT")
&& !tzID.startsWith("GMT")
- && !TimeZone.getTimeZone(tzID).getDisplayName().startsWith("GMT")) {
+ && !NEW_ZONEIDS.contains(tzID)) {
listNotFound.add(tzID);
}
}
--- a/test/jdk/java/util/TimeZone/CLDRDisplayNamesTest.java Fri Apr 27 11:33:22 2018 +0100
+++ b/test/jdk/java/util/TimeZone/CLDRDisplayNamesTest.java Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
/*
* @test
- * @bug 8005471 8008577 8129881 8130845 8136518
+ * @bug 8005471 8008577 8129881 8130845 8136518 8181157
* @modules jdk.localedata
* @run main/othervm -Djava.locale.providers=CLDR CLDRDisplayNamesTest
* @summary Make sure that localized time zone names of CLDR are used
@@ -47,27 +47,27 @@
{
"ja-JP",
"\u30a2\u30e1\u30ea\u30ab\u592a\u5e73\u6d0b\u6a19\u6e96\u6642",
- "PST",
+ "GMT-08:00",
"\u30a2\u30e1\u30ea\u30ab\u592a\u5e73\u6d0b\u590f\u6642\u9593",
- "PDT",
+ "GMT-07:00",
//"\u30a2\u30e1\u30ea\u30ab\u592a\u5e73\u6d0b\u6642\u9593",
//"PT"
},
{
"zh-CN",
"\u5317\u7f8e\u592a\u5e73\u6d0b\u6807\u51c6\u65f6\u95f4",
- "PST",
+ "GMT-08:00",
"\u5317\u7f8e\u592a\u5e73\u6d0b\u590f\u4ee4\u65f6\u95f4",
- "PDT",
+ "GMT-07:00",
//"\u5317\u7f8e\u592a\u5e73\u6d0b\u65f6\u95f4",
//"PT",
},
{
"de-DE",
"Nordamerikanische Westk\u00fcsten-Normalzeit",
- "PST",
+ "GMT-08:00",
"Nordamerikanische Westk\u00fcsten-Sommerzeit",
- "PDT",
+ "GMT-07:00",
//"Nordamerikanische Westk\u00fcstenzeit",
//"PT",
},
--- a/test/jdk/java/util/TimeZone/TimeZoneTest.java Fri Apr 27 11:33:22 2018 +0100
+++ b/test/jdk/java/util/TimeZone/TimeZoneTest.java Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,7 @@
/*
* @test
* @bug 4028006 4044013 4096694 4107276 4107570 4112869 4130885 7039469 7126465 7158483
- * 8008577 8077685 8098547 8133321 8138716 8148446 8151876 8159684 8166875
+ * 8008577 8077685 8098547 8133321 8138716 8148446 8151876 8159684 8166875 8181157
* @modules java.base/sun.util.resources
* @library /java/text/testlib
* @summary test TimeZone
@@ -364,6 +364,7 @@
}
else if (!name.equals("Pacific Standard Time") &&
!name.equals("\u592a\u5e73\u6d0b\u6807\u51c6\u65f6\u95f4") &&
+ !name.equals("\u5317\u7f8e\u592a\u5e73\u6d0b\u6807\u51c6\u65f6\u95f4") &&
!name.equals("GMT-08:00") &&
!name.equals("GMT-8:00") &&
!name.equals("GMT-0800") &&
--- a/test/jdk/sun/text/resources/LocaleData.cldr Fri Apr 27 11:33:22 2018 +0100
+++ b/test/jdk/sun/text/resources/LocaleData.cldr Fri Apr 27 12:29:49 2018 +0100
@@ -5563,7 +5563,7 @@
# bug 6507067
TimeZoneNames/zh_TW/Asia\/Taipei/1=\u53f0\u5317\u6a19\u6e96\u6642\u9593
-TimeZoneNames/zh_TW/Asia\/Taipei/2=CST
+TimeZoneNames/zh_TW/Asia\/Taipei/2=
# bug 6645271
FormatData/hr_HR/DatePatterns/2=d. MMM y.
--- a/test/jdk/sun/text/resources/LocaleDataTest.java Fri Apr 27 11:33:22 2018 +0100
+++ b/test/jdk/sun/text/resources/LocaleDataTest.java Fri Apr 27 12:29:49 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
* 7114053 7074882 7040556 8008577 8013836 8021121 6192407 6931564 8027695
* 8017142 8037343 8055222 8042126 8074791 8075173 8080774 8129361 8134916
* 8145136 8145952 8164784 8037111 8081643 7037368 8178872 8185841 8190918
- * 8187946 8195478
+ * 8187946 8195478 8181157
* @summary Verify locale data
* @modules java.base/sun.util.resources
* @modules jdk.localedata
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/sun/util/resources/cldr/TimeZoneNamesTest.java Fri Apr 27 12:29:49 2018 +0100
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+ /*
+ * @test
+ * @bug 8181157
+ * @modules jdk.localedata
+ * @summary Checks CLDR time zone names are generated correctly at runtime
+ * @run testng/othervm -Djava.locale.providers=CLDR TimeZoneNamesTest
+ */
+
+import static org.testng.Assert.assertEquals;
+
+import java.time.ZoneId;
+import java.time.format.TextStyle;
+import java.util.Locale;
+import java.util.TimeZone;
+
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+
+@Test
+public class TimeZoneNamesTest {
+
+ @DataProvider(name="noResourceTZs")
+ Object[][] data() {
+ return new Object[][] {
+ // tzid, locale, style, expected
+
+ // These zone ids are new in tzdb and yet to be reflected in
+ // CLDR data. Thus it's assured there is no l10n names for these.
+ // This list is as of CLDR version 29, and should be examined
+ // on the CLDR data upgrade.
+ {"America/Punta_Arenas", Locale.US, "Punta Arenas Standard Time",
+ "GMT-03:00",
+ "Punta Arenas Daylight Time",
+ "GMT-03:00",
+ "Punta Arenas Time",
+ "GMT-03:00"},
+ {"America/Punta_Arenas", Locale.FRANCE, "Punta Arenas (heure standard)",
+ "UTC\u221203:00",
+ "Punta Arenas (heure d\u2019\u00e9t\u00e9)",
+ "UTC\u221203:00",
+ "heure : Punta Arenas",
+ "UTC\u221203:00"},
+ {"Asia/Atyrau", Locale.US, "Atyrau Standard Time",
+ "GMT+05:00",
+ "Atyrau Daylight Time",
+ "GMT+05:00",
+ "Atyrau Time",
+ "GMT+05:00"},
+ {"Asia/Atyrau", Locale.FRANCE, "Atyrau (heure standard)",
+ "UTC+05:00",
+ "Atyrau (heure d\u2019\u00e9t\u00e9)",
+ "UTC+05:00",
+ "heure : Atyrau",
+ "UTC+05:00"},
+
+ // no "metazone" zones
+ {"Asia/Srednekolymsk", Locale.US, "Srednekolymsk Time",
+ "SRET",
+ "Srednekolymsk Daylight Time",
+ "SREDT",
+ "Srednekolymsk Time",
+ "SRET"},
+ {"Asia/Srednekolymsk", Locale.FRANCE, "Srednekolymsk (heure standard)",
+ "UTC+11:00",
+ "Srednekolymsk (heure standard)",
+ "UTC+11:00",
+ "heure : Srednekolymsk",
+ "UTC+11:00"},
+ {"Pacific/Bougainville", Locale.US, "Bougainville Standard Time",
+ "BST",
+ "Bougainville Daylight Time",
+ "BST",
+ "Bougainville Time",
+ "BT"},
+ {"Pacific/Bougainville", Locale.FRANCE, "Bougainville (heure standard)",
+ "UTC+11:00",
+ "Bougainville (heure standard)",
+ "UTC+11:00",
+ "heure : Bougainville",
+ "UTC+11:00"},
+
+ };
+ }
+
+
+ @Test(dataProvider="noResourceTZs")
+ public void test_tzNames(String tzid, Locale locale, String lstd, String sstd, String ldst, String sdst, String lgen, String sgen) {
+ // Standard time
+ assertEquals(TimeZone.getTimeZone(tzid).getDisplayName(false, TimeZone.LONG, locale), lstd);
+ assertEquals(TimeZone.getTimeZone(tzid).getDisplayName(false, TimeZone.SHORT, locale), sstd);
+
+ // daylight saving time
+ assertEquals(TimeZone.getTimeZone(tzid).getDisplayName(true, TimeZone.LONG, locale), ldst);
+ assertEquals(TimeZone.getTimeZone(tzid).getDisplayName(true, TimeZone.SHORT, locale), sdst);
+
+ // generic name
+ assertEquals(ZoneId.of(tzid).getDisplayName(TextStyle.FULL, locale), lgen);
+ assertEquals(ZoneId.of(tzid).getDisplayName(TextStyle.SHORT, locale), sgen);
+ }
+}