8003985: Support @Contended Annotation - JEP 142
Summary: HotSpot changes to support @Contended annotation.
Reviewed-by: coleenp, kvn, jrose
Contributed-by: Aleksey Shipilev <aleksey.shipilev@oracle.com>
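
For context, a minimal sketch of what the annotation enables (this assumes the
companion JDK-side change that adds sun.misc.Contended; user-level classes also
need -XX:-RestrictContended, per the parser changes below):

    import sun.misc.Contended;

    public class ContendedExample {
        @Contended                   // default group: padded from all other fields
        volatile long statusWord;

        @Contended("counters")       // named group: members are padded as a unit,
        volatile long enqueued;      // but not from each other
        @Contended("counters")
        volatile long dequeued;
    }

    // The class-level form pads the whole field block front and back:
    @Contended
    class PaddedBuffer {
        volatile long value;
    }
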
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java Fri Jan 11 09:53:24 2013 -0800
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java Mon Jan 14 15:17:47 2013 +0100
@@ -52,6 +52,8 @@
private static int LOW_OFFSET;
private static int HIGH_OFFSET;
private static int FIELD_SLOTS;
+ private static short FIELDINFO_TAG_SIZE;
+ private static short FIELDINFO_TAG_MASK;
+ private static short FIELDINFO_TAG_OFFSET;
// ClassState constants
private static int CLASS_STATE_ALLOCATED;
@@ -96,9 +98,12 @@
NAME_INDEX_OFFSET = db.lookupIntConstant("FieldInfo::name_index_offset").intValue();
SIGNATURE_INDEX_OFFSET = db.lookupIntConstant("FieldInfo::signature_index_offset").intValue();
INITVAL_INDEX_OFFSET = db.lookupIntConstant("FieldInfo::initval_index_offset").intValue();
- LOW_OFFSET = db.lookupIntConstant("FieldInfo::low_offset").intValue();
- HIGH_OFFSET = db.lookupIntConstant("FieldInfo::high_offset").intValue();
+ LOW_OFFSET = db.lookupIntConstant("FieldInfo::low_packed_offset").intValue();
+ HIGH_OFFSET = db.lookupIntConstant("FieldInfo::high_packed_offset").intValue();
FIELD_SLOTS = db.lookupIntConstant("FieldInfo::field_slots").intValue();
+ FIELDINFO_TAG_SIZE = db.lookupIntConstant("FIELDINFO_TAG_SIZE").shortValue();
+ FIELDINFO_TAG_MASK = db.lookupIntConstant("FIELDINFO_TAG_MASK").shortValue();
+ FIELDINFO_TAG_OFFSET = db.lookupIntConstant("FIELDINFO_TAG_OFFSET").shortValue();
+
// read ClassState constants
CLASS_STATE_ALLOCATED = db.lookupIntConstant("InstanceKlass::allocated").intValue();
CLASS_STATE_LOADED = db.lookupIntConstant("InstanceKlass::loaded").intValue();
@@ -314,8 +319,12 @@
public int getFieldOffset(int index) {
U2Array fields = getFields();
- return VM.getVM().buildIntFromShorts(fields.at(index * FIELD_SLOTS + LOW_OFFSET),
- fields.at(index * FIELD_SLOTS + HIGH_OFFSET));
+ short lo = fields.at(index * FIELD_SLOTS + LOW_OFFSET);
+ short hi = fields.at(index * FIELD_SLOTS + HIGH_OFFSET);
+ if ((lo & FIELDINFO_TAG_MASK) == FIELDINFO_TAG_OFFSET) {
+ return VM.getVM().buildIntFromShorts(lo, hi) >> FIELDINFO_TAG_SIZE;
+ }
+ throw new RuntimeException("should not reach here");
}
// Accessors for declared fields
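
The low short of the packed pair now carries a two-bit tag (see the FieldInfo
changes below), so the SA must strip it before reporting an offset. A
self-contained sketch of the same decoding, with hypothetical raw values:

    public class PackedOffsetDemo {
        static final int TAG_SIZE = 2, TAG_MASK = 3, TAG_OFFSET = 1;

        // mirrors HotSpot's build_int_from_shorts: high:low concatenation
        static int buildIntFromShorts(short lo, short hi) {
            return (hi << 16) | (lo & 0xFFFF);
        }

        static int fieldOffset(short lo, short hi) {
            if ((lo & TAG_MASK) == TAG_OFFSET) {
                return buildIntFromShorts(lo, hi) >> TAG_SIZE;
            }
            throw new RuntimeException("field is not laid out yet");
        }

        public static void main(String[] args) {
            // offset 16 is stored as (16 << 2) | 1 = 0x41 in the low short
            System.out.println(fieldOffset((short) 0x41, (short) 0)); // prints 16
        }
    }
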
--- a/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp Fri Jan 11 09:53:24 2013 -0800
+++ b/hotspot/src/cpu/sparc/vm/vm_version_sparc.cpp Mon Jan 14 15:17:47 2013 +0100
@@ -259,6 +259,10 @@
if (!has_vis1()) // Drop to 0 if no VIS1 support
UseVIS = 0;
+ if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
+ (cache_line_size > ContendedPaddingWidth))
+ ContendedPaddingWidth = cache_line_size;
+
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print("Allocation");
@@ -286,6 +290,9 @@
if (PrefetchFieldsAhead > 0) {
tty->print_cr("PrefetchFieldsAhead %d", PrefetchFieldsAhead);
}
+ if (ContendedPaddingWidth > 0) {
+ tty->print_cr("ContendedPaddingWidth %d", ContendedPaddingWidth);
+ }
}
#endif // PRODUCT
}
--- a/hotspot/src/cpu/x86/vm/vm_version_x86.cpp Fri Jan 11 09:53:24 2013 -0800
+++ b/hotspot/src/cpu/x86/vm/vm_version_x86.cpp Mon Jan 14 15:17:47 2013 +0100
@@ -734,6 +734,10 @@
PrefetchFieldsAhead = prefetch_fields_ahead();
#endif
+ if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
+ (cache_line_size > ContendedPaddingWidth))
+ ContendedPaddingWidth = cache_line_size;
+
#ifndef PRODUCT
if (PrintMiscellaneous && Verbose) {
tty->print_cr("Logical CPUs per core: %u",
@@ -780,6 +784,9 @@
if (PrefetchFieldsAhead > 0) {
tty->print_cr("PrefetchFieldsAhead %d", PrefetchFieldsAhead);
}
+ if (ContendedPaddingWidth > 0) {
+ tty->print_cr("ContendedPaddingWidth %d", ContendedPaddingWidth);
+ }
}
#endif // !PRODUCT
}
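
Both ports raise ContendedPaddingWidth to the detected cache line size, since
padding narrower than a line cannot keep two hot fields apart. A sketch of the
false sharing the padding is meant to avoid (timings are machine-dependent, and
this is not a rigorous benchmark; run with -XX:-RestrictContended so the
annotation is honored for user code):

    import sun.misc.Contended;

    public class FalseSharingDemo {
        static class Plain {                  // a and b likely share a cache line
            volatile long a, b;
        }
        static class Padded {                 // @Contended keeps them on separate lines
            @Contended volatile long a;
            @Contended volatile long b;
        }

        static long race(Runnable w1, Runnable w2) throws InterruptedException {
            Thread t1 = new Thread(w1), t2 = new Thread(w2);
            long start = System.nanoTime();
            t1.start(); t2.start(); t1.join(); t2.join();
            return (System.nanoTime() - start) / 1_000_000;
        }

        public static void main(String[] args) throws InterruptedException {
            final int N = 50_000_000;   // volatile increments are not atomic,
            Plain p = new Plain();      // but only the cache-line traffic matters here
            Padded q = new Padded();
            System.out.println("plain : " + race(() -> { for (int i = 0; i < N; i++) p.a++; },
                                                 () -> { for (int i = 0; i < N; i++) p.b++; }) + " ms");
            System.out.println("padded: " + race(() -> { for (int i = 0; i < N; i++) q.a++; },
                                                 () -> { for (int i = 0; i < N; i++) q.b++; }) + " ms");
        }
    }
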
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp Fri Jan 11 09:53:24 2013 -0800
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp Mon Jan 14 15:17:47 2013 +0100
@@ -970,6 +970,12 @@
runtime_visible_annotations_length = attribute_length;
runtime_visible_annotations = cfs->get_u1_buffer();
assert(runtime_visible_annotations != NULL, "null visible annotations");
+ parse_annotations(loader_data,
+ runtime_visible_annotations,
+ runtime_visible_annotations_length,
+ cp,
+ parsed_annotations,
+ CHECK);
cfs->skip_u1(runtime_visible_annotations_length, CHECK);
} else if (PreserveAllAnnotations && attribute_name == vmSymbols::tag_runtime_invisible_annotations()) {
runtime_invisible_annotations_length = attribute_length;
@@ -1216,19 +1222,16 @@
field->initialize(access_flags.as_short(),
name_index,
signature_index,
- constantvalue_index,
- 0);
- if (parsed_annotations.has_any_annotations())
- parsed_annotations.apply_to(field);
-
+ constantvalue_index);
BasicType type = cp->basic_type_for_signature_at(signature_index);
// Remember how many oops we encountered and compute allocation type
FieldAllocationType atype = fac->update(is_static, type);
-
- // The correct offset is computed later (all oop fields will be located together)
- // We temporarily store the allocation type in the offset field
- field->set_offset(atype);
+ field->set_allocation_type(atype);
+
+ // After field is initialized with type, we can augment it with aux info
+ if (parsed_annotations.has_any_annotations())
+ parsed_annotations.apply_to(field);
}
int index = length;
@@ -1259,17 +1262,13 @@
field->initialize(JVM_ACC_FIELD_INTERNAL,
injected[n].name_index,
injected[n].signature_index,
- 0,
0);
BasicType type = FieldType::basic_type(injected[n].signature());
// Remember how many oops we encountered and compute allocation type
FieldAllocationType atype = fac->update(false, type);
-
- // The correct offset is computed later (all oop fields will be located together)
- // We temporarily store the allocation type in the offset field
- field->set_offset(atype);
+ field->set_allocation_type(atype);
index++;
}
}
@@ -1735,7 +1734,8 @@
}
// Sift through annotations, looking for those significant to the VM:
-void ClassFileParser::parse_annotations(u1* buffer, int limit,
+void ClassFileParser::parse_annotations(ClassLoaderData* loader_data,
+ u1* buffer, int limit,
constantPoolHandle cp,
ClassFileParser::AnnotationCollector* coll,
TRAPS) {
@@ -1752,9 +1752,12 @@
e_type_off = 7, // utf8 such as 'Ljava/lang/annotation/RetentionPolicy;'
e_con_off = 9, // utf8 payload, such as 'SOURCE', 'CLASS', 'RUNTIME'
e_size = 11, // end of 'e' annotation
- c_tag_val = 'c',
- c_con_off = 7, // utf8 payload, such as 'I' or 'Ljava/lang/String;'
+ c_tag_val = 'c', // payload is type
+ c_con_off = 7, // utf8 payload, such as 'I'
c_size = 9, // end of 'c' annotation
+ s_tag_val = 's', // payload is String
+ s_con_off = 7, // utf8 payload, such as 'hot' (a contention group name)
+ s_size = 9,
min_size = 6 // smallest possible size (zero members)
};
while ((--nann) >= 0 && (index-2 + min_size <= limit)) {
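
These offsets follow the JVMS 4.7.16 encoding: a 2-byte annotation type index,
a 2-byte member count, then per member a 2-byte name index and a tagged
element_value; tag 's' is a String constant whose 2-byte index follows
immediately. A sketch of the same walk over a hand-built payload for
@Contended("hot"), with hypothetical constant-pool indices:

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;

    public class AnnotationWalkDemo {
        static final int TYPE_IDX = 21;   // utf8 "Lsun/misc/Contended;"
        static final int VALUE_IDX = 22;  // utf8 "value"
        static final int GROUP_IDX = 23;  // utf8 "hot"

        public static void main(String[] args) throws IOException {
            byte[] payload = {
                0, 1,             // num_annotations = 1
                0, TYPE_IDX,      // annotation type index
                0, 1,             // one member-value pair
                0, VALUE_IDX,     // member name index: "value"
                's',              // element_value tag: String constant
                0, GROUP_IDX      // s_con_off: const_value_index -> "hot"
            };
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(payload));
            int nann = in.readUnsignedShort();
            while (--nann >= 0) {  // arguments below are read left to right, in order
                System.out.printf("type=#%d members=%d name=#%d tag=%c const=#%d%n",
                                  in.readUnsignedShort(), in.readUnsignedShort(),
                                  in.readUnsignedShort(), (char) in.readUnsignedByte(),
                                  in.readUnsignedShort());
            }
        }
    }
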
@@ -1773,57 +1776,65 @@
}
// Here is where parsing particular annotations will take place.
- AnnotationCollector::ID id = coll->annotation_index(aname);
+ AnnotationCollector::ID id = coll->annotation_index(loader_data, aname);
if (id == AnnotationCollector::_unknown) continue;
coll->set_annotation(id);
- // If there are no values, just set the bit and move on:
- if (count == 0) continue;
-
- // For the record, here is how annotation payloads can be collected.
- // Suppose we want to capture @Retention.value. Here is how:
- //if (id == AnnotationCollector::_class_Retention) {
- // Symbol* payload = NULL;
- // if (count == 1
- // && e_size == (index0 - index) // match size
- // && e_tag_val == *(abase + tag_off)
- // && (check_symbol_at(cp, Bytes::get_Java_u2(abase + e_type_off))
- // == vmSymbols::RetentionPolicy_signature())
- // && member == vmSymbols::value_name()) {
- // payload = check_symbol_at(cp, Bytes::get_Java_u2(abase + e_con_off));
- // }
- // check_property(payload != NULL,
- // "Invalid @Retention annotation at offset %u in class file %s",
- // index0, CHECK);
- // if (payload != NULL) {
- // payload->increment_refcount();
- // coll->_class_RetentionPolicy = payload;
- // }
- //}
+
+ if (id == AnnotationCollector::_sun_misc_Contended) {
+ if (count == 1
+ && s_size == (index - index0) // match size
+ && s_tag_val == *(abase + tag_off)
+ && member == vmSymbols::value_name()) {
+ u2 group_index = Bytes::get_Java_u2(abase + s_con_off);
+ coll->set_contended_group(group_index);
+ } else {
+ coll->set_contended_group(0); // default contended group
+ }
+ coll->set_contended(true);
+ } else {
+ coll->set_contended(false);
+ }
}
}
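
Note the fallback: a @Contended with no value, or with an unexpectedly shaped
value member, lands in contended group 0, the default group, whose members are
each padded individually. A string value names a shared group that is padded
only as a unit:

    import sun.misc.Contended;

    public class GroupsExample {
        @Contended("readers") volatile long readSeq;    // padded as a pair:
        @Contended("readers") volatile long readCache;  // no padding in between
        @Contended            volatile long writer;     // group 0: padded from everything
    }
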
-ClassFileParser::AnnotationCollector::ID ClassFileParser::AnnotationCollector::annotation_index(Symbol* name) {
+ClassFileParser::AnnotationCollector::ID
+ClassFileParser::AnnotationCollector::annotation_index(ClassLoaderData* loader_data,
+ Symbol* name) {
vmSymbols::SID sid = vmSymbols::find_sid(name);
+ bool privileged = false;
+ if (loader_data->is_the_null_class_loader_data()) {
+ // Privileged code can use all annotations. Other code silently drops some.
+ privileged = true;
+ }
switch (sid) {
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_ForceInline_signature):
if (_location != _in_method) break; // only allow for methods
+ if (!privileged) break; // only allow in privileged code
return _method_ForceInline;
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_DontInline_signature):
if (_location != _in_method) break; // only allow for methods
+ if (!privileged) break; // only allow in privileged code
return _method_DontInline;
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_LambdaForm_Compiled_signature):
if (_location != _in_method) break; // only allow for methods
+ if (!privileged) break; // only allow in privileged code
return _method_LambdaForm_Compiled;
case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_LambdaForm_Hidden_signature):
if (_location != _in_method) break; // only allow for methods
+ if (!privileged) break; // only allow in privileged code
return _method_LambdaForm_Hidden;
+ case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_misc_Contended_signature):
+ if (_location != _in_field && _location != _in_class) break; // only allow for fields and classes
+ if (!EnableContended || (RestrictContended && !privileged)) break; // honor privileges
+ return _sun_misc_Contended;
default: break;
}
return AnnotationCollector::_unknown;
}
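
In effect, classes loaded by the boot loader (null class loader data) may
always use the privileged annotations; for everything else @Contended is
honored only while RestrictContended is off, and EnableContended can switch the
feature off wholesale. The annotation is silently dropped otherwise, so user
code such as GroupsExample above typically runs as:

    java -XX:-RestrictContended GroupsExample     # honor @Contended in user classes
    java -XX:-EnableContended GroupsExample       # ignore @Contended everywhere
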
void ClassFileParser::FieldAnnotationCollector::apply_to(FieldInfo* f) {
- fatal("no field annotations yet");
+ if (is_contended())
+ f->set_contended_group(contended_group());
}
void ClassFileParser::MethodAnnotationCollector::apply_to(methodHandle m) {
@@ -1838,7 +1849,7 @@
}
void ClassFileParser::ClassAnnotationCollector::apply_to(instanceKlassHandle k) {
- fatal("no class annotations yet");
+ k->set_is_contended(is_contended());
}
@@ -2181,7 +2192,8 @@
runtime_visible_annotations_length = method_attribute_length;
runtime_visible_annotations = cfs->get_u1_buffer();
assert(runtime_visible_annotations != NULL, "null visible annotations");
- parse_annotations(runtime_visible_annotations,
+ parse_annotations(loader_data,
+ runtime_visible_annotations,
runtime_visible_annotations_length, cp, &parsed_annotations,
CHECK_(nullHandle));
cfs->skip_u1(runtime_visible_annotations_length, CHECK_(nullHandle));
@@ -2886,7 +2898,8 @@
runtime_visible_annotations_length = attribute_length;
runtime_visible_annotations = cfs->get_u1_buffer();
assert(runtime_visible_annotations != NULL, "null visible annotations");
- parse_annotations(runtime_visible_annotations,
+ parse_annotations(loader_data,
+ runtime_visible_annotations,
runtime_visible_annotations_length,
cp,
parsed_annotations,
@@ -3405,18 +3418,21 @@
// Size of Java itable (in words)
itable_size = access_flags.is_interface() ? 0 : klassItable::compute_itable_size(transitive_interfaces);
+ // get the padding width from the option
+ // TODO: Ask VM about specific CPU we are running on
+ int pad_size = ContendedPaddingWidth;
+
// Field size and offset computation
int nonstatic_field_size = super_klass() == NULL ? 0 : super_klass->nonstatic_field_size();
#ifndef PRODUCT
int orig_nonstatic_field_size = 0;
#endif
- int static_field_size = 0;
int next_static_oop_offset;
int next_static_double_offset;
int next_static_word_offset;
int next_static_short_offset;
int next_static_byte_offset;
- int next_static_type_offset;
+ int next_static_padded_offset;
int next_nonstatic_oop_offset;
int next_nonstatic_double_offset;
int next_nonstatic_word_offset;
@@ -3426,11 +3442,36 @@
int first_nonstatic_oop_offset;
int first_nonstatic_field_offset;
int next_nonstatic_field_offset;
+ int next_nonstatic_padded_offset;
+
+ // Count the contended fields by type.
+ int static_contended_count = 0;
+ int nonstatic_contended_count = 0;
+ FieldAllocationCount fac_contended;
+ for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
+ FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
+ if (fs.is_contended()) {
+ fac_contended.count[atype]++;
+ if (fs.access_flags().is_static()) {
+ static_contended_count++;
+ } else {
+ nonstatic_contended_count++;
+ }
+ }
+ }
+ int contended_count = static_contended_count + nonstatic_contended_count;
+
// Calculate the starting byte offsets
next_static_oop_offset = InstanceMirrorKlass::offset_of_static_fields();
+
+ // class is contended, pad before all the fields
+ if (parsed_annotations.is_contended()) {
+ next_static_oop_offset += pad_size;
+ }
+
next_static_double_offset = next_static_oop_offset +
- (fac.count[STATIC_OOP] * heapOopSize);
+ ((fac.count[STATIC_OOP] - fac_contended.count[STATIC_OOP]) * heapOopSize);
if ( fac.count[STATIC_DOUBLE] &&
(Universe::field_type_should_be_aligned(T_DOUBLE) ||
Universe::field_type_should_be_aligned(T_LONG)) ) {
@@ -3438,25 +3479,29 @@
}
next_static_word_offset = next_static_double_offset +
- (fac.count[STATIC_DOUBLE] * BytesPerLong);
+ ((fac.count[STATIC_DOUBLE] - fac_contended.count[STATIC_DOUBLE]) * BytesPerLong);
next_static_short_offset = next_static_word_offset +
- (fac.count[STATIC_WORD] * BytesPerInt);
+ ((fac.count[STATIC_WORD] - fac_contended.count[STATIC_WORD]) * BytesPerInt);
next_static_byte_offset = next_static_short_offset +
- (fac.count[STATIC_SHORT] * BytesPerShort);
- next_static_type_offset = align_size_up((next_static_byte_offset +
- fac.count[STATIC_BYTE] ), wordSize );
- static_field_size = (next_static_type_offset -
- next_static_oop_offset) / wordSize;
+ ((fac.count[STATIC_SHORT] - fac_contended.count[STATIC_SHORT]) * BytesPerShort);
+ next_static_padded_offset = next_static_byte_offset +
+ ((fac.count[STATIC_BYTE] - fac_contended.count[STATIC_BYTE]) * 1);
first_nonstatic_field_offset = instanceOopDesc::base_offset_in_bytes() +
nonstatic_field_size * heapOopSize;
+
+ // class is contended, pad before all the fields
+ if (parsed_annotations.is_contended()) {
+ first_nonstatic_field_offset += pad_size;
+ }
+
next_nonstatic_field_offset = first_nonstatic_field_offset;
- unsigned int nonstatic_double_count = fac.count[NONSTATIC_DOUBLE];
- unsigned int nonstatic_word_count = fac.count[NONSTATIC_WORD];
- unsigned int nonstatic_short_count = fac.count[NONSTATIC_SHORT];
- unsigned int nonstatic_byte_count = fac.count[NONSTATIC_BYTE];
- unsigned int nonstatic_oop_count = fac.count[NONSTATIC_OOP];
+ unsigned int nonstatic_double_count = fac.count[NONSTATIC_DOUBLE] - fac_contended.count[NONSTATIC_DOUBLE];
+ unsigned int nonstatic_word_count = fac.count[NONSTATIC_WORD] - fac_contended.count[NONSTATIC_WORD];
+ unsigned int nonstatic_short_count = fac.count[NONSTATIC_SHORT] - fac_contended.count[NONSTATIC_SHORT];
+ unsigned int nonstatic_byte_count = fac.count[NONSTATIC_BYTE] - fac_contended.count[NONSTATIC_BYTE];
+ unsigned int nonstatic_oop_count = fac.count[NONSTATIC_OOP] - fac_contended.count[NONSTATIC_OOP];
bool super_has_nonstatic_fields =
(super_klass() != NULL && super_klass->has_nonstatic_fields());
@@ -3529,12 +3574,12 @@
}
if( allocation_style == 0 ) {
- // Fields order: oops, longs/doubles, ints, shorts/chars, bytes
+ // Fields order: oops, longs/doubles, ints, shorts/chars, bytes, padded fields
next_nonstatic_oop_offset = next_nonstatic_field_offset;
next_nonstatic_double_offset = next_nonstatic_oop_offset +
(nonstatic_oop_count * heapOopSize);
} else if( allocation_style == 1 ) {
- // Fields order: longs/doubles, ints, shorts/chars, bytes, oops
+ // Fields order: longs/doubles, ints, shorts/chars, bytes, oops, padded fields
next_nonstatic_double_offset = next_nonstatic_field_offset;
} else if( allocation_style == 2 ) {
// Fields allocation: oops fields in super and sub classes are together.
@@ -3613,27 +3658,33 @@
(nonstatic_word_count * BytesPerInt);
next_nonstatic_byte_offset = next_nonstatic_short_offset +
(nonstatic_short_count * BytesPerShort);
-
- int notaligned_offset;
- if( allocation_style == 0 ) {
- notaligned_offset = next_nonstatic_byte_offset + nonstatic_byte_count;
- } else { // allocation_style == 1
- next_nonstatic_oop_offset = next_nonstatic_byte_offset + nonstatic_byte_count;
+ next_nonstatic_padded_offset = next_nonstatic_byte_offset +
+ nonstatic_byte_count;
+
+ // let oops jump before padding with this allocation style
+ if( allocation_style == 1 ) {
+ next_nonstatic_oop_offset = next_nonstatic_padded_offset;
if( nonstatic_oop_count > 0 ) {
next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, heapOopSize);
}
- notaligned_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize);
+ next_nonstatic_padded_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize);
}
- next_nonstatic_type_offset = align_size_up(notaligned_offset, heapOopSize );
- nonstatic_field_size = nonstatic_field_size + ((next_nonstatic_type_offset
- - first_nonstatic_field_offset)/heapOopSize);
// Iterate over fields again and compute correct offsets.
// The field allocation type was temporarily stored in the offset slot.
// oop fields are located before non-oop fields (static and non-static).
for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
+
+ // skip already laid out fields
+ if (fs.is_offset_set()) continue;
+
+ // contended fields are handled below
+ if (fs.is_contended()) continue;
+
int real_offset;
- FieldAllocationType atype = (FieldAllocationType) fs.offset();
+ FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
+
+ // pack the rest of the fields
switch (atype) {
case STATIC_OOP:
real_offset = next_static_oop_offset;
@@ -3722,13 +3773,225 @@
fs.set_offset(real_offset);
}
+
+ // Handle the contended cases.
+ //
+ // Each contended field should not intersect the cache line with another contended field.
+ // In the absence of alignment information, we pessimistically separate
+ // the fields with full-width padding.
+ //
+ // Additionally, this should not break alignment for the fields, so we round the alignment up
+ // for each field.
+ if (contended_count > 0) {
+
+ // if there is at least one contended field, we need to have pre-padding for them
+ if (nonstatic_contended_count > 0) {
+ next_nonstatic_padded_offset += pad_size;
+ }
+
+ // collect all contended groups
+ BitMap bm(cp->size());
+ for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
+ // skip already laid out fields
+ if (fs.is_offset_set()) continue;
+
+ if (fs.is_contended()) {
+ bm.set_bit(fs.contended_group());
+ }
+ }
+
+ int current_group = -1;
+ while ((current_group = (int)bm.get_next_one_offset(current_group + 1)) != (int)bm.size()) {
+
+ for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
+
+ // skip already laid out fields
+ if (fs.is_offset_set()) continue;
+
+ // skip non-contended fields and fields from different group
+ if (!fs.is_contended() || (fs.contended_group() != current_group)) continue;
+
+ // handle statics below
+ if (fs.access_flags().is_static()) continue;
+
+ int real_offset;
+ FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
+
+ switch (atype) {
+ case NONSTATIC_BYTE:
+ next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, 1);
+ real_offset = next_nonstatic_padded_offset;
+ next_nonstatic_padded_offset += 1;
+ break;
+
+ case NONSTATIC_SHORT:
+ next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerShort);
+ real_offset = next_nonstatic_padded_offset;
+ next_nonstatic_padded_offset += BytesPerShort;
+ break;
+
+ case NONSTATIC_WORD:
+ next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerInt);
+ real_offset = next_nonstatic_padded_offset;
+ next_nonstatic_padded_offset += BytesPerInt;
+ break;
+
+ case NONSTATIC_DOUBLE:
+ next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerLong);
+ real_offset = next_nonstatic_padded_offset;
+ next_nonstatic_padded_offset += BytesPerLong;
+ break;
+
+ case NONSTATIC_OOP:
+ next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, heapOopSize);
+ real_offset = next_nonstatic_padded_offset;
+ next_nonstatic_padded_offset += heapOopSize;
+
+ // Create new oop map
+ nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
+ nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
+ nonstatic_oop_map_count += 1;
+ if( first_nonstatic_oop_offset == 0 ) { // Undefined
+ first_nonstatic_oop_offset = real_offset;
+ }
+ break;
+
+ default:
+ ShouldNotReachHere();
+ }
+
+ if (fs.contended_group() == 0) {
+ // A contended group defines an equivalence class over the fields:
+ // fields within the same contended group are not padded from each other.
+ // The only exception is the default group (0), which does not define
+ // such an equivalence, so its fields are padded from one another.
+ next_nonstatic_padded_offset += pad_size;
+ }
+
+ fs.set_offset(real_offset);
+ } // for
+
+ // Start laying out the next group.
+ // Note that this will effectively pad the last group in the back;
+ // this is expected to alleviate memory contention effects for
+ // subclass fields and/or adjacent objects.
+ // If this was the default group, the padding is already in place.
+ if (current_group != 0) {
+ next_nonstatic_padded_offset += pad_size;
+ }
+ }
+
+ // handle static fields
+
+ // if there is at least one contended field, we need to have pre-padding for them
+ if (static_contended_count > 0) {
+ next_static_padded_offset += pad_size;
+ }
+
+ current_group = -1;
+ while ((current_group = (int)bm.get_next_one_offset(current_group + 1)) != (int)bm.size()) {
+
+ for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
+
+ // skip already laid out fields
+ if (fs.is_offset_set()) continue;
+
+ // skip non-contended fields and fields from different group
+ if (!fs.is_contended() || (fs.contended_group() != current_group)) continue;
+
+ // non-statics already handled above
+ if (!fs.access_flags().is_static()) continue;
+
+ int real_offset;
+ FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
+
+ switch (atype) {
+
+ case STATIC_BYTE:
+ next_static_padded_offset = align_size_up(next_static_padded_offset, 1);
+ real_offset = next_static_padded_offset;
+ next_static_padded_offset += 1;
+ break;
+
+ case STATIC_SHORT:
+ next_static_padded_offset = align_size_up(next_static_padded_offset, BytesPerShort);
+ real_offset = next_static_padded_offset;
+ next_static_padded_offset += BytesPerShort;
+ break;
+
+ case STATIC_WORD:
+ next_static_padded_offset = align_size_up(next_static_padded_offset, BytesPerInt);
+ real_offset = next_static_padded_offset;
+ next_static_padded_offset += BytesPerInt;
+ break;
+
+ case STATIC_DOUBLE:
+ next_static_padded_offset = align_size_up(next_static_padded_offset, BytesPerLong);
+ real_offset = next_static_padded_offset;
+ next_static_padded_offset += BytesPerLong;
+ break;
+
+ case STATIC_OOP:
+ next_static_padded_offset = align_size_up(next_static_padded_offset, heapOopSize);
+ real_offset = next_static_padded_offset;
+ next_static_padded_offset += heapOopSize;
+ break;
+
+ default:
+ ShouldNotReachHere();
+ }
+
+ if (fs.contended_group() == 0) {
+ // A contended group defines an equivalence class over the fields:
+ // fields within the same contended group are not padded from each other.
+ // The only exception is the default group (0), which does not define
+ // such an equivalence, so its fields are padded from one another.
+ next_static_padded_offset += pad_size;
+ }
+
+ fs.set_offset(real_offset);
+ } // for
+
+ // Start laying out the next group.
+ // Note that this will effectively pad the last group in the back;
+ // this is expected to alleviate memory contention effects for
+ // subclass fields and/or adjacent objects.
+ // If this was the default group, the padding is already in place.
+ if (current_group != 0) {
+ next_static_padded_offset += pad_size;
+ }
+
+ }
+
+ } // handle contended
+
// Size of instances
int instance_size;
+ int notaligned_offset = next_nonstatic_padded_offset;
+
+ // Entire class is contended, pad in the back.
+ // This helps to alleviate memory contention effects for subclass fields
+ // and/or adjacent objects.
+ if (parsed_annotations.is_contended()) {
+ notaligned_offset += pad_size;
+ next_static_padded_offset += pad_size;
+ }
+
+ int next_static_type_offset = align_size_up(next_static_padded_offset, wordSize);
+ int static_field_size = (next_static_type_offset -
+ InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
+
+ next_nonstatic_type_offset = align_size_up(notaligned_offset, heapOopSize );
+ nonstatic_field_size = nonstatic_field_size + ((next_nonstatic_type_offset
+ - first_nonstatic_field_offset)/heapOopSize);
+
next_nonstatic_type_offset = align_size_up(notaligned_offset, wordSize );
instance_size = align_object_size(next_nonstatic_type_offset / wordSize);
- assert(instance_size == align_object_size(align_size_up((instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize), wordSize) / wordSize), "consistent layout helper value");
+ assert(instance_size == align_object_size(align_size_up(
+ (instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize + ((parsed_annotations.is_contended()) ? pad_size : 0)),
+ wordSize) / wordSize), "consistent layout helper value");
// Number of non-static oop map blocks allocated at end of klass.
const unsigned int total_oop_map_count =
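
Putting the nonstatic pass together: contended fields are skipped by the
regular packing loop, then appended at next_nonstatic_padded_offset group by
group, with pad_size bytes before the contended block, between groups (and
between every member of group 0), and after the last group. For a hypothetical
class, assuming the default -XX:ContendedPaddingWidth=128, a 12-byte object
header, no superclass fields, and allocation style 0, the arithmetic works out
as:

    import sun.misc.Contended;

    public class LayoutExample {
        int plain;                  // @ 12: packed as usual, right after the header
        @Contended volatile int a;  // @144: regular fields end at 16, plus 128 pre-padding
        @Contended volatile int b;  // @276: 144 + 4, plus 128 group-0 inter-padding
    }                               // b pads out to 408; instance ends at 408 bytes
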
@@ -4008,6 +4271,18 @@
}
#endif
+#ifndef PRODUCT
+ if (PrintFieldLayout) {
+ print_field_layout(name,
+ fields,
+ cp,
+ instance_size,
+ first_nonstatic_field_offset,
+ next_nonstatic_field_offset,
+ next_static_type_offset);
+ }
+#endif
+
// preserve result across HandleMark
preserve_this_klass = this_klass();
}
@@ -4020,6 +4295,38 @@
return this_klass;
}
+void ClassFileParser::print_field_layout(Symbol* name,
+ Array<u2>* fields,
+ constantPoolHandle cp,
+ int instance_size,
+ int instance_fields_start,
+ int instance_fields_end,
+ int static_fields_end) {
+ tty->print("%s: field layout\n", name->as_klass_external_name());
+ tty->print(" @%3d %s\n", instance_fields_start, "--- instance fields start ---");
+ for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
+ if (!fs.access_flags().is_static()) {
+ tty->print(" @%3d \"%s\" %s\n",
+ fs.offset(),
+ fs.name()->as_klass_external_name(),
+ fs.signature()->as_klass_external_name());
+ }
+ }
+ tty->print(" @%3d %s\n", instance_fields_end, "--- instance fields end ---");
+ tty->print(" @%3d %s\n", instance_size * wordSize, "--- instance ends ---");
+ tty->print(" @%3d %s\n", InstanceMirrorKlass::offset_of_static_fields(), "--- static fields start ---");
+ for (AllFieldStream fs(fields, cp); !fs.done(); fs.next()) {
+ if (fs.access_flags().is_static()) {
+ tty->print(" @%3d \"%s\" %s\n",
+ fs.offset(),
+ fs.name()->as_klass_external_name(),
+ fs.signature()->as_klass_external_name());
+ }
+ }
+ tty->print(" @%3d %s\n", static_fields_end, "--- static fields end ---");
+ tty->print("\n");
+}
+
unsigned int
ClassFileParser::compute_oop_map_count(instanceKlassHandle super,
unsigned int nonstatic_oop_map_count,
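
print_field_layout gives a quick way to eyeball the result. For the
LayoutExample sketch above, a debug build (PrintFieldLayout is a notproduct
flag) would print along these lines, abridged, with the other markers elided:

    $ java -XX:-RestrictContended -XX:+PrintFieldLayout LayoutExample
    LayoutExample: field layout
      @ 12 --- instance fields start ---
      @ 12 "plain" I
      @144 "a" I
      @276 "b" I
      ...
      @408 --- instance ends ---
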
--- a/hotspot/src/share/vm/classfile/classFileParser.hpp Fri Jan 11 09:53:24 2013 -0800
+++ b/hotspot/src/share/vm/classfile/classFileParser.hpp Mon Jan 14 15:17:47 2013 +0100
@@ -95,17 +95,20 @@
_method_DontInline,
_method_LambdaForm_Compiled,
_method_LambdaForm_Hidden,
+ _sun_misc_Contended,
_annotation_LIMIT
};
const Location _location;
int _annotations_present;
+ u2 _contended_group;
+
AnnotationCollector(Location location)
: _location(location), _annotations_present(0)
{
assert((int)_annotation_LIMIT <= (int)sizeof(_annotations_present) * BitsPerByte, "");
}
// If this annotation name has an ID, report it (or _none).
- ID annotation_index(Symbol* name);
+ ID annotation_index(ClassLoaderData* loader_data, Symbol* name);
// Set the annotation name:
void set_annotation(ID id) {
assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
@@ -114,6 +117,12 @@
// Report if the annotation is present.
bool has_any_annotations() { return _annotations_present != 0; }
bool has_annotation(ID id) { return (nth_bit((int)id) & _annotations_present) != 0; }
+
+ void set_contended_group(u2 group) { _contended_group = group; }
+ u2 contended_group() { return _contended_group; }
+
+ void set_contended(bool contended) { if (contended) set_annotation(_sun_misc_Contended); }
+ bool is_contended() { return has_annotation(_sun_misc_Contended); }
};
class FieldAnnotationCollector: public AnnotationCollector {
public:
@@ -177,6 +186,14 @@
Array<AnnotationArray*>** fields_type_annotations,
u2* java_fields_count_ptr, TRAPS);
+ void print_field_layout(Symbol* name,
+ Array<u2>* fields,
+ constantPoolHandle cp,
+ int instance_size,
+ int instance_fields_start,
+ int instance_fields_end,
+ int static_fields_end);
+
// Method parsing
methodHandle parse_method(ClassLoaderData* loader_data,
constantPoolHandle cp,
@@ -247,7 +264,8 @@
int runtime_invisible_annotations_length, TRAPS);
int skip_annotation(u1* buffer, int limit, int index);
int skip_annotation_value(u1* buffer, int limit, int index);
- void parse_annotations(u1* buffer, int limit, constantPoolHandle cp,
+ void parse_annotations(ClassLoaderData* loader_data,
+ u1* buffer, int limit, constantPoolHandle cp,
/* Results (currently, only one result is supported): */
AnnotationCollector* result,
TRAPS);
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp Fri Jan 11 09:53:24 2013 -0800
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp Mon Jan 14 15:17:47 2013 +0100
@@ -194,7 +194,10 @@
template(java_lang_VirtualMachineError, "java/lang/VirtualMachineError") \
template(java_lang_StackOverflowError, "java/lang/StackOverflowError") \
template(java_lang_StackTraceElement, "java/lang/StackTraceElement") \
+ \
+ /* Concurrency support */ \
template(java_util_concurrent_locks_AbstractOwnableSynchronizer, "java/util/concurrent/locks/AbstractOwnableSynchronizer") \
+ template(sun_misc_Contended_signature, "Lsun/misc/Contended;") \
\
/* class symbols needed by intrinsics */ \
VM_INTRINSICS_DO(VM_INTRINSIC_IGNORE, template, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE) \
@@ -284,7 +287,7 @@
NOT_LP64( do_alias(intptr_signature, int_signature) ) \
LP64_ONLY( do_alias(intptr_signature, long_signature) ) \
template(selectAlternative_signature, "(ZLjava/lang/invoke/MethodHandle;Ljava/lang/invoke/MethodHandle;)Ljava/lang/invoke/MethodHandle;") \
- \
+ \
/* common method and field names */ \
template(object_initializer_name, "<init>") \
template(class_initializer_name, "<clinit>") \
--- a/hotspot/src/share/vm/oops/fieldInfo.hpp Fri Jan 11 09:53:24 2013 -0800
+++ b/hotspot/src/share/vm/oops/fieldInfo.hpp Mon Jan 14 15:17:47 2013 +0100
@@ -43,14 +43,29 @@
public:
// fields
// Field info extracted from the class file and stored
- // as an array of 7 shorts
+ // as an array of 6 shorts.
+
+#define FIELDINFO_TAG_SIZE 2
+#define FIELDINFO_TAG_BLANK 0
+#define FIELDINFO_TAG_OFFSET 1
+#define FIELDINFO_TAG_TYPE_PLAIN 2
+#define FIELDINFO_TAG_TYPE_CONTENDED 3
+#define FIELDINFO_TAG_MASK 3
+
+ // A packed field carries the tag in its low two bits and can be any of:
+ // hi bits <--------------------------- lo bits
+ // |---------high---------|---------low---------|
+ // ..........................................00 - blank
+ // [------------------offset----------------]01 - real field offset
+ // ......................[-------type-------]10 - plain field with type
+ // [--contention_group--][-------type-------]11 - contended field with type and contention group
enum FieldOffset {
access_flags_offset = 0,
name_index_offset = 1,
signature_index_offset = 2,
initval_index_offset = 3,
- low_offset = 4,
- high_offset = 5,
+ low_packed_offset = 4,
+ high_packed_offset = 5,
field_slots = 6
};
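
Concretely, a field slot now goes through tag states instead of holding a raw
offset. A worked example of the packing (values hypothetical; allocation type 4
picked arbitrarily):

    public class FieldInfoTagDemo {
        static final int TAG_SIZE = 2, TAG_MASK = 3;
        static final int TAG_OFFSET = 1, TAG_TYPE_PLAIN = 2, TAG_TYPE_CONTENDED = 3;

        public static void main(String[] args) {
            // set_allocation_type(4): low short holds (4 << 2) | 0b10 = 18
            int lo = (4 << TAG_SIZE) | TAG_TYPE_PLAIN;
            System.out.println((lo & TAG_MASK) == TAG_TYPE_PLAIN);     // true
            System.out.println(lo >> TAG_SIZE);                        // 4

            // set_contended_group(7): tag becomes 0b11, high short holds the group
            lo |= TAG_TYPE_CONTENDED;
            int hi = 7;
            System.out.println((lo & TAG_MASK) == TAG_TYPE_CONTENDED); // true

            // set_offset(40) finally overwrites both shorts: (40 << 2) | 0b01
            lo = (40 << TAG_SIZE) | TAG_OFFSET;
            System.out.println(lo >> TAG_SIZE);                        // 40
        }
    }
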
@@ -76,17 +91,90 @@
void initialize(u2 access_flags,
u2 name_index,
u2 signature_index,
- u2 initval_index,
- u4 offset) {
+ u2 initval_index) {
_shorts[access_flags_offset] = access_flags;
_shorts[name_index_offset] = name_index;
_shorts[signature_index_offset] = signature_index;
_shorts[initval_index_offset] = initval_index;
- set_offset(offset);
+ _shorts[low_packed_offset] = 0;
+ _shorts[high_packed_offset] = 0;
}
u2 access_flags() const { return _shorts[access_flags_offset]; }
- u4 offset() const { return build_int_from_shorts(_shorts[low_offset], _shorts[high_offset]); }
+ u4 offset() const {
+ u2 lo = _shorts[low_packed_offset];
+ switch(lo & FIELDINFO_TAG_MASK) {
+ case FIELDINFO_TAG_OFFSET:
+ return build_int_from_shorts(_shorts[low_packed_offset], _shorts[high_packed_offset]) >> FIELDINFO_TAG_SIZE;
+#ifndef PRODUCT
+ case FIELDINFO_TAG_TYPE_PLAIN:
+ ShouldNotReachHere2("Asking offset for the plain type field");
+ case FIELDINFO_TAG_TYPE_CONTENDED:
+ ShouldNotReachHere2("Asking offset for the contended type field");
+ case FIELDINFO_TAG_BLANK:
+ ShouldNotReachHere2("Asking offset for the blank field");
+#endif
+ }
+ ShouldNotReachHere();
+ return 0;
+ }
+
+ bool is_contended() const {
+ u2 lo = _shorts[low_packed_offset];
+ switch(lo & FIELDINFO_TAG_MASK) {
+ case FIELDINFO_TAG_TYPE_PLAIN:
+ return false;
+ case FIELDINFO_TAG_TYPE_CONTENDED:
+ return true;
+#ifndef PRODUCT
+ case FIELDINFO_TAG_OFFSET:
+ ShouldNotReachHere2("Asking contended flag for the field with offset");
+ case FIELDINFO_TAG_BLANK:
+ ShouldNotReachHere2("Asking contended flag for the blank field");
+#endif
+ }
+ ShouldNotReachHere();
+ return false;
+ }
+
+ u2 contended_group() const {
+ u2 lo = _shorts[low_packed_offset];
+ switch(lo & FIELDINFO_TAG_MASK) {
+ case FIELDINFO_TAG_TYPE_PLAIN:
+ return 0;
+ case FIELDINFO_TAG_TYPE_CONTENDED:
+ return _shorts[high_packed_offset];
+#ifndef PRODUCT
+ case FIELDINFO_TAG_OFFSET:
+ ShouldNotReachHere2("Asking the contended group for the field with offset");
+ case FIELDINFO_TAG_BLANK:
+ ShouldNotReachHere2("Asking the contended group for the blank field");
+#endif
+ }
+ ShouldNotReachHere();
+ return 0;
+ }
+
+ u2 allocation_type() const {
+ u2 lo = _shorts[low_packed_offset];
+ switch(lo & FIELDINFO_TAG_MASK) {
+ case FIELDINFO_TAG_TYPE_PLAIN:
+ case FIELDINFO_TAG_TYPE_CONTENDED:
+ return (lo >> FIELDINFO_TAG_SIZE);
+#ifndef PRODUCT
+ case FIELDINFO_TAG_OFFSET:
+ ShouldNotReachHere2("Asking the field type for field with offset");
+ case FIELDINFO_TAG_BLANK:
+ ShouldNotReachHere2("Asking the field type for the blank field");
+#endif
+ }
+ ShouldNotReachHere();
+ return 0;
+ }
+
+ bool is_offset_set() const {
+ return (_shorts[low_packed_offset] & FIELDINFO_TAG_MASK) == FIELDINFO_TAG_OFFSET;
+ }
Symbol* name(constantPoolHandle cp) const {
int index = name_index();
@@ -106,8 +194,46 @@
void set_access_flags(u2 val) { _shorts[access_flags_offset] = val; }
void set_offset(u4 val) {
- _shorts[low_offset] = extract_low_short_from_int(val);
- _shorts[high_offset] = extract_high_short_from_int(val);
+ val = val << FIELDINFO_TAG_SIZE; // make room for tag
+ _shorts[low_packed_offset] = extract_low_short_from_int(val) | FIELDINFO_TAG_OFFSET;
+ _shorts[high_packed_offset] = extract_high_short_from_int(val);
+ }
+
+ void set_allocation_type(int type) {
+ u2 lo = _shorts[low_packed_offset];
+ switch(lo & FIELDINFO_TAG_MASK) {
+ case FIELDINFO_TAG_BLANK:
+ _shorts[low_packed_offset] = ((type << FIELDINFO_TAG_SIZE)) & 0xFFFF;
+ _shorts[low_packed_offset] &= ~FIELDINFO_TAG_MASK;
+ _shorts[low_packed_offset] |= FIELDINFO_TAG_TYPE_PLAIN;
+ return;
+#ifndef PRODUCT
+ case FIELDINFO_TAG_TYPE_PLAIN:
+ case FIELDINFO_TAG_TYPE_CONTENDED:
+ case FIELDINFO_TAG_OFFSET:
+ ShouldNotReachHere2("Setting the field type with overwriting");
+#endif
+ }
+ ShouldNotReachHere();
+ }
+
+ void set_contended_group(u2 val) {
+ u2 lo = _shorts[low_packed_offset];
+ switch(lo & FIELDINFO_TAG_MASK) {
+ case FIELDINFO_TAG_TYPE_PLAIN:
+ _shorts[low_packed_offset] |= FIELDINFO_TAG_TYPE_CONTENDED;
+ _shorts[high_packed_offset] = val;
+ return;
+#ifndef PRODUCT
+ case FIELDINFO_TAG_TYPE_CONTENDED:
+ ShouldNotReachHere2("Overwriting contended group");
+ case FIELDINFO_TAG_BLANK:
+ ShouldNotReachHere2("Setting contended group for the blank field");
+ case FIELDINFO_TAG_OFFSET:
+ ShouldNotReachHere2("Setting contended group for field with offset");
+#endif
+ }
+ ShouldNotReachHere();
}
bool is_internal() const {
--- a/hotspot/src/share/vm/oops/fieldStreams.hpp Fri Jan 11 09:53:24 2013 -0800
+++ b/hotspot/src/share/vm/oops/fieldStreams.hpp Mon Jan 14 15:17:47 2013 +0100
@@ -160,9 +160,26 @@
return field()->offset();
}
+ int allocation_type() const {
+ return field()->allocation_type();
+ }
+
void set_offset(int offset) {
field()->set_offset(offset);
}
+
+ bool is_offset_set() const {
+ return field()->is_offset_set();
+ }
+
+ bool is_contended() const {
+ return field()->is_contended();
+ }
+
+ int contended_group() const {
+ return field()->contended_group();
+ }
+
};
// Iterate over only the internal fields
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp Fri Jan 11 09:53:24 2013 -0800
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp Mon Jan 14 15:17:47 2013 +0100
@@ -230,7 +230,8 @@
_misc_rewritten = 1 << 0, // methods rewritten.
_misc_has_nonstatic_fields = 1 << 1, // for sizing with UseCompressedOops
_misc_should_verify_class = 1 << 2, // allow caching of preverification
- _misc_is_anonymous = 1 << 3 // has embedded _inner_classes field
+ _misc_is_anonymous = 1 << 3, // has embedded _inner_classes field
+ _misc_is_contended = 1 << 4 // marked with contended annotation
};
u2 _misc_flags;
u2 _minor_version; // minor version number of class file
@@ -550,6 +551,17 @@
return is_anonymous() ? java_mirror() : class_loader();
}
+ bool is_contended() const {
+ return (_misc_flags & _misc_is_contended) != 0;
+ }
+ void set_is_contended(bool value) {
+ if (value) {
+ _misc_flags |= _misc_is_contended;
+ } else {
+ _misc_flags &= ~_misc_is_contended;
+ }
+ }
+
// signers
objArrayOop signers() const { return _signers; }
void set_signers(objArrayOop s) { klass_oop_store((oop*)&_signers, s); }
--- a/hotspot/src/share/vm/runtime/globals.hpp Fri Jan 11 09:53:24 2013 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp Mon Jan 14 15:17:47 2013 +0100
@@ -1075,7 +1075,7 @@
\
product(intx, ClearFPUAtPark, 0, "(Unsafe,Unstable)" ) \
\
- product(intx, hashCode, 0, \
+ product(intx, hashCode, 5, \
"(Unstable) select hashCode generation algorithm" ) \
\
product(intx, WorkAroundNPTLTimedWaitHang, 1, \
@@ -1173,6 +1173,18 @@
notproduct(bool, PrintCompactFieldsSavings, false, \
"Print how many words were saved with CompactFields") \
\
+ notproduct(bool, PrintFieldLayout, false, \
+ "Print field layout for each class") \
+ \
+ product(intx, ContendedPaddingWidth, 128, \
+ "How many bytes to pad the fields/classes marked @Contended with")\
+ \
+ product(bool, EnableContended, true, \
+ "Enable @Contended annotation support") \
+ \
+ product(bool, RestrictContended, true, \
+ "Restrict @Contended to trusted classes") \
+ \
product(bool, UseBiasedLocking, true, \
"Enable biased locking in JVM") \
\
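
Taken together, the new knobs (the platform code above may still raise
ContendedPaddingWidth to the CPU's cache line size):

    java -XX:ContendedPaddingWidth=64 ...   # pad with 64 bytes instead of 128
    java -XX:+PrintFieldLayout ...          # debug builds: dump computed offsets
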
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Fri Jan 11 09:53:24 2013 -0800
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Mon Jan 14 15:17:47 2013 +0100
@@ -2284,10 +2284,17 @@
declare_constant(FieldInfo::name_index_offset) \
declare_constant(FieldInfo::signature_index_offset) \
declare_constant(FieldInfo::initval_index_offset) \
- declare_constant(FieldInfo::low_offset) \
- declare_constant(FieldInfo::high_offset) \
+ declare_constant(FieldInfo::low_packed_offset) \
+ declare_constant(FieldInfo::high_packed_offset) \
declare_constant(FieldInfo::field_slots) \
\
+ /*************************************/ \
+ /* FieldInfo tag constants */ \
+ /*************************************/ \
+ \
+ declare_preprocessor_constant("FIELDINFO_TAG_SIZE", FIELDINFO_TAG_SIZE) \
+ declare_preprocessor_constant("FIELDINFO_TAG_OFFSET", FIELDINFO_TAG_OFFSET) \
+ \
/************************************************/ \
/* InstanceKlass InnerClassAttributeOffset enum */ \
/************************************************/ \