--- a/hotspot/.hgtags Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/.hgtags Wed Jul 05 18:28:20 2017 +0200
@@ -289,3 +289,5 @@
d0337c31c8be7716369b4e7c3bd5f352983c6a06 hs25-b06
dccd40de8db1fa96f186e6179907818d75320440 jdk8-b62
dc16fe422c535ecd4e9f80fb814a1bb9704da6f5 hs25-b07
+acabb5c282f59be7e3238920b2ea06b684ab68f7 jdk8-b63
+8cb93eadfb6dcab88d91b8e2cd3e0e07d0ac4048 hs25-b08
--- a/hotspot/make/Makefile Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/make/Makefile Wed Jul 05 18:28:20 2017 +0200
@@ -453,14 +453,30 @@
ifeq ($(JVM_VARIANT_ZEROSHARK), true)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
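+ # Also export the matching .debuginfo and .diz (zipped debug info) files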
+ $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(SHARK_DIR)/%.debuginfo
+ $(install-file)
+ $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(SHARK_DIR)/%.diz
+ $(install-file)
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
+ $(EXPORT_SERVER_DIR)/%.debuginfo: $(SHARK_DIR)/%.debuginfo
+ $(install-file)
+ $(EXPORT_SERVER_DIR)/%.diz: $(SHARK_DIR)/%.diz
+ $(install-file)
endif
ifeq ($(JVM_VARIANT_ZERO), true)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
+ $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo
+ $(install-file)
+ $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: $(ZERO_DIR)/%.diz
+ $(install-file)
$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
$(install-file)
+ $(EXPORT_SERVER_DIR)/%.debuginfo: $(ZERO_DIR)/%.debuginfo
+ $(install-file)
+ $(EXPORT_SERVER_DIR)/%.diz: $(ZERO_DIR)/%.diz
+ $(install-file)
endif
ifeq ($(JVM_VARIANT_MINIMAL1), true)
$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX)
--- a/hotspot/make/hotspot_version Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/make/hotspot_version Wed Jul 05 18:28:20 2017 +0200
@@ -35,7 +35,7 @@
HS_MAJOR_VER=25
HS_MINOR_VER=0
-HS_BUILD_NUMBER=07
+HS_BUILD_NUMBER=08
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
--- a/hotspot/src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/cpu/zero/vm/cppInterpreterGenerator_zero.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -31,12 +31,17 @@
return _masm;
}
- protected:
- address generate_entry(address entry_point) {
- ZeroEntry *entry = (ZeroEntry *) assembler()->pc();
- assembler()->advance(sizeof(ZeroEntry));
+ public:
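+ // Static so that the method handle code (methodHandles_zero.cpp) can emit
+ // ZeroEntry stubs with only a MacroAssembler in hand.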
+ static address generate_entry_impl(MacroAssembler* masm, address entry_point) {
+ ZeroEntry *entry = (ZeroEntry *) masm->pc();
+ masm->advance(sizeof(ZeroEntry));
entry->set_entry_point(entry_point);
return (address) entry;
}
+ protected:
+ address generate_entry(address entry_point) {
+ return generate_entry_impl(assembler(), entry_point);
+ }
+
#endif // CPU_ZERO_VM_CPPINTERPRETERGENERATOR_ZERO_HPP
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -180,25 +180,6 @@
method, istate->osr_entry(), istate->osr_buf(), THREAD);
return;
}
- else if (istate->msg() == BytecodeInterpreter::call_method_handle) {
- oop method_handle = istate->callee();
-
- // Trim back the stack to put the parameters at the top
- stack->set_sp(istate->stack() + 1);
-
- // Make the call
- process_method_handle(method_handle, THREAD);
- fixup_after_potential_safepoint();
-
- // Convert the result
- istate->set_stack(stack->sp() - 1);
-
- // Restore the stack
- stack->set_sp(istate->stack_limit() + 1);
-
- // Resume the interpreter
- istate->set_msg(BytecodeInterpreter::method_resume);
- }
else {
ShouldNotReachHere();
}
@@ -535,35 +516,35 @@
if (entry->is_volatile()) {
switch (entry->flag_state()) {
case ctos:
- SET_LOCALS_INT(object->char_field_acquire(entry->f2()), 0);
+ SET_LOCALS_INT(object->char_field_acquire(entry->f2_as_index()), 0);
break;
case btos:
- SET_LOCALS_INT(object->byte_field_acquire(entry->f2()), 0);
+ SET_LOCALS_INT(object->byte_field_acquire(entry->f2_as_index()), 0);
break;
case stos:
- SET_LOCALS_INT(object->short_field_acquire(entry->f2()), 0);
+ SET_LOCALS_INT(object->short_field_acquire(entry->f2_as_index()), 0);
break;
case itos:
- SET_LOCALS_INT(object->int_field_acquire(entry->f2()), 0);
+ SET_LOCALS_INT(object->int_field_acquire(entry->f2_as_index()), 0);
break;
case ltos:
- SET_LOCALS_LONG(object->long_field_acquire(entry->f2()), 0);
+ SET_LOCALS_LONG(object->long_field_acquire(entry->f2_as_index()), 0);
break;
case ftos:
- SET_LOCALS_FLOAT(object->float_field_acquire(entry->f2()), 0);
+ SET_LOCALS_FLOAT(object->float_field_acquire(entry->f2_as_index()), 0);
break;
case dtos:
- SET_LOCALS_DOUBLE(object->double_field_acquire(entry->f2()), 0);
+ SET_LOCALS_DOUBLE(object->double_field_acquire(entry->f2_as_index()), 0);
break;
case atos:
- SET_LOCALS_OBJECT(object->obj_field_acquire(entry->f2()), 0);
+ SET_LOCALS_OBJECT(object->obj_field_acquire(entry->f2_as_index()), 0);
break;
default:
@@ -573,35 +554,35 @@
else {
switch (entry->flag_state()) {
case ctos:
- SET_LOCALS_INT(object->char_field(entry->f2()), 0);
+ SET_LOCALS_INT(object->char_field(entry->f2_as_index()), 0);
break;
case btos:
- SET_LOCALS_INT(object->byte_field(entry->f2()), 0);
+ SET_LOCALS_INT(object->byte_field(entry->f2_as_index()), 0);
break;
case stos:
- SET_LOCALS_INT(object->short_field(entry->f2()), 0);
+ SET_LOCALS_INT(object->short_field(entry->f2_as_index()), 0);
break;
case itos:
- SET_LOCALS_INT(object->int_field(entry->f2()), 0);
+ SET_LOCALS_INT(object->int_field(entry->f2_as_index()), 0);
break;
case ltos:
- SET_LOCALS_LONG(object->long_field(entry->f2()), 0);
+ SET_LOCALS_LONG(object->long_field(entry->f2_as_index()), 0);
break;
case ftos:
- SET_LOCALS_FLOAT(object->float_field(entry->f2()), 0);
+ SET_LOCALS_FLOAT(object->float_field(entry->f2_as_index()), 0);
break;
case dtos:
- SET_LOCALS_DOUBLE(object->double_field(entry->f2()), 0);
+ SET_LOCALS_DOUBLE(object->double_field(entry->f2_as_index()), 0);
break;
case atos:
- SET_LOCALS_OBJECT(object->obj_field(entry->f2()), 0);
+ SET_LOCALS_OBJECT(object->obj_field(entry->f2_as_index()), 0);
break;
default:
@@ -629,516 +610,6 @@
return 0;
}
-int CppInterpreter::method_handle_entry(Method* method,
- intptr_t UNUSED, TRAPS) {
- JavaThread *thread = (JavaThread *) THREAD;
- ZeroStack *stack = thread->zero_stack();
- int argument_slots = method->size_of_parameters();
- int result_slots = type2size[result_type_of(method)];
- intptr_t *vmslots = stack->sp();
- intptr_t *unwind_sp = vmslots + argument_slots;
-
- // Find the MethodType
- address p = (address) method;
- for (jint* pc = method->method_type_offsets_chain(); (*pc) != -1; pc++) {
- p = *(address*)(p + (*pc));
- }
- oop method_type = (oop) p;
-
- // The MethodHandle is in the slot after the arguments
- int num_vmslots = argument_slots - 1;
- oop method_handle = VMSLOTS_OBJECT(num_vmslots);
-
- // InvokeGeneric requires some extra shuffling
- oop mhtype = java_lang_invoke_MethodHandle::type(method_handle);
- bool is_exact = mhtype == method_type;
- if (!is_exact) {
- if (true || // FIXME
- method->intrinsic_id() == vmIntrinsics::_invokeExact) {
- CALL_VM_NOCHECK_NOFIX(
- SharedRuntime::throw_WrongMethodTypeException(
- thread, method_type, mhtype));
- // NB all oops trashed!
- assert(HAS_PENDING_EXCEPTION, "should do");
- stack->set_sp(unwind_sp);
- return 0;
- }
- assert(method->intrinsic_id() == vmIntrinsics::_invokeGeneric, "should be");
-
- // Load up an adapter from the calling type
- // NB the x86 code for this (in methodHandles_x86.cpp, search for
- // "genericInvoker") is really really odd. I'm hoping it's trying
- // to accomodate odd VM/class library combinations I can ignore.
- oop adapter = NULL; //FIXME: load the adapter from the CP cache
- if (adapter == NULL) {
- CALL_VM_NOCHECK_NOFIX(
- SharedRuntime::throw_WrongMethodTypeException(
- thread, method_type, mhtype));
- // NB all oops trashed!
- assert(HAS_PENDING_EXCEPTION, "should do");
- stack->set_sp(unwind_sp);
- return 0;
- }
-
- // Adapters are shared among form-families of method-type. The
- // type being called is passed as a trusted first argument so that
- // the adapter knows the actual types of its arguments and return
- // values.
- insert_vmslots(num_vmslots + 1, 1, THREAD);
- if (HAS_PENDING_EXCEPTION) {
- // NB all oops trashed!
- stack->set_sp(unwind_sp);
- return 0;
- }
-
- vmslots = stack->sp();
- num_vmslots++;
- SET_VMSLOTS_OBJECT(method_type, num_vmslots);
-
- method_handle = adapter;
- }
-
- // Start processing
- process_method_handle(method_handle, THREAD);
- if (HAS_PENDING_EXCEPTION)
- result_slots = 0;
-
- // If this is an invokeExact then the eventual callee will not
- // have unwound the method handle argument so we have to do it.
- // If a result is being returned the it will be above the method
- // handle argument we're unwinding.
- if (is_exact) {
- intptr_t result[2];
- for (int i = 0; i < result_slots; i++)
- result[i] = stack->pop();
- stack->pop();
- for (int i = result_slots - 1; i >= 0; i--)
- stack->push(result[i]);
- }
-
- // Check
- assert(stack->sp() == unwind_sp - result_slots, "should be");
-
- // No deoptimized frames on the stack
- return 0;
-}
-
-void CppInterpreter::process_method_handle(oop method_handle, TRAPS) {
- JavaThread *thread = (JavaThread *) THREAD;
- ZeroStack *stack = thread->zero_stack();
- intptr_t *vmslots = stack->sp();
-
- bool direct_to_method = false;
- BasicType src_rtype = T_ILLEGAL;
- BasicType dst_rtype = T_ILLEGAL;
-
- MethodHandleEntry *entry =
- java_lang_invoke_MethodHandle::vmentry(method_handle);
- MethodHandles::EntryKind entry_kind =
- (MethodHandles::EntryKind) (((intptr_t) entry) & 0xffffffff);
-
- Method* method = NULL;
- switch (entry_kind) {
- case MethodHandles::_invokestatic_mh:
- direct_to_method = true;
- break;
-
- case MethodHandles::_invokespecial_mh:
- case MethodHandles::_invokevirtual_mh:
- case MethodHandles::_invokeinterface_mh:
- {
- oop receiver =
- VMSLOTS_OBJECT(
- java_lang_invoke_MethodHandle::vmslots(method_handle) - 1);
- if (receiver == NULL) {
- stack->set_sp(calculate_unwind_sp(stack, method_handle));
- CALL_VM_NOCHECK_NOFIX(
- throw_exception(
- thread, vmSymbols::java_lang_NullPointerException()));
- // NB all oops trashed!
- assert(HAS_PENDING_EXCEPTION, "should do");
- return;
- }
- if (entry_kind != MethodHandles::_invokespecial_mh) {
- intptr_t index = java_lang_invoke_DirectMethodHandle::vmindex(method_handle);
- InstanceKlass* rcvrKlass =
- (InstanceKlass *) receiver->klass();
- if (entry_kind == MethodHandles::_invokevirtual_mh) {
- method = (Method*) rcvrKlass->start_of_vtable()[index];
- }
- else {
- oop iclass = java_lang_invoke_MethodHandle::next_target(method_handle);
- itableOffsetEntry* ki =
- (itableOffsetEntry *) rcvrKlass->start_of_itable();
- int i, length = rcvrKlass->itable_length();
- for (i = 0; i < length; i++, ki++ ) {
- if (ki->interface_klass() == iclass)
- break;
- }
- if (i == length) {
- stack->set_sp(calculate_unwind_sp(stack, method_handle));
- CALL_VM_NOCHECK_NOFIX(
- throw_exception(
- thread, vmSymbols::java_lang_IncompatibleClassChangeError()));
- // NB all oops trashed!
- assert(HAS_PENDING_EXCEPTION, "should do");
- return;
- }
- itableMethodEntry* im = ki->first_method_entry(receiver->klass());
- method = im[index].method();
- if (method == NULL) {
- stack->set_sp(calculate_unwind_sp(stack, method_handle));
- CALL_VM_NOCHECK_NOFIX(
- throw_exception(
- thread, vmSymbols::java_lang_AbstractMethodError()));
- // NB all oops trashed!
- assert(HAS_PENDING_EXCEPTION, "should do");
- return;
- }
- }
- }
- }
- direct_to_method = true;
- break;
-
- case MethodHandles::_bound_ref_direct_mh:
- case MethodHandles::_bound_int_direct_mh:
- case MethodHandles::_bound_long_direct_mh:
- direct_to_method = true;
- // fall through
- case MethodHandles::_bound_ref_mh:
- case MethodHandles::_bound_int_mh:
- case MethodHandles::_bound_long_mh:
- {
- BasicType arg_type = T_ILLEGAL;
- int arg_mask = -1;
- int arg_slots = -1;
- MethodHandles::get_ek_bound_mh_info(
- entry_kind, arg_type, arg_mask, arg_slots);
- int arg_slot =
- java_lang_invoke_BoundMethodHandle::vmargslot(method_handle);
-
- // Create the new slot(s)
- intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle);
- insert_vmslots(arg_slot, arg_slots, THREAD);
- if (HAS_PENDING_EXCEPTION) {
- // all oops trashed
- stack->set_sp(unwind_sp);
- return;
- }
- vmslots = stack->sp();
-
- // Store bound argument into new stack slot
- oop arg = java_lang_invoke_BoundMethodHandle::argument(method_handle);
- if (arg_type == T_OBJECT) {
- assert(arg_slots == 1, "should be");
- SET_VMSLOTS_OBJECT(arg, arg_slot);
- }
- else {
- jvalue arg_value;
- arg_type = java_lang_boxing_object::get_value(arg, &arg_value);
- switch (arg_type) {
- case T_BOOLEAN:
- SET_VMSLOTS_INT(arg_value.z, arg_slot);
- break;
- case T_CHAR:
- SET_VMSLOTS_INT(arg_value.c, arg_slot);
- break;
- case T_BYTE:
- SET_VMSLOTS_INT(arg_value.b, arg_slot);
- break;
- case T_SHORT:
- SET_VMSLOTS_INT(arg_value.s, arg_slot);
- break;
- case T_INT:
- SET_VMSLOTS_INT(arg_value.i, arg_slot);
- break;
- case T_FLOAT:
- SET_VMSLOTS_FLOAT(arg_value.f, arg_slot);
- break;
- case T_LONG:
- SET_VMSLOTS_LONG(arg_value.j, arg_slot + 1);
- break;
- case T_DOUBLE:
- SET_VMSLOTS_DOUBLE(arg_value.d, arg_slot + 1);
- break;
- default:
- tty->print_cr("unhandled type %s", type2name(arg_type));
- ShouldNotReachHere();
- }
- }
- }
- break;
-
- case MethodHandles::_adapter_retype_only:
- case MethodHandles::_adapter_retype_raw:
- src_rtype = result_type_of_handle(
- java_lang_invoke_MethodHandle::next_target(method_handle));
- dst_rtype = result_type_of_handle(method_handle);
- break;
-
- case MethodHandles::_adapter_check_cast:
- {
- int arg_slot =
- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
- oop arg = VMSLOTS_OBJECT(arg_slot);
- if (arg != NULL) {
- Klass* objKlassOop = arg->klass();
- Klass* klassOf = java_lang_Class::as_Klass(
- java_lang_invoke_AdapterMethodHandle::argument(method_handle));
-
- if (objKlassOop != klassOf &&
- !objKlassOop->is_subtype_of(klassOf)) {
- ResourceMark rm(THREAD);
- const char* objName = Klass::cast(objKlassOop)->external_name();
- const char* klassName = Klass::cast(klassOf)->external_name();
- char* message = SharedRuntime::generate_class_cast_message(
- objName, klassName);
-
- stack->set_sp(calculate_unwind_sp(stack, method_handle));
- CALL_VM_NOCHECK_NOFIX(
- throw_exception(
- thread, vmSymbols::java_lang_ClassCastException(), message));
- // NB all oops trashed!
- assert(HAS_PENDING_EXCEPTION, "should do");
- return;
- }
- }
- }
- break;
-
- case MethodHandles::_adapter_dup_args:
- {
- int arg_slot =
- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
- int conv =
- java_lang_invoke_AdapterMethodHandle::conversion(method_handle);
- int num_slots = -MethodHandles::adapter_conversion_stack_move(conv);
- assert(num_slots > 0, "should be");
-
- // Create the new slot(s)
- intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle);
- stack->overflow_check(num_slots, THREAD);
- if (HAS_PENDING_EXCEPTION) {
- // all oops trashed
- stack->set_sp(unwind_sp);
- return;
- }
-
- // Duplicate the arguments
- for (int i = num_slots - 1; i >= 0; i--)
- stack->push(*VMSLOTS_SLOT(arg_slot + i));
-
- vmslots = stack->sp(); // unused, but let the compiler figure that out
- }
- break;
-
- case MethodHandles::_adapter_drop_args:
- {
- int arg_slot =
- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
- int conv =
- java_lang_invoke_AdapterMethodHandle::conversion(method_handle);
- int num_slots = MethodHandles::adapter_conversion_stack_move(conv);
- assert(num_slots > 0, "should be");
-
- remove_vmslots(arg_slot, num_slots, THREAD); // doesn't trap
- vmslots = stack->sp(); // unused, but let the compiler figure that out
- }
- break;
-
- case MethodHandles::_adapter_opt_swap_1:
- case MethodHandles::_adapter_opt_swap_2:
- case MethodHandles::_adapter_opt_rot_1_up:
- case MethodHandles::_adapter_opt_rot_1_down:
- case MethodHandles::_adapter_opt_rot_2_up:
- case MethodHandles::_adapter_opt_rot_2_down:
- {
- int arg1 =
- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
- int conv =
- java_lang_invoke_AdapterMethodHandle::conversion(method_handle);
- int arg2 = MethodHandles::adapter_conversion_vminfo(conv);
-
- int swap_bytes = 0, rotate = 0;
- MethodHandles::get_ek_adapter_opt_swap_rot_info(
- entry_kind, swap_bytes, rotate);
- int swap_slots = swap_bytes >> LogBytesPerWord;
-
- intptr_t tmp;
- switch (rotate) {
- case 0: // swap
- for (int i = 0; i < swap_slots; i++) {
- tmp = *VMSLOTS_SLOT(arg1 + i);
- SET_VMSLOTS_SLOT(VMSLOTS_SLOT(arg2 + i), arg1 + i);
- SET_VMSLOTS_SLOT(&tmp, arg2 + i);
- }
- break;
-
- case 1: // up
- assert(arg1 - swap_slots > arg2, "should be");
-
- tmp = *VMSLOTS_SLOT(arg1);
- for (int i = arg1 - swap_slots; i >= arg2; i--)
- SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i + swap_slots);
- SET_VMSLOTS_SLOT(&tmp, arg2);
-
- break;
-
- case -1: // down
- assert(arg2 - swap_slots > arg1, "should be");
-
- tmp = *VMSLOTS_SLOT(arg1);
- for (int i = arg1 + swap_slots; i <= arg2; i++)
- SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i - swap_slots);
- SET_VMSLOTS_SLOT(&tmp, arg2);
- break;
-
- default:
- ShouldNotReachHere();
- }
- }
- break;
-
- case MethodHandles::_adapter_opt_i2l:
- {
- int arg_slot =
- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
- int arg = VMSLOTS_INT(arg_slot);
- intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle);
- insert_vmslots(arg_slot, 1, THREAD);
- if (HAS_PENDING_EXCEPTION) {
- // all oops trashed
- stack->set_sp(unwind_sp);
- return;
- }
- vmslots = stack->sp();
- arg_slot++;
- SET_VMSLOTS_LONG(arg, arg_slot);
- }
- break;
-
- case MethodHandles::_adapter_opt_unboxi:
- case MethodHandles::_adapter_opt_unboxl:
- {
- int arg_slot =
- java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
- oop arg = VMSLOTS_OBJECT(arg_slot);
- jvalue arg_value;
- if (arg == NULL) {
- // queue a nullpointer exception for the caller
- stack->set_sp(calculate_unwind_sp(stack, method_handle));
- CALL_VM_NOCHECK_NOFIX(
- throw_exception(
- thread, vmSymbols::java_lang_NullPointerException()));
- // NB all oops trashed!
- assert(HAS_PENDING_EXCEPTION, "should do");
- return;
- }
- BasicType arg_type = java_lang_boxing_object::get_value(arg, &arg_value);
- if (arg_type == T_LONG || arg_type == T_DOUBLE) {
- intptr_t *unwind_sp = calculate_unwind_sp(stack, method_handle);
- insert_vmslots(arg_slot, 1, THREAD);
- if (HAS_PENDING_EXCEPTION) {
- // all oops trashed
- stack->set_sp(unwind_sp);
- return;
- }
- vmslots = stack->sp();
- arg_slot++;
- }
- switch (arg_type) {
- case T_BOOLEAN:
- SET_VMSLOTS_INT(arg_value.z, arg_slot);
- break;
- case T_CHAR:
- SET_VMSLOTS_INT(arg_value.c, arg_slot);
- break;
- case T_BYTE:
- SET_VMSLOTS_INT(arg_value.b, arg_slot);
- break;
- case T_SHORT:
- SET_VMSLOTS_INT(arg_value.s, arg_slot);
- break;
- case T_INT:
- SET_VMSLOTS_INT(arg_value.i, arg_slot);
- break;
- case T_FLOAT:
- SET_VMSLOTS_FLOAT(arg_value.f, arg_slot);
- break;
- case T_LONG:
- SET_VMSLOTS_LONG(arg_value.j, arg_slot);
- break;
- case T_DOUBLE:
- SET_VMSLOTS_DOUBLE(arg_value.d, arg_slot);
- break;
- default:
- tty->print_cr("unhandled type %s", type2name(arg_type));
- ShouldNotReachHere();
- }
- }
- break;
-
- default:
- tty->print_cr("unhandled entry_kind %s",
- MethodHandles::entry_name(entry_kind));
- ShouldNotReachHere();
- }
-
- // Continue along the chain
- if (direct_to_method) {
- if (method == NULL) {
- method =
- (Method*) java_lang_invoke_MethodHandle::vmtarget(method_handle);
- }
- address entry_point = method->from_interpreted_entry();
- Interpreter::invoke_method(method, entry_point, THREAD);
- }
- else {
- process_method_handle(
- java_lang_invoke_MethodHandle::next_target(method_handle), THREAD);
- }
- // NB all oops now trashed
-
- // Adapt the result type, if necessary
- if (src_rtype != dst_rtype && !HAS_PENDING_EXCEPTION) {
- switch (dst_rtype) {
- case T_VOID:
- for (int i = 0; i < type2size[src_rtype]; i++)
- stack->pop();
- return;
-
- case T_INT:
- switch (src_rtype) {
- case T_VOID:
- stack->overflow_check(1, CHECK);
- stack->push(0);
- return;
-
- case T_BOOLEAN:
- case T_CHAR:
- case T_BYTE:
- case T_SHORT:
- return;
- }
- // INT results sometimes need narrowing
- case T_BOOLEAN:
- case T_CHAR:
- case T_BYTE:
- case T_SHORT:
- switch (src_rtype) {
- case T_INT:
- return;
- }
- }
-
- tty->print_cr("unhandled conversion:");
- tty->print_cr("src_rtype = %s", type2name(src_rtype));
- tty->print_cr("dst_rtype = %s", type2name(dst_rtype));
- ShouldNotReachHere();
- }
-}
-
// The new slots will be inserted before slot insert_before.
// Slots < insert_before will have the same slot number after the insert.
// Slots >= insert_before will become old_slot + num_slots.
@@ -1380,10 +851,6 @@
entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();
break;
- case Interpreter::method_handle:
- entry_point = ((InterpreterGenerator*) this)->generate_method_handle_entry();
- break;
-
case Interpreter::java_lang_math_sin:
case Interpreter::java_lang_math_cos:
case Interpreter::java_lang_math_tan:
@@ -1391,6 +858,8 @@
case Interpreter::java_lang_math_log:
case Interpreter::java_lang_math_log10:
case Interpreter::java_lang_math_sqrt:
+ case Interpreter::java_lang_math_pow:
+ case Interpreter::java_lang_math_exp:
entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);
break;
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -36,7 +36,6 @@
static int native_entry(Method* method, intptr_t UNUSED, TRAPS);
static int accessor_entry(Method* method, intptr_t UNUSED, TRAPS);
static int empty_entry(Method* method, intptr_t UNUSED, TRAPS);
- static int method_handle_entry(Method* method, intptr_t UNUSED, TRAPS);
public:
// Main loop of normal_entry
@@ -44,7 +43,6 @@
private:
// Helpers for method_handle_entry
- static void process_method_handle(oop method_handle, TRAPS);
static void insert_vmslots(int insert_before, int num_slots, TRAPS);
static void remove_vmslots(int first_slot, int num_slots, TRAPS);
static BasicType result_type_of_handle(oop method_handle);
--- a/hotspot/src/cpu/zero/vm/frame_zero.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/cpu/zero/vm/frame_zero.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -351,7 +351,7 @@
switch (offset) {
case pc_off:
strncpy(fieldbuf, "pc", buflen);
- if (method()->is_oop()) {
+ if (method()->is_method()) {
nmethod *code = method()->code();
if (code && code->pc_desc_at(pc())) {
SimpleScopeDesc ssd(code, pc());
@@ -367,7 +367,7 @@
case method_off:
strncpy(fieldbuf, "method", buflen);
- if (method()->is_oop()) {
+ if (method()->is_method()) {
method()->name_and_sig_as_C_string(valuebuf, buflen);
}
return;
@@ -378,7 +378,7 @@
}
// Variable part
- if (method()->is_oop()) {
+ if (method()->is_method()) {
identify_vp_word(frame_index, addr_of_word(offset),
addr_of_word(header_words + 1),
unextended_sp() + method()->max_stack(),
@@ -430,4 +430,3 @@
// unused... but returns fp() to minimize changes introduced by 7087445
return fp();
}
-
--- a/hotspot/src/cpu/zero/vm/frame_zero.inline.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/cpu/zero/vm/frame_zero.inline.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -36,6 +36,8 @@
_deopt_state = unknown;
}
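+// Never used on Zero; defined only because the shared frame code expects it.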
+inline address frame::sender_pc() const { ShouldNotCallThis(); }
+
inline frame::frame(ZeroFrame* zf, intptr_t* sp) {
_zeroframe = zf;
_sp = sp;
--- a/hotspot/src/cpu/zero/vm/icBuffer_zero.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/cpu/zero/vm/icBuffer_zero.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -40,7 +40,7 @@
}
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin,
- Metadata* cached_oop,
+ void* cached_oop,
address entry_point) {
// NB ic_stub_code_size() must return the size of the code we generate
ShouldNotCallThis();
@@ -51,7 +51,6 @@
ShouldNotCallThis();
}
-Metadata* InlineCacheBuffer::ic_buffer_cached_oop(address code_begin) {
- // NB ic_stub_code_size() must return the size of the code we generate
+void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
ShouldNotCallThis();
}
--- a/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/cpu/zero/vm/methodHandles_zero.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -24,26 +24,159 @@
*/
#include "precompiled.hpp"
+#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"
-int MethodHandles::adapter_conversion_ops_supported_mask() {
- return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY)
- |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW)
- |(1<<java_lang_invoke_AdapterMethodHandle::OP_CHECK_CAST)
- |(1<<java_lang_invoke_AdapterMethodHandle::OP_PRIM_TO_PRIM)
- |(1<<java_lang_invoke_AdapterMethodHandle::OP_REF_TO_PRIM)
- |(1<<java_lang_invoke_AdapterMethodHandle::OP_SWAP_ARGS)
- |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
- |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
- |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
- //|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
- );
- // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
+void MethodHandles::invoke_target(Method* method, TRAPS) {
+
+ JavaThread *thread = (JavaThread *) THREAD;
+ ZeroStack *stack = thread->zero_stack();
+ InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
+ interpreterState istate = frame->interpreter_state();
+
+ // Trim back the stack to put the parameters at the top
+ stack->set_sp(istate->stack() + 1);
+
+ Interpreter::invoke_method(method, method->from_interpreted_entry(), THREAD);
+
+ // Convert the result
+ istate->set_stack(stack->sp() - 1);
+
+}
+
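+// Pop the top oop off the current interpreter frame's expression stack
+// and return it, updating the frame's stack pointer.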
+oop MethodHandles::popFromStack(TRAPS) {
+
+ JavaThread *thread = (JavaThread *) THREAD;
+ InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
+ interpreterState istate = frame->interpreter_state();
+ intptr_t* topOfStack = istate->stack();
+
+ oop top = STACK_OBJECT(-1);
+ MORE_STACK(-1);
+ istate->set_stack(topOfStack);
+
+ return top;
+
+}
+
+int MethodHandles::method_handle_entry_invokeBasic(Method* method, intptr_t UNUSED, TRAPS) {
+
+ JavaThread *thread = (JavaThread *) THREAD;
+ InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
+ interpreterState istate = frame->interpreter_state();
+ intptr_t* topOfStack = istate->stack();
+
+ // 'this' is a MethodHandle. We resolve the target method by accessing this.form.vmentry.vmtarget.
+ int numArgs = method->size_of_parameters();
+ oop lform1 = java_lang_invoke_MethodHandle::form(STACK_OBJECT(-numArgs)); // this.form
+ oop vmEntry1 = java_lang_invoke_LambdaForm::vmentry(lform1);
+ Method* vmtarget = (Method*) java_lang_invoke_MemberName::vmtarget(vmEntry1);
+
+ invoke_target(vmtarget, THREAD);
+
+ // No deoptimized frames on the stack
+ return 0;
+}
+
+int MethodHandles::method_handle_entry_linkToStaticOrSpecial(Method* method, intptr_t UNUSED, TRAPS) {
+
+ // Pop appendix argument from stack. This is a MemberName which we resolve to the
+ // target method.
+ oop vmentry = popFromStack(THREAD);
+
+ Method* vmtarget = (Method*) java_lang_invoke_MemberName::vmtarget(vmentry);
+
+ invoke_target(vmtarget, THREAD);
+
+ return 0;
}
-void MethodHandles::generate_method_handle_stub(MacroAssembler* masm,
- MethodHandles::EntryKind ek) {
- init_entry(ek, (MethodHandleEntry *) ek);
+int MethodHandles::method_handle_entry_linkToInterface(Method* method, intptr_t UNUSED, TRAPS) {
+ JavaThread *thread = (JavaThread *) THREAD;
+ InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
+ interpreterState istate = frame->interpreter_state();
+
+ // Pop appendix argument from stack. This is a MemberName which we resolve to the
+ // target method.
+ oop vmentry = popFromStack(THREAD);
+ intptr_t* topOfStack = istate->stack();
+
+ // Resolve target method by looking up in the receiver object's itable.
+ Klass* clazz = java_lang_Class::as_Klass(java_lang_invoke_MemberName::clazz(vmentry));
+ intptr_t vmindex = java_lang_invoke_MemberName::vmindex(vmentry);
+ Method* target = (Method*) java_lang_invoke_MemberName::vmtarget(vmentry);
+
+ int numArgs = target->size_of_parameters();
+ oop recv = STACK_OBJECT(-numArgs);
+
+ InstanceKlass* klass_part = InstanceKlass::cast(recv->klass());
+ itableOffsetEntry* ki = (itableOffsetEntry*) klass_part->start_of_itable();
+ int i;
+ for ( i = 0 ; i < klass_part->itable_length() ; i++, ki++ ) {
+ if (ki->interface_klass() == clazz) break;
+ }
+
+ itableMethodEntry* im = ki->first_method_entry(recv->klass());
+ Method* vmtarget = im[vmindex].method();
+
+ invoke_target(vmtarget, THREAD);
+
+ return 0;
}
+
+int MethodHandles::method_handle_entry_linkToVirtual(Method* method, intptr_t UNUSED, TRAPS) {
+ JavaThread *thread = (JavaThread *) THREAD;
+
+ InterpreterFrame *frame = thread->top_zero_frame()->as_interpreter_frame();
+ interpreterState istate = frame->interpreter_state();
+
+ // Pop appendix argument from stack. This is a MemberName which we resolve to the
+ // target method.
+ oop vmentry = popFromStack(THREAD);
+ intptr_t* topOfStack = istate->stack();
+
+ // Resolve target method by looking up in the receiver object's vtable.
+ intptr_t vmindex = java_lang_invoke_MemberName::vmindex(vmentry);
+ Method* target = (Method*) java_lang_invoke_MemberName::vmtarget(vmentry);
+ int numArgs = target->size_of_parameters();
+ oop recv = STACK_OBJECT(-numArgs);
+ Klass* clazz = recv->klass();
+ Klass* klass_part = InstanceKlass::cast(clazz);
+ klassVtable* vtable = klass_part->vtable();
+ Method* vmtarget = vtable->method_at(vmindex);
+
+ invoke_target(vmtarget, THREAD);
+
+ return 0;
+}
+
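+// These intrinsics are linked to Java-generated adapters instead (see
+// generate_method_handle_interpreter_entry below) and must never be entered here.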
+int MethodHandles::method_handle_entry_invalid(Method* method, intptr_t UNUSED, TRAPS) {
+ ShouldNotReachHere();
+ return 0;
+}
+
+address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* masm,
+ vmIntrinsics::ID iid) {
+ switch (iid) {
+ case vmIntrinsics::_invokeGeneric:
+ case vmIntrinsics::_compiledLambdaForm:
+ // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
+ // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
+ // They all allow an appendix argument.
+ return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invalid);
+ case vmIntrinsics::_invokeBasic:
+ return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_invokeBasic);
+ case vmIntrinsics::_linkToStatic:
+ case vmIntrinsics::_linkToSpecial:
+ return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToStaticOrSpecial);
+ case vmIntrinsics::_linkToInterface:
+ return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToInterface);
+ case vmIntrinsics::_linkToVirtual:
+ return InterpreterGenerator::generate_entry_impl(masm, (address) MethodHandles::method_handle_entry_linkToVirtual);
+ default:
+ ShouldNotReachHere();
+ return NULL;
+ }
+}
--- a/hotspot/src/cpu/zero/vm/methodHandles_zero.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/cpu/zero/vm/methodHandles_zero.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -26,6 +26,14 @@
// Adapters
enum /* platform_dependent_constants */ {
- adapter_code_size = 0
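+  // One ZeroEntry is generated for each method handle intrinsic entry point.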
+ adapter_code_size = sizeof(ZeroEntry) * (Interpreter::method_handle_invoke_LAST - Interpreter::method_handle_invoke_FIRST + 1)
};
+private:
+ static oop popFromStack(TRAPS);
+ static void invoke_target(Method* method, TRAPS);
+ static int method_handle_entry_invokeBasic(Method* method, intptr_t UNUSED, TRAPS);
+ static int method_handle_entry_linkToStaticOrSpecial(Method* method, intptr_t UNUSED, TRAPS);
+ static int method_handle_entry_linkToVirtual(Method* method, intptr_t UNUSED, TRAPS);
+ static int method_handle_entry_linkToInterface(Method* method, intptr_t UNUSED, TRAPS);
+ static int method_handle_entry_invalid(Method* method, intptr_t UNUSED, TRAPS);
--- a/hotspot/src/cpu/zero/vm/register_zero.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/cpu/zero/vm/register_zero.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -114,5 +114,8 @@
};
CONSTANT_REGISTER_DECLARATION(Register, noreg, (-1));
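+// Follow the other ports and expose noreg as a macro over its enum value
+// unless register macros are explicitly disabled.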
+#ifndef DONT_USE_REGISTER_DEFINES
+#define noreg ((Register)(noreg_RegisterEnumValue))
+#endif
#endif // CPU_ZERO_VM_REGISTER_ZERO_HPP
--- a/hotspot/src/cpu/zero/vm/relocInfo_zero.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/cpu/zero/vm/relocInfo_zero.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -77,3 +77,7 @@
CodeBuffer* dst) {
ShouldNotCallThis();
}
+
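+// Zero emits no relocatable code, so there is never a metadata value to patch.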
+void metadata_Relocation::pd_fix_value(address x) {
+ ShouldNotCallThis();
+}
--- a/hotspot/src/cpu/zero/vm/sharedRuntime_zero.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/cpu/zero/vm/sharedRuntime_zero.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -35,6 +35,7 @@
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_zero.inline.hpp"
+
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
@@ -47,6 +48,12 @@
#endif
+
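+// Placeholder entry point handed to the adapter library; Zero has no
+// compiled code, so these adapters must never actually be executed.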
+static address zero_null_code_stub() {
+ address start = ShouldNotCallThisStub();
+ return start;
+}
+
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
int total_args_passed,
@@ -63,16 +70,14 @@
AdapterFingerPrint *fingerprint) {
return AdapterHandlerLibrary::new_entry(
fingerprint,
- ShouldNotCallThisStub(),
- ShouldNotCallThisStub(),
- ShouldNotCallThisStub());
+ CAST_FROM_FN_PTR(address,zero_null_code_stub),
+ CAST_FROM_FN_PTR(address,zero_null_code_stub),
+ CAST_FROM_FN_PTR(address,zero_null_code_stub));
}
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
methodHandle method,
int compile_id,
- int total_args_passed,
- int max_arg,
BasicType *sig_bt,
VMRegPair *regs,
BasicType ret_type) {
@@ -96,19 +101,20 @@
ShouldNotCallThis();
}
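+// Dummy routine returned where other platforms generate real deopt,
+// safepoint and resolve blobs; it must never actually run.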
+JRT_LEAF(void, zero_stub())
+ ShouldNotCallThis();
+JRT_END
+
static RuntimeStub* generate_empty_runtime_stub(const char* name) {
- CodeBuffer buffer(name, 0, 0);
- return RuntimeStub::new_runtime_stub(name, &buffer, 0, 0, NULL, false);
+ return CAST_FROM_FN_PTR(RuntimeStub*,zero_stub);
}
static SafepointBlob* generate_empty_safepoint_blob() {
- CodeBuffer buffer("handler_blob", 0, 0);
- return SafepointBlob::create(&buffer, NULL, 0);
+ return CAST_FROM_FN_PTR(SafepointBlob*,zero_stub);
}
static DeoptimizationBlob* generate_empty_deopt_blob() {
- CodeBuffer buffer("handler_blob", 0, 0);
- return DeoptimizationBlob::create(&buffer, NULL, 0, 0, 0, 0);
+ return CAST_FROM_FN_PTR(DeoptimizationBlob*,zero_stub);
}
@@ -116,7 +122,7 @@
_deopt_blob = generate_empty_deopt_blob();
}
-SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, bool cause_return) {
+SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
return generate_empty_safepoint_blob();
}
@@ -124,6 +130,7 @@
return generate_empty_runtime_stub("resolve_blob");
}
+
int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
VMRegPair *regs,
int total_args_passed) {
--- a/hotspot/src/os/bsd/vm/attachListener_bsd.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/os/bsd/vm/attachListener_bsd.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -342,7 +342,6 @@
// get the credentials of the peer and check the effective uid/guid
// - check with jeff on this.
-#ifdef _ALLBSD_SOURCE
uid_t puid;
gid_t pgid;
if (::getpeereid(s, &puid, &pgid) != 0) {
@@ -350,17 +349,6 @@
RESTARTABLE(::close(s), res);
continue;
}
-#else
- struct ucred cred_info;
- socklen_t optlen = sizeof(cred_info);
- if (::getsockopt(s, SOL_SOCKET, SO_PEERCRED, (void*)&cred_info, &optlen) == -1) {
- int res;
- RESTARTABLE(::close(s), res);
- continue;
- }
- uid_t puid = cred_info.uid;
- gid_t pgid = cred_info.gid;
-#endif
uid_t euid = geteuid();
gid_t egid = getegid();
--- a/hotspot/src/os/bsd/vm/osThread_bsd.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/os/bsd/vm/osThread_bsd.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -39,18 +39,12 @@
private:
-#ifdef _ALLBSD_SOURCE
-
#ifdef __APPLE__
typedef thread_t thread_id_t;
#else
typedef pthread_t thread_id_t;
#endif
-#else
- typedef pid_t thread_id_t;
-#endif
-
// _pthread_id is the pthread id, which is used by library calls
// (e.g. pthread_kill).
pthread_t _pthread_id;
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -108,14 +108,8 @@
# include <semaphore.h>
# include <fcntl.h>
# include <string.h>
-#ifdef _ALLBSD_SOURCE
# include <sys/param.h>
# include <sys/sysctl.h>
-#else
-# include <syscall.h>
-# include <sys/sysinfo.h>
-# include <gnu/libc-version.h>
-#endif
# include <sys/ipc.h>
# include <sys/shm.h>
#ifndef __APPLE__
@@ -150,25 +144,10 @@
// global variables
julong os::Bsd::_physical_memory = 0;
-#ifndef _ALLBSD_SOURCE
-address os::Bsd::_initial_thread_stack_bottom = NULL;
-uintptr_t os::Bsd::_initial_thread_stack_size = 0;
-#endif
int (*os::Bsd::_clock_gettime)(clockid_t, struct timespec *) = NULL;
-#ifndef _ALLBSD_SOURCE
-int (*os::Bsd::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
-Mutex* os::Bsd::_createThread_lock = NULL;
-#endif
pthread_t os::Bsd::_main_thread;
int os::Bsd::_page_size = -1;
-#ifndef _ALLBSD_SOURCE
-bool os::Bsd::_is_floating_stack = false;
-bool os::Bsd::_is_NPTL = false;
-bool os::Bsd::_supports_fast_thread_cpu_time = false;
-const char * os::Bsd::_glibc_version = NULL;
-const char * os::Bsd::_libpthread_version = NULL;
-#endif
static jlong initial_time_count=0;
@@ -176,7 +155,7 @@
// For diagnostics to print a message once. see run_periodic_checks
static sigset_t check_signal_done;
-static bool check_signals = true;;
+static bool check_signals = true;
static pid_t _initial_pid = 0;
@@ -198,16 +177,8 @@
}
julong os::Bsd::available_memory() {
-#ifdef _ALLBSD_SOURCE
// XXXBSD: this is just a stopgap implementation
return physical_memory() >> 2;
-#else
- // values in struct sysinfo are "unsigned long"
- struct sysinfo si;
- sysinfo(&si);
-
- return (julong)si.freeram * si.mem_unit;
-#endif
}
julong os::physical_memory() {
@@ -255,22 +226,6 @@
}
-#ifndef _ALLBSD_SOURCE
-#ifndef SYS_gettid
-// i386: 224, ia64: 1105, amd64: 186, sparc 143
-#ifdef __ia64__
-#define SYS_gettid 1105
-#elif __i386__
-#define SYS_gettid 224
-#elif __amd64__
-#define SYS_gettid 186
-#elif __sparc__
-#define SYS_gettid 143
-#else
-#error define gettid for the arch
-#endif
-#endif
-#endif
// Cpu architecture string
#if defined(ZERO)
@@ -302,36 +257,7 @@
#define COMPILER_VARIANT "client"
#endif
-#ifndef _ALLBSD_SOURCE
-// pid_t gettid()
-//
-// Returns the kernel thread id of the currently running thread. Kernel
-// thread id is used to access /proc.
-//
-// (Note that getpid() on BsdThreads returns kernel thread id too; but
-// on NPTL, it returns the same pid for all threads, as required by POSIX.)
-//
-pid_t os::Bsd::gettid() {
- int rslt = syscall(SYS_gettid);
- if (rslt == -1) {
- // old kernel, no NPTL support
- return getpid();
- } else {
- return (pid_t)rslt;
- }
-}
-
-// Most versions of bsd have a bug where the number of processors are
-// determined by looking at the /proc file system. In a chroot environment,
-// the system call returns 1. This causes the VM to act as if it is
-// a single processor and elide locking (see is_MP() call).
-static bool unsafe_chroot_detected = false;
-static const char *unstable_chroot_error = "/proc file system not found.\n"
- "Java may be unstable running multithreaded in a chroot "
- "environment on Bsd when /proc filesystem is not mounted.";
-#endif
-
-#ifdef _ALLBSD_SOURCE
+
void os::Bsd::initialize_system_info() {
int mib[2];
size_t len;
@@ -370,24 +296,6 @@
}
#endif
}
-#else
-void os::Bsd::initialize_system_info() {
- set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
- if (processor_count() == 1) {
- pid_t pid = os::Bsd::gettid();
- char fname[32];
- jio_snprintf(fname, sizeof(fname), "/proc/%d", pid);
- FILE *fp = fopen(fname, "r");
- if (fp == NULL) {
- unsafe_chroot_detected = true;
- } else {
- fclose(fp);
- }
- }
- _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
- assert(processor_count() > 0, "bsd error");
-}
-#endif
#ifdef __APPLE__
static const char *get_home() {
@@ -744,171 +652,6 @@
}
}
-#ifndef _ALLBSD_SOURCE
-//////////////////////////////////////////////////////////////////////////////
-// detecting pthread library
-
-void os::Bsd::libpthread_init() {
- // Save glibc and pthread version strings. Note that _CS_GNU_LIBC_VERSION
- // and _CS_GNU_LIBPTHREAD_VERSION are supported in glibc >= 2.3.2. Use a
- // generic name for earlier versions.
- // Define macros here so we can build HotSpot on old systems.
-# ifndef _CS_GNU_LIBC_VERSION
-# define _CS_GNU_LIBC_VERSION 2
-# endif
-# ifndef _CS_GNU_LIBPTHREAD_VERSION
-# define _CS_GNU_LIBPTHREAD_VERSION 3
-# endif
-
- size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
- if (n > 0) {
- char *str = (char *)malloc(n);
- confstr(_CS_GNU_LIBC_VERSION, str, n);
- os::Bsd::set_glibc_version(str);
- } else {
- // _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version()
- static char _gnu_libc_version[32];
- jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version),
- "glibc %s %s", gnu_get_libc_version(), gnu_get_libc_release());
- os::Bsd::set_glibc_version(_gnu_libc_version);
- }
-
- n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
- if (n > 0) {
- char *str = (char *)malloc(n);
- confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
- // Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells
- // us "NPTL-0.29" even we are running with BsdThreads. Check if this
- // is the case. BsdThreads has a hard limit on max number of threads.
- // So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value.
- // On the other hand, NPTL does not have such a limit, sysconf()
- // will return -1 and errno is not changed. Check if it is really NPTL.
- if (strcmp(os::Bsd::glibc_version(), "glibc 2.3.2") == 0 &&
- strstr(str, "NPTL") &&
- sysconf(_SC_THREAD_THREADS_MAX) > 0) {
- free(str);
- os::Bsd::set_libpthread_version("bsdthreads");
- } else {
- os::Bsd::set_libpthread_version(str);
- }
- } else {
- // glibc before 2.3.2 only has BsdThreads.
- os::Bsd::set_libpthread_version("bsdthreads");
- }
-
- if (strstr(libpthread_version(), "NPTL")) {
- os::Bsd::set_is_NPTL();
- } else {
- os::Bsd::set_is_BsdThreads();
- }
-
- // BsdThreads have two flavors: floating-stack mode, which allows variable
- // stack size; and fixed-stack mode. NPTL is always floating-stack.
- if (os::Bsd::is_NPTL() || os::Bsd::supports_variable_stack_size()) {
- os::Bsd::set_is_floating_stack();
- }
-}
-
-/////////////////////////////////////////////////////////////////////////////
-// thread stack
-
-// Force Bsd kernel to expand current thread stack. If "bottom" is close
-// to the stack guard, caller should block all signals.
-//
-// MAP_GROWSDOWN:
-// A special mmap() flag that is used to implement thread stacks. It tells
-// kernel that the memory region should extend downwards when needed. This
-// allows early versions of BsdThreads to only mmap the first few pages
-// when creating a new thread. Bsd kernel will automatically expand thread
-// stack as needed (on page faults).
-//
-// However, because the memory region of a MAP_GROWSDOWN stack can grow on
-// demand, if a page fault happens outside an already mapped MAP_GROWSDOWN
-// region, it's hard to tell if the fault is due to a legitimate stack
-// access or because of reading/writing non-exist memory (e.g. buffer
-// overrun). As a rule, if the fault happens below current stack pointer,
-// Bsd kernel does not expand stack, instead a SIGSEGV is sent to the
-// application (see Bsd kernel fault.c).
-//
-// This Bsd feature can cause SIGSEGV when VM bangs thread stack for
-// stack overflow detection.
-//
-// Newer version of BsdThreads (since glibc-2.2, or, RH-7.x) and NPTL do
-// not use this flag. However, the stack of initial thread is not created
-// by pthread, it is still MAP_GROWSDOWN. Also it's possible (though
-// unlikely) that user code can create a thread with MAP_GROWSDOWN stack
-// and then attach the thread to JVM.
-//
-// To get around the problem and allow stack banging on Bsd, we need to
-// manually expand thread stack after receiving the SIGSEGV.
-//
-// There are two ways to expand thread stack to address "bottom", we used
-// both of them in JVM before 1.5:
-// 1. adjust stack pointer first so that it is below "bottom", and then
-// touch "bottom"
-// 2. mmap() the page in question
-//
-// Now alternate signal stack is gone, it's harder to use 2. For instance,
-// if current sp is already near the lower end of page 101, and we need to
-// call mmap() to map page 100, it is possible that part of the mmap() frame
-// will be placed in page 100. When page 100 is mapped, it is zero-filled.
-// That will destroy the mmap() frame and cause VM to crash.
-//
-// The following code works by adjusting sp first, then accessing the "bottom"
-// page to force a page fault. Bsd kernel will then automatically expand the
-// stack mapping.
-//
-// _expand_stack_to() assumes its frame size is less than page size, which
-// should always be true if the function is not inlined.
-
-#if __GNUC__ < 3 // gcc 2.x does not support noinline attribute
-#define NOINLINE
-#else
-#define NOINLINE __attribute__ ((noinline))
-#endif
-
-static void _expand_stack_to(address bottom) NOINLINE;
-
-static void _expand_stack_to(address bottom) {
- address sp;
- size_t size;
- volatile char *p;
-
- // Adjust bottom to point to the largest address within the same page, it
- // gives us a one-page buffer if alloca() allocates slightly more memory.
- bottom = (address)align_size_down((uintptr_t)bottom, os::Bsd::page_size());
- bottom += os::Bsd::page_size() - 1;
-
- // sp might be slightly above current stack pointer; if that's the case, we
- // will alloca() a little more space than necessary, which is OK. Don't use
- // os::current_stack_pointer(), as its result can be slightly below current
- // stack pointer, causing us to not alloca enough to reach "bottom".
- sp = (address)&sp;
-
- if (sp > bottom) {
- size = sp - bottom;
- p = (volatile char *)alloca(size);
- assert(p != NULL && p <= (volatile char *)bottom, "alloca problem?");
- p[0] = '\0';
- }
-}
-
-bool os::Bsd::manually_expand_stack(JavaThread * t, address addr) {
- assert(t!=NULL, "just checking");
- assert(t->osthread()->expanding_stack(), "expand should be set");
- assert(t->stack_base() != NULL, "stack_base was not initialized");
-
- if (addr < t->stack_base() && addr >= t->stack_yellow_zone_base()) {
- sigset_t mask_all, old_sigset;
- sigfillset(&mask_all);
- pthread_sigmask(SIG_SETMASK, &mask_all, &old_sigset);
- _expand_stack_to(addr);
- pthread_sigmask(SIG_SETMASK, &old_sigset, NULL);
- return true;
- }
- return false;
-}
-#endif
//////////////////////////////////////////////////////////////////////////////
// create new thread
@@ -917,43 +660,7 @@
// check if it's safe to start a new thread
static bool _thread_safety_check(Thread* thread) {
-#ifdef _ALLBSD_SOURCE
- return true;
-#else
- if (os::Bsd::is_BsdThreads() && !os::Bsd::is_floating_stack()) {
- // Fixed stack BsdThreads (SuSE Bsd/x86, and some versions of Redhat)
- // Heap is mmap'ed at lower end of memory space. Thread stacks are
- // allocated (MAP_FIXED) from high address space. Every thread stack
- // occupies a fixed size slot (usually 2Mbytes, but user can change
- // it to other values if they rebuild BsdThreads).
- //
- // Problem with MAP_FIXED is that mmap() can still succeed even part of
- // the memory region has already been mmap'ed. That means if we have too
- // many threads and/or very large heap, eventually thread stack will
- // collide with heap.
- //
- // Here we try to prevent heap/stack collision by comparing current
- // stack bottom with the highest address that has been mmap'ed by JVM
- // plus a safety margin for memory maps created by native code.
- //
- // This feature can be disabled by setting ThreadSafetyMargin to 0
- //
- if (ThreadSafetyMargin > 0) {
- address stack_bottom = os::current_stack_base() - os::current_stack_size();
-
- // not safe if our stack extends below the safety margin
- return stack_bottom - ThreadSafetyMargin >= highest_vm_reserved_address();
- } else {
- return true;
- }
- } else {
- // Floating stack BsdThreads or NPTL:
- // Unlike fixed stack BsdThreads, thread stacks are not MAP_FIXED. When
- // there's not enough space left, pthread_create() will fail. If we come
- // here, that means enough space has been reserved for stack.
- return true;
- }
-#endif
+ return true;
}
#ifdef __APPLE__
@@ -991,7 +698,6 @@
return NULL;
}
-#ifdef _ALLBSD_SOURCE
#ifdef __APPLE__
// thread_id is mach thread on macos
osthread->set_thread_id(::mach_thread_self());
@@ -999,17 +705,6 @@
// thread_id is pthread_id on BSD
osthread->set_thread_id(::pthread_self());
#endif
-#else
- // thread_id is kernel thread id (similar to Solaris LWP id)
- osthread->set_thread_id(os::Bsd::gettid());
-
- if (UseNUMA) {
- int lgrp_id = os::numa_get_group_id();
- if (lgrp_id != -1) {
- thread->set_lgrp_id(lgrp_id);
- }
- }
-#endif
// initialize signal mask for this thread
os::Bsd::hotspot_sigmask(thread);
@@ -1099,23 +794,9 @@
// let pthread_create() pick the default value.
}
-#ifndef _ALLBSD_SOURCE
- // glibc guard page
- pthread_attr_setguardsize(&attr, os::Bsd::default_guard_size(thr_type));
-#endif
-
ThreadState state;
{
-
-#ifndef _ALLBSD_SOURCE
- // Serialize thread creation if we are running with fixed stack BsdThreads
- bool lock = os::Bsd::is_BsdThreads() && !os::Bsd::is_floating_stack();
- if (lock) {
- os::Bsd::createThread_lock()->lock_without_safepoint_check();
- }
-#endif
-
pthread_t tid;
int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
@@ -1128,9 +809,6 @@
// Need to clean up stuff we've allocated so far
thread->set_osthread(NULL);
delete osthread;
-#ifndef _ALLBSD_SOURCE
- if (lock) os::Bsd::createThread_lock()->unlock();
-#endif
return false;
}
@@ -1146,11 +824,6 @@
}
}
-#ifndef _ALLBSD_SOURCE
- if (lock) {
- os::Bsd::createThread_lock()->unlock();
- }
-#endif
}
// Aborted due to thread limit being reached
@@ -1188,15 +861,11 @@
}
// Store pthread info into the OSThread
-#ifdef _ALLBSD_SOURCE
#ifdef __APPLE__
osthread->set_thread_id(::mach_thread_self());
#else
osthread->set_thread_id(::pthread_self());
#endif
-#else
- osthread->set_thread_id(os::Bsd::gettid());
-#endif
osthread->set_pthread_id(::pthread_self());
// initialize floating point control register
@@ -1207,35 +876,6 @@
thread->set_osthread(osthread);
-#ifndef _ALLBSD_SOURCE
- if (UseNUMA) {
- int lgrp_id = os::numa_get_group_id();
- if (lgrp_id != -1) {
- thread->set_lgrp_id(lgrp_id);
- }
- }
-
- if (os::Bsd::is_initial_thread()) {
- // If current thread is initial thread, its stack is mapped on demand,
- // see notes about MAP_GROWSDOWN. Here we try to force kernel to map
- // the entire stack region to avoid SEGV in stack banging.
- // It is also useful to get around the heap-stack-gap problem on SuSE
- // kernel (see 4821821 for details). We first expand stack to the top
- // of yellow zone, then enable stack yellow zone (order is significant,
- // enabling yellow zone first will crash JVM on SuSE Bsd), so there
- // is no gap between the last two virtual memory regions.
-
- JavaThread *jt = (JavaThread *)thread;
- address addr = jt->stack_yellow_zone_base();
- assert(addr != NULL, "initialization problem?");
- assert(jt->stack_available(addr) > 0, "stack guard should not be enabled");
-
- osthread->set_expanding_stack();
- os::Bsd::manually_expand_stack(jt, addr);
- osthread->clear_expanding_stack();
- }
-#endif
-
// initialize signal mask for this thread
// and save the caller's signal mask
os::Bsd::hotspot_sigmask(thread);
@@ -1290,247 +930,6 @@
return ThreadLocalStorage::thread();
}
-//////////////////////////////////////////////////////////////////////////////
-// initial thread
-
-#ifndef _ALLBSD_SOURCE
-// Check if current thread is the initial thread, similar to Solaris thr_main.
-bool os::Bsd::is_initial_thread(void) {
- char dummy;
- // If called before init complete, thread stack bottom will be null.
- // Can be called if fatal error occurs before initialization.
- if (initial_thread_stack_bottom() == NULL) return false;
- assert(initial_thread_stack_bottom() != NULL &&
- initial_thread_stack_size() != 0,
- "os::init did not locate initial thread's stack region");
- if ((address)&dummy >= initial_thread_stack_bottom() &&
- (address)&dummy < initial_thread_stack_bottom() + initial_thread_stack_size())
- return true;
- else return false;
-}
-
-// Find the virtual memory area that contains addr
-static bool find_vma(address addr, address* vma_low, address* vma_high) {
- FILE *fp = fopen("/proc/self/maps", "r");
- if (fp) {
- address low, high;
- while (!feof(fp)) {
- if (fscanf(fp, "%p-%p", &low, &high) == 2) {
- if (low <= addr && addr < high) {
- if (vma_low) *vma_low = low;
- if (vma_high) *vma_high = high;
- fclose (fp);
- return true;
- }
- }
- for (;;) {
- int ch = fgetc(fp);
- if (ch == EOF || ch == (int)'\n') break;
- }
- }
- fclose(fp);
- }
- return false;
-}
-
-// Locate initial thread stack. This special handling of initial thread stack
-// is needed because pthread_getattr_np() on most (all?) Bsd distros returns
-// bogus value for initial thread.
-void os::Bsd::capture_initial_stack(size_t max_size) {
- // stack size is the easy part, get it from RLIMIT_STACK
- size_t stack_size;
- struct rlimit rlim;
- getrlimit(RLIMIT_STACK, &rlim);
- stack_size = rlim.rlim_cur;
-
- // 6308388: a bug in ld.so will relocate its own .data section to the
- // lower end of primordial stack; reduce ulimit -s value a little bit
- // so we won't install guard page on ld.so's data section.
- stack_size -= 2 * page_size();
-
- // 4441425: avoid crash with "unlimited" stack size on SuSE 7.1 or Redhat
- // 7.1, in both cases we will get 2G in return value.
- // 4466587: glibc 2.2.x compiled w/o "--enable-kernel=2.4.0" (RH 7.0,
- // SuSE 7.2, Debian) can not handle alternate signal stack correctly
- // for initial thread if its stack size exceeds 6M. Cap it at 2M,
- // in case other parts in glibc still assumes 2M max stack size.
- // FIXME: alt signal stack is gone, maybe we can relax this constraint?
-#ifndef IA64
- if (stack_size > 2 * K * K) stack_size = 2 * K * K;
-#else
- // Problem still exists RH7.2 (IA64 anyway) but 2MB is a little small
- if (stack_size > 4 * K * K) stack_size = 4 * K * K;
-#endif
-
- // Try to figure out where the stack base (top) is. This is harder.
- //
- // When an application is started, glibc saves the initial stack pointer in
- // a global variable "__libc_stack_end", which is then used by system
- // libraries. __libc_stack_end should be pretty close to stack top. The
- // variable is available since the very early days. However, because it is
- // a private interface, it could disappear in the future.
- //
- // Bsd kernel saves start_stack information in /proc/<pid>/stat. Similar
- // to __libc_stack_end, it is very close to stack top, but isn't the real
- // stack top. Note that /proc may not exist if VM is running as a chroot
- // program, so reading /proc/<pid>/stat could fail. Also the contents of
- // /proc/<pid>/stat could change in the future (though unlikely).
- //
- // We try __libc_stack_end first. If that doesn't work, look for
- // /proc/<pid>/stat. If neither of them works, we use current stack pointer
- // as a hint, which should work well in most cases.
-
- uintptr_t stack_start;
-
- // try __libc_stack_end first
- uintptr_t *p = (uintptr_t *)dlsym(RTLD_DEFAULT, "__libc_stack_end");
- if (p && *p) {
- stack_start = *p;
- } else {
- // see if we can get the start_stack field from /proc/self/stat
- FILE *fp;
- int pid;
- char state;
- int ppid;
- int pgrp;
- int session;
- int nr;
- int tpgrp;
- unsigned long flags;
- unsigned long minflt;
- unsigned long cminflt;
- unsigned long majflt;
- unsigned long cmajflt;
- unsigned long utime;
- unsigned long stime;
- long cutime;
- long cstime;
- long prio;
- long nice;
- long junk;
- long it_real;
- uintptr_t start;
- uintptr_t vsize;
- intptr_t rss;
- uintptr_t rsslim;
- uintptr_t scodes;
- uintptr_t ecode;
- int i;
-
- // Figure what the primordial thread stack base is. Code is inspired
- // by email from Hans Boehm. /proc/self/stat begins with current pid,
- // followed by command name surrounded by parentheses, state, etc.
- char stat[2048];
- int statlen;
-
- fp = fopen("/proc/self/stat", "r");
- if (fp) {
- statlen = fread(stat, 1, 2047, fp);
- stat[statlen] = '\0';
- fclose(fp);
-
- // Skip pid and the command string. Note that we could be dealing with
- // weird command names, e.g. user could decide to rename java launcher
- // to "java 1.4.2 :)", then the stat file would look like
- // 1234 (java 1.4.2 :)) R ... ...
- // We don't really need to know the command string, just find the last
- // occurrence of ")" and then start parsing from there. See bug 4726580.
- char * s = strrchr(stat, ')');
-
- i = 0;
- if (s) {
- // Skip blank chars
- do s++; while (isspace(*s));
-
-#define _UFM UINTX_FORMAT
-#define _DFM INTX_FORMAT
-
- /* 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 */
- /* 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 */
- i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld " _UFM _UFM _DFM _UFM _UFM _UFM _UFM,
- &state, /* 3 %c */
- &ppid, /* 4 %d */
- &pgrp, /* 5 %d */
- &session, /* 6 %d */
- &nr, /* 7 %d */
- &tpgrp, /* 8 %d */
- &flags, /* 9 %lu */
- &minflt, /* 10 %lu */
- &cminflt, /* 11 %lu */
- &majflt, /* 12 %lu */
- &cmajflt, /* 13 %lu */
- &utime, /* 14 %lu */
- &stime, /* 15 %lu */
- &cutime, /* 16 %ld */
- &cstime, /* 17 %ld */
- &prio, /* 18 %ld */
- &nice, /* 19 %ld */
- &junk, /* 20 %ld */
- &it_real, /* 21 %ld */
- &start, /* 22 UINTX_FORMAT */
- &vsize, /* 23 UINTX_FORMAT */
- &rss, /* 24 INTX_FORMAT */
- &rsslim, /* 25 UINTX_FORMAT */
- &scodes, /* 26 UINTX_FORMAT */
- &ecode, /* 27 UINTX_FORMAT */
- &stack_start); /* 28 UINTX_FORMAT */
- }
-
-#undef _UFM
-#undef _DFM
-
- if (i != 28 - 2) {
- assert(false, "Bad conversion from /proc/self/stat");
- // product mode - assume we are the initial thread, good luck in the
- // embedded case.
- warning("Can't detect initial thread stack location - bad conversion");
- stack_start = (uintptr_t) &rlim;
- }
- } else {
- // For some reason we can't open /proc/self/stat (for example, running on
- // FreeBSD with a Bsd emulator, or inside chroot), this should work for
- // most cases, so don't abort:
- warning("Can't detect initial thread stack location - no /proc/self/stat");
- stack_start = (uintptr_t) &rlim;
- }
- }
-
- // Now we have a pointer (stack_start) very close to the stack top, the
- // next thing to do is to figure out the exact location of stack top. We
- // can find out the virtual memory area that contains stack_start by
- // reading /proc/self/maps, it should be the last vma in /proc/self/maps,
- // and its upper limit is the real stack top. (again, this would fail if
- // running inside chroot, because /proc may not exist.)
-
- uintptr_t stack_top;
- address low, high;
- if (find_vma((address)stack_start, &low, &high)) {
- // success, "high" is the true stack top. (ignore "low", because initial
- // thread stack grows on demand, its real bottom is high - RLIMIT_STACK.)
- stack_top = (uintptr_t)high;
- } else {
- // failed, likely because /proc/self/maps does not exist
- warning("Can't detect initial thread stack location - find_vma failed");
- // best effort: stack_start is normally within a few pages below the real
- // stack top, use it as stack top, and reduce stack size so we won't put
- // guard page outside stack.
- stack_top = stack_start;
- stack_size -= 16 * page_size();
- }
-
- // stack_top could be partially down the page so align it
- stack_top = align_size_up(stack_top, page_size());
-
- if (max_size && stack_size > max_size) {
- _initial_thread_stack_size = max_size;
- } else {
- _initial_thread_stack_size = stack_size;
- }
-
- _initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size());
- _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;
-}
-#endif
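For reference, the deleted capture_initial_stack() sized the primordial stack from RLIMIT_STACK and looked for its top through the private glibc symbol __libc_stack_end before falling back to /proc/self/stat. A minimal standalone sketch of those two probes (illustrative only; probe_initial_stack is a made-up name and __libc_stack_end is a private interface that may be absent):

    #include <dlfcn.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <sys/resource.h>

    static void probe_initial_stack(size_t* stack_size, uintptr_t* stack_top) {
      struct rlimit rlim;
      getrlimit(RLIMIT_STACK, &rlim);
      *stack_size = (size_t) rlim.rlim_cur;     // soft limit; may be RLIM_INFINITY

      *stack_top = 0;
      // RTLD_DEFAULT may require _GNU_SOURCE with glibc
      uintptr_t* p = (uintptr_t*) dlsym(RTLD_DEFAULT, "__libc_stack_end");
      if (p != NULL && *p != 0) {
        *stack_top = *p;                        // close to, but not exactly, the real top
      }
    }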
////////////////////////////////////////////////////////////////////////////////
// time support
@@ -1576,7 +975,7 @@
void os::Bsd::clock_init() {
// XXXDARWIN: Investigate replacement monotonic clock
}
-#elif defined(_ALLBSD_SOURCE)
+#else
void os::Bsd::clock_init() {
struct timespec res;
struct timespec tp;
@@ -1586,86 +985,8 @@
_clock_gettime = ::clock_gettime;
}
}
-#else
-void os::Bsd::clock_init() {
- // we do dlopen's in this particular order due to bug in bsd
- // dynamical loader (see 6348968) leading to crash on exit
- void* handle = dlopen("librt.so.1", RTLD_LAZY);
- if (handle == NULL) {
- handle = dlopen("librt.so", RTLD_LAZY);
- }
-
- if (handle) {
- int (*clock_getres_func)(clockid_t, struct timespec*) =
- (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres");
- int (*clock_gettime_func)(clockid_t, struct timespec*) =
- (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime");
- if (clock_getres_func && clock_gettime_func) {
- // See if monotonic clock is supported by the kernel. Note that some
- // early implementations simply return kernel jiffies (updated every
- // 1/100 or 1/1000 second). It would be bad to use such a low res clock
- // for nano time (though the monotonic property is still nice to have).
- // It's fixed in newer kernels, however clock_getres() still returns
- // 1/HZ. We check if clock_getres() works, but will ignore its reported
- // resolution for now. Hopefully as people move to new kernels, this
- // won't be a problem.
- struct timespec res;
- struct timespec tp;
- if (clock_getres_func (CLOCK_MONOTONIC, &res) == 0 &&
- clock_gettime_func(CLOCK_MONOTONIC, &tp) == 0) {
- // yes, monotonic clock is supported
- _clock_gettime = clock_gettime_func;
- } else {
- // close librt if there is no monotonic clock
- dlclose(handle);
- }
- }
- }
-}
#endif
-#ifndef _ALLBSD_SOURCE
-#ifndef SYS_clock_getres
-
-#if defined(IA32) || defined(AMD64)
-#define SYS_clock_getres IA32_ONLY(266) AMD64_ONLY(229)
-#define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y)
-#else
-#warning "SYS_clock_getres not defined for this platform, disabling fast_thread_cpu_time"
-#define sys_clock_getres(x,y) -1
-#endif
-
-#else
-#define sys_clock_getres(x,y) ::syscall(SYS_clock_getres, x, y)
-#endif
-
-void os::Bsd::fast_thread_clock_init() {
- if (!UseBsdPosixThreadCPUClocks) {
- return;
- }
- clockid_t clockid;
- struct timespec tp;
- int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) =
- (int(*)(pthread_t, clockid_t *)) dlsym(RTLD_DEFAULT, "pthread_getcpuclockid");
-
- // Switch to using fast clocks for thread cpu time if
- // the sys_clock_getres() returns 0 error code.
- // Note, that some kernels may support the current thread
- // clock (CLOCK_THREAD_CPUTIME_ID) but not the clocks
- // returned by the pthread_getcpuclockid().
- // If the fast Posix clocks are supported then the sys_clock_getres()
- // must return at least tp.tv_sec == 0 which means a resolution
- // better than 1 sec. This is extra check for reliability.
-
- if(pthread_getcpuclockid_func &&
- pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
- sys_clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {
-
- _supports_fast_thread_cpu_time = true;
- _pthread_getcpuclockid = pthread_getcpuclockid_func;
- }
-}
-#endif
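The surviving generic-BSD clock_init() above keeps only the direct probe of CLOCK_MONOTONIC; a standalone sketch of that check, assuming clock_getres(2)/clock_gettime(2) are available (link with -lrt on older systems):

    #include <time.h>

    static int (*monotonic_gettime)(clockid_t, struct timespec*) = NULL;

    static void monotonic_clock_init() {
      struct timespec res;
      struct timespec tp;
      if (clock_getres(CLOCK_MONOTONIC, &res) == 0 &&
          clock_gettime(CLOCK_MONOTONIC, &tp) == 0) {
        monotonic_gettime = clock_gettime;      // the monotonic clock is usable
      }
    }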
jlong os::javaTimeNanos() {
if (Bsd::supports_monotonic_clock()) {
@@ -1978,7 +1299,6 @@
return false;
}
-#ifdef _ALLBSD_SOURCE
// ported from solaris version
bool os::dll_address_to_library_name(address addr, char* buf,
int buflen, int* offset) {
@@ -1994,86 +1314,10 @@
return false;
}
}
-#else
-struct _address_to_library_name {
- address addr; // input : memory address
- size_t buflen; // size of fname
- char* fname; // output: library name
- address base; // library base addr
-};
-
-static int address_to_library_name_callback(struct dl_phdr_info *info,
- size_t size, void *data) {
- int i;
- bool found = false;
- address libbase = NULL;
- struct _address_to_library_name * d = (struct _address_to_library_name *)data;
-
- // iterate through all loadable segments
- for (i = 0; i < info->dlpi_phnum; i++) {
- address segbase = (address)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
- if (info->dlpi_phdr[i].p_type == PT_LOAD) {
- // base address of a library is the lowest address of its loaded
- // segments.
- if (libbase == NULL || libbase > segbase) {
- libbase = segbase;
- }
- // see if 'addr' is within current segment
- if (segbase <= d->addr &&
- d->addr < segbase + info->dlpi_phdr[i].p_memsz) {
- found = true;
- }
- }
- }
-
- // dlpi_name is NULL or empty if the ELF file is executable, return 0
- // so dll_address_to_library_name() can fall through to use dladdr() which
- // can figure out executable name from argv[0].
- if (found && info->dlpi_name && info->dlpi_name[0]) {
- d->base = libbase;
- if (d->fname) {
- jio_snprintf(d->fname, d->buflen, "%s", info->dlpi_name);
- }
- return 1;
- }
- return 0;
-}
-
-bool os::dll_address_to_library_name(address addr, char* buf,
- int buflen, int* offset) {
- Dl_info dlinfo;
- struct _address_to_library_name data;
-
- // There is a bug in old glibc dladdr() implementation that it could resolve
- // to wrong library name if the .so file has a base address != NULL. Here
- // we iterate through the program headers of all loaded libraries to find
- // out which library 'addr' really belongs to. This workaround can be
- // removed once the minimum requirement for glibc is moved to 2.3.x.
- data.addr = addr;
- data.fname = buf;
- data.buflen = buflen;
- data.base = NULL;
- int rslt = dl_iterate_phdr(address_to_library_name_callback, (void *)&data);
-
- if (rslt) {
- // buf already contains library name
- if (offset) *offset = addr - data.base;
- return true;
- } else if (dladdr((void*)addr, &dlinfo)){
- if (buf) jio_snprintf(buf, buflen, "%s", dlinfo.dli_fname);
- if (offset) *offset = addr - (address)dlinfo.dli_fbase;
- return true;
- } else {
- if (buf) buf[0] = '\0';
- if (offset) *offset = -1;
- return false;
- }
-}
-#endif
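With the glibc dl_iterate_phdr() workaround removed, address-to-library resolution on the BSD ports rests on dladdr(3) alone. A minimal sketch of that lookup in standalone form (addr_to_library is a hypothetical helper, not the VM's API):

    #include <dlfcn.h>
    #include <stdio.h>

    static bool addr_to_library(void* addr, char* buf, int buflen, int* offset) {
      Dl_info dlinfo;
      if (dladdr(addr, &dlinfo) != 0 && dlinfo.dli_fname != NULL) {
        snprintf(buf, (size_t) buflen, "%s", dlinfo.dli_fname);
        if (offset != NULL) *offset = (int) ((char*) addr - (char*) dlinfo.dli_fbase);
        return true;
      }
      if (buf != NULL && buflen > 0) buf[0] = '\0';
      if (offset != NULL) *offset = -1;
      return false;
    }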
-
- // Loads .dll/.so and
- // in case of error it checks if .dll/.so was built for the
- // same architecture as Hotspot is running on
+
+// Loads .dll/.so and
+// in case of error it checks if .dll/.so was built for the
+// same architecture as Hotspot is running on
#ifdef __APPLE__
void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
@@ -2292,7 +1536,6 @@
void os::print_dll_info(outputStream *st) {
st->print_cr("Dynamic libraries:");
-#ifdef _ALLBSD_SOURCE
#ifdef RTLD_DI_LINKMAP
Dl_info dli;
void *handle;
@@ -2336,16 +1579,6 @@
#else
st->print_cr("Error: Cannot print dynamic libraries.");
#endif
-#else
- char fname[32];
- pid_t pid = os::Bsd::gettid();
-
- jio_snprintf(fname, sizeof(fname), "/proc/%d/maps", pid);
-
- if (!_print_ascii_file(fname, st)) {
- st->print("Can not get library information for pid = %d\n", pid);
- }
-#endif
}
void os::print_os_info_brief(outputStream* st) {
@@ -2374,22 +1607,10 @@
st->print("Memory:");
st->print(" %dk page", os::vm_page_size()>>10);
-#ifndef _ALLBSD_SOURCE
- // values in struct sysinfo are "unsigned long"
- struct sysinfo si;
- sysinfo(&si);
-#endif
-
st->print(", physical " UINT64_FORMAT "k",
os::physical_memory() >> 10);
st->print("(" UINT64_FORMAT "k free)",
os::available_memory() >> 10);
-#ifndef _ALLBSD_SOURCE
- st->print(", swap " UINT64_FORMAT "k",
- ((jlong)si.totalswap * si.mem_unit) >> 10);
- st->print("(" UINT64_FORMAT "k free)",
- ((jlong)si.freeswap * si.mem_unit) >> 10);
-#endif
st->cr();
// meminfo
@@ -2786,42 +2007,13 @@
#endif
}
-#ifndef _ALLBSD_SOURCE
-// Define MAP_HUGETLB here so we can build HotSpot on old systems.
-#ifndef MAP_HUGETLB
-#define MAP_HUGETLB 0x40000
-#endif
-
-// Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
-#ifndef MADV_HUGEPAGE
-#define MADV_HUGEPAGE 14
-#endif
-#endif
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
bool exec) {
-#ifndef _ALLBSD_SOURCE
- if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
- int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
- uintptr_t res =
- (uintptr_t) ::mmap(addr, size, prot,
- MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
- -1, 0);
- return res != (uintptr_t) MAP_FAILED;
- }
-#endif
-
return commit_memory(addr, size, exec);
}
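For reference, the Linux-only branch removed above committed large pages by mapping them directly with MAP_HUGETLB; a sketch of that call (MAP_HUGETLB is Linux-specific, which is why the deleted code had to define it itself):

    #include <sys/mman.h>

    // Assumes MAP_HUGETLB is available; not the case on the BSDs.
    static bool commit_huge_pages(char* addr, size_t size, bool exec) {
      int prot = exec ? PROT_READ | PROT_WRITE | PROT_EXEC : PROT_READ | PROT_WRITE;
      void* res = mmap(addr, size, prot,
                       MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS | MAP_HUGETLB,
                       -1, 0);
      return res != MAP_FAILED;
    }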
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
-#ifndef _ALLBSD_SOURCE
- if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
- // We don't check the return value: madvise(MADV_HUGEPAGE) may not
- // be supported or the memory may already be backed by huge pages.
- ::madvise(addr, bytes, MADV_HUGEPAGE);
- }
-#endif
}
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
@@ -2860,111 +2052,6 @@
return end;
}
-#ifndef _ALLBSD_SOURCE
-// Something to do with the numa-aware allocator needs these symbols
-extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
-extern "C" JNIEXPORT void numa_error(char *where) { }
-extern "C" JNIEXPORT int fork1() { return fork(); }
-
-
-// If we are running with libnuma version > 2, then we should
-// be trying to use symbols with versions 1.1
-// If we are running with earlier version, which did not have symbol versions,
-// we should use the base version.
-void* os::Bsd::libnuma_dlsym(void* handle, const char *name) {
- void *f = dlvsym(handle, name, "libnuma_1.1");
- if (f == NULL) {
- f = dlsym(handle, name);
- }
- return f;
-}
-
-bool os::Bsd::libnuma_init() {
- // sched_getcpu() should be in libc.
- set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
- dlsym(RTLD_DEFAULT, "sched_getcpu")));
-
- if (sched_getcpu() != -1) { // Does it work?
- void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
- if (handle != NULL) {
- set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
- libnuma_dlsym(handle, "numa_node_to_cpus")));
- set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
- libnuma_dlsym(handle, "numa_max_node")));
- set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
- libnuma_dlsym(handle, "numa_available")));
- set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
- libnuma_dlsym(handle, "numa_tonode_memory")));
- set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
- libnuma_dlsym(handle, "numa_interleave_memory")));
-
-
- if (numa_available() != -1) {
- set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
- // Create a cpu -> node mapping
- _cpu_to_node = new (ResourceObj::C_HEAP) GrowableArray<int>(0, true);
- rebuild_cpu_to_node_map();
- return true;
- }
- }
- }
- return false;
-}
-
-// rebuild_cpu_to_node_map() constructs a table mapping cpud id to node id.
-// The table is later used in get_node_by_cpu().
-void os::Bsd::rebuild_cpu_to_node_map() {
- const size_t NCPUS = 32768; // Since the buffer size computation is very obscure
- // in libnuma (possible values are starting from 16,
- // and continuing up with every other power of 2, but less
- // than the maximum number of CPUs supported by kernel), and
- // is a subject to change (in libnuma version 2 the requirements
- // are more reasonable) we'll just hardcode the number they use
- // in the library.
- const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;
-
- size_t cpu_num = os::active_processor_count();
- size_t cpu_map_size = NCPUS / BitsPerCLong;
- size_t cpu_map_valid_size =
- MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);
-
- cpu_to_node()->clear();
- cpu_to_node()->at_grow(cpu_num - 1);
- size_t node_num = numa_get_groups_num();
-
- unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size);
- for (size_t i = 0; i < node_num; i++) {
- if (numa_node_to_cpus(i, cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
- for (size_t j = 0; j < cpu_map_valid_size; j++) {
- if (cpu_map[j] != 0) {
- for (size_t k = 0; k < BitsPerCLong; k++) {
- if (cpu_map[j] & (1UL << k)) {
- cpu_to_node()->at_put(j * BitsPerCLong + k, i);
- }
- }
- }
- }
- }
- }
- FREE_C_HEAP_ARRAY(unsigned long, cpu_map);
-}
-
-int os::Bsd::get_node_by_cpu(int cpu_id) {
- if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
- return cpu_to_node()->at(cpu_id);
- }
- return -1;
-}
-
-GrowableArray<int>* os::Bsd::_cpu_to_node;
-os::Bsd::sched_getcpu_func_t os::Bsd::_sched_getcpu;
-os::Bsd::numa_node_to_cpus_func_t os::Bsd::_numa_node_to_cpus;
-os::Bsd::numa_max_node_func_t os::Bsd::_numa_max_node;
-os::Bsd::numa_available_func_t os::Bsd::_numa_available;
-os::Bsd::numa_tonode_memory_func_t os::Bsd::_numa_tonode_memory;
-os::Bsd::numa_interleave_memory_func_t os::Bsd::_numa_interleave_memory;
-unsigned long* os::Bsd::_numa_all_nodes;
-#endif
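The removed libnuma glue bound its symbols with dlvsym() so that a version-2 libnuma would still expose the 1.1 interface. A standalone sketch of that lookup pattern (dlvsym is a GNU extension, one more reason it has no place in the BSD file):

    #include <dlfcn.h>

    // Prefer the versioned symbol, fall back to the unversioned one.
    // dlvsym() requires _GNU_SOURCE and a GNU dynamic linker.
    static void* numa_dlsym(void* handle, const char* name) {
      void* f = dlvsym(handle, name, "libnuma_1.1");
      if (f == NULL) {
        f = dlsym(handle, name);
      }
      return f;
    }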
bool os::pd_uncommit_memory(char* addr, size_t size) {
#ifdef __OpenBSD__
@@ -3084,42 +2171,7 @@
}
bool os::Bsd::hugetlbfs_sanity_check(bool warn, size_t page_size) {
- bool result = false;
-#ifndef _ALLBSD_SOURCE
- void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE,
- MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
- -1, 0);
-
- if (p != (void *) -1) {
- // We don't know if this really is a huge page or not.
- FILE *fp = fopen("/proc/self/maps", "r");
- if (fp) {
- while (!feof(fp)) {
- char chars[257];
- long x = 0;
- if (fgets(chars, sizeof(chars), fp)) {
- if (sscanf(chars, "%lx-%*x", &x) == 1
- && x == (long)p) {
- if (strstr (chars, "hugepage")) {
- result = true;
- break;
- }
- }
- }
- }
- fclose(fp);
- }
- munmap (p, page_size);
- if (result)
- return true;
- }
-
- if (warn) {
- warning("HugeTLBFS is not supported by the operating system.");
- }
-#endif
-
- return result;
+ return false;
}
/*
@@ -3164,92 +2216,8 @@
static size_t _large_page_size = 0;
void os::large_page_init() {
-#ifndef _ALLBSD_SOURCE
- if (!UseLargePages) {
- UseHugeTLBFS = false;
- UseSHM = false;
- return;
- }
-
- if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
- // If UseLargePages is specified on the command line try both methods,
- // if it's default, then try only HugeTLBFS.
- if (FLAG_IS_DEFAULT(UseLargePages)) {
- UseHugeTLBFS = true;
- } else {
- UseHugeTLBFS = UseSHM = true;
- }
- }
-
- if (LargePageSizeInBytes) {
- _large_page_size = LargePageSizeInBytes;
- } else {
- // large_page_size on Bsd is used to round up heap size. x86 uses either
- // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
- // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
- // page as large as 256M.
- //
- // Here we try to figure out page size by parsing /proc/meminfo and looking
- // for a line with the following format:
- // Hugepagesize: 2048 kB
- //
- // If we can't determine the value (e.g. /proc is not mounted, or the text
- // format has been changed), we'll use the largest page size supported by
- // the processor.
-
-#ifndef ZERO
- _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
- ARM_ONLY(2 * M) PPC_ONLY(4 * M);
-#endif // ZERO
-
- FILE *fp = fopen("/proc/meminfo", "r");
- if (fp) {
- while (!feof(fp)) {
- int x = 0;
- char buf[16];
- if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
- if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
- _large_page_size = x * K;
- break;
- }
- } else {
- // skip to next line
- for (;;) {
- int ch = fgetc(fp);
- if (ch == EOF || ch == (int)'\n') break;
- }
- }
- }
- fclose(fp);
- }
- }
-
- // print a warning if any large page related flag is specified on command line
- bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
-
- const size_t default_page_size = (size_t)Bsd::page_size();
- if (_large_page_size > default_page_size) {
- _page_sizes[0] = _large_page_size;
- _page_sizes[1] = default_page_size;
- _page_sizes[2] = 0;
- }
- UseHugeTLBFS = UseHugeTLBFS &&
- Bsd::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
-
- if (UseHugeTLBFS)
- UseSHM = false;
-
- UseLargePages = UseHugeTLBFS || UseSHM;
-
- set_coredump_filter();
-#endif
}
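The deleted large_page_init() derived the huge page size by scanning /proc/meminfo for a "Hugepagesize:" line; a compact sketch of that parse, kept here only as Linux background:

    #include <stdio.h>

    static size_t hugepagesize_from_meminfo() {
      size_t result = 0;
      FILE* fp = fopen("/proc/meminfo", "r");
      if (fp != NULL) {
        char line[128];
        long kb = 0;
        while (fgets(line, sizeof(line), fp) != NULL) {
          if (sscanf(line, "Hugepagesize: %ld kB", &kb) == 1) {
            result = (size_t) kb * 1024;        // e.g. "Hugepagesize: 2048 kB" -> 2M
            break;
          }
        }
        fclose(fp);
      }
      return result;
    }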
-#ifndef _ALLBSD_SOURCE
-#ifndef SHM_HUGETLB
-#define SHM_HUGETLB 04000
-#endif
-#endif
char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
// "exec" is passed in but not used. Creating the shared image for
@@ -3267,11 +2235,7 @@
// Create a large shared memory region to attach to based on size.
// Currently, size is the total size of the heap
-#ifndef _ALLBSD_SOURCE
- int shmid = shmget(key, bytes, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W);
-#else
int shmid = shmget(key, bytes, IPC_CREAT|SHM_R|SHM_W);
-#endif
if (shmid == -1) {
// Possible reasons for shmget failure:
// 1. shmmax is too small for Java heap.
@@ -3558,7 +2522,7 @@
// this reason, the code should not be used as default (ThreadPriorityPolicy=0).
// It is only used when ThreadPriorityPolicy=1 and requires root privilege.
-#if defined(_ALLBSD_SOURCE) && !defined(__APPLE__)
+#if !defined(__APPLE__)
int os::java_to_os_priority[CriticalPriority + 1] = {
19, // 0 Entry should never be used
@@ -3578,7 +2542,7 @@
31 // 11 CriticalPriority
};
-#elif defined(__APPLE__)
+#else
/* Using Mach high-level priority assignments */
int os::java_to_os_priority[CriticalPriority + 1] = {
0, // 0 Entry should never be used (MINPRI_USER)
@@ -3599,26 +2563,6 @@
36 // 11 CriticalPriority
};
-#else
-int os::java_to_os_priority[CriticalPriority + 1] = {
- 19, // 0 Entry should never be used
-
- 4, // 1 MinPriority
- 3, // 2
- 2, // 3
-
- 1, // 4
- 0, // 5 NormPriority
- -1, // 6
-
- -2, // 7
- -3, // 8
- -4, // 9 NearMaxPriority
-
- -5, // 10 MaxPriority
-
- -5 // 11 CriticalPriority
-};
#endif
static int prio_init() {
@@ -4179,22 +3123,6 @@
}
}
-#ifndef _ALLBSD_SOURCE
-// This is the fastest way to get thread cpu time on Bsd.
-// Returns cpu time (user+sys) for any thread, not only for current.
-// POSIX compliant clocks are implemented in the kernels 2.6.16+.
-// It might work on 2.6.10+ with a special kernel/glibc patch.
-// For reference, please, see IEEE Std 1003.1-2004:
-// http://www.unix.org/single_unix_specification
-
-jlong os::Bsd::fast_thread_cpu_time(clockid_t clockid) {
- struct timespec tp;
- int rc = os::Bsd::clock_gettime(clockid, &tp);
- assert(rc == 0, "clock_gettime is expected to return 0 code");
-
- return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec;
-}
-#endif
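The removed fast path simply read the calling thread's POSIX CPU-time clock; in standalone form (assuming the platform defines CLOCK_THREAD_CPUTIME_ID):

    #include <time.h>

    // CPU time (user + sys) consumed by the current thread, in nanoseconds.
    static long long current_thread_cpu_ns() {
      struct timespec tp;
      if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &tp) != 0) {
        return -1;
      }
      return (long long) tp.tv_sec * 1000000000LL + tp.tv_nsec;
    }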
/////
// glibc on Bsd platform uses non-documented flag
@@ -4458,10 +3386,6 @@
// this is called _after_ the global arguments have been parsed
jint os::init_2(void)
{
-#ifndef _ALLBSD_SOURCE
- Bsd::fast_thread_clock_init();
-#endif
-
// Allocate a single page and mark it as readable for safepoint polling
address polling_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
guarantee( polling_page != MAP_FAILED, "os::init_2: failed to allocate polling page" );
@@ -4518,48 +3442,6 @@
JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes,
vm_page_size()));
-#ifndef _ALLBSD_SOURCE
- Bsd::capture_initial_stack(JavaThread::stack_size_at_create());
-
- Bsd::libpthread_init();
- if (PrintMiscellaneous && (Verbose || WizardMode)) {
- tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
- Bsd::glibc_version(), Bsd::libpthread_version(),
- Bsd::is_floating_stack() ? "floating stack" : "fixed stack");
- }
-
- if (UseNUMA) {
- if (!Bsd::libnuma_init()) {
- UseNUMA = false;
- } else {
- if ((Bsd::numa_max_node() < 1)) {
- // There's only one node(they start from 0), disable NUMA.
- UseNUMA = false;
- }
- }
- // With SHM large pages we cannot uncommit a page, so there's not way
- // we can make the adaptive lgrp chunk resizing work. If the user specified
- // both UseNUMA and UseLargePages (or UseSHM) on the command line - warn and
- // disable adaptive resizing.
- if (UseNUMA && UseLargePages && UseSHM) {
- if (!FLAG_IS_DEFAULT(UseNUMA)) {
- if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) {
- UseLargePages = false;
- } else {
- warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing");
- UseAdaptiveSizePolicy = false;
- UseAdaptiveNUMAChunkSizing = false;
- }
- } else {
- UseNUMA = false;
- }
- }
- if (!UseNUMA && ForceNUMA) {
- UseNUMA = true;
- }
- }
-#endif
-
if (MaxFDLimit) {
// set the number of file descriptors to max. print out error
// if getrlimit/setrlimit fails but continue regardless.
@@ -4586,11 +3468,6 @@
}
}
-#ifndef _ALLBSD_SOURCE
- // Initialize lock used to serialize thread creation (see os::create_thread)
- Bsd::set_createThread_lock(new Mutex(Mutex::leaf, "createThread_lock", false));
-#endif
-
// at-exit methods are called in the reverse order of their registration.
// atexit functions are called on return from main or as a result of a
// call to exit(3C). There can be only 32 of these functions registered
@@ -4641,15 +3518,7 @@
};
int os::active_processor_count() {
-#ifdef _ALLBSD_SOURCE
return _processor_count;
-#else
- // Bsd doesn't yet have a (official) notion of processor sets,
- // so just return the number of online processors.
- int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
- assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
- return online_cpus;
-#endif
}
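Where the deleted branch asked the kernel for the number of online CPUs, a standalone equivalent of that query is:

    #include <unistd.h>

    static int online_cpu_count() {
      long n = sysconf(_SC_NPROCESSORS_ONLN);   // -1 if the name is unsupported
      return n > 0 ? (int) n : 1;
    }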
void os::set_native_thread_name(const char *name) {
@@ -4703,25 +3572,7 @@
int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
{
-#ifdef _ALLBSD_SOURCE
return pthread_cond_timedwait(_cond, _mutex, _abstime);
-#else
- if (is_NPTL()) {
- return pthread_cond_timedwait(_cond, _mutex, _abstime);
- } else {
-#ifndef IA64
- // 6292965: BsdThreads pthread_cond_timedwait() resets FPU control
- // word back to default 64bit precision if condvar is signaled. Java
- // wants 53bit precision. Save and restore current value.
- int fpu = get_fpu_control_word();
-#endif // IA64
- int status = pthread_cond_timedwait(_cond, _mutex, _abstime);
-#ifndef IA64
- set_fpu_control_word(fpu);
-#endif // IA64
- return status;
- }
-#endif
}
////////////////////////////////////////////////////////////////////////////////
@@ -5041,20 +3892,6 @@
return munmap(addr, bytes) == 0;
}
-#ifndef _ALLBSD_SOURCE
-static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time);
-
-static clockid_t thread_cpu_clockid(Thread* thread) {
- pthread_t tid = thread->osthread()->pthread_id();
- clockid_t clockid;
-
- // Get thread clockid
- int rc = os::Bsd::pthread_getcpuclockid(tid, &clockid);
- assert(rc == 0, "pthread_getcpuclockid is expected to return 0 code");
- return clockid;
-}
-#endif
-
// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
@@ -5065,36 +3902,15 @@
jlong os::current_thread_cpu_time() {
#ifdef __APPLE__
return os::thread_cpu_time(Thread::current(), true /* user + sys */);
-#elif !defined(_ALLBSD_SOURCE)
- if (os::Bsd::supports_fast_thread_cpu_time()) {
- return os::Bsd::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
- } else {
- // return user + sys since the cost is the same
- return slow_thread_cpu_time(Thread::current(), true /* user + sys */);
- }
#endif
}
jlong os::thread_cpu_time(Thread* thread) {
-#ifndef _ALLBSD_SOURCE
- // consistent with what current_thread_cpu_time() returns
- if (os::Bsd::supports_fast_thread_cpu_time()) {
- return os::Bsd::fast_thread_cpu_time(thread_cpu_clockid(thread));
- } else {
- return slow_thread_cpu_time(thread, true /* user + sys */);
- }
-#endif
}
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
#ifdef __APPLE__
return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
-#elif !defined(_ALLBSD_SOURCE)
- if (user_sys_cpu_time && os::Bsd::supports_fast_thread_cpu_time()) {
- return os::Bsd::fast_thread_cpu_time(CLOCK_THREAD_CPUTIME_ID);
- } else {
- return slow_thread_cpu_time(Thread::current(), user_sys_cpu_time);
- }
#endif
}
@@ -5118,106 +3934,9 @@
} else {
return ((jlong)tinfo.user_time.seconds * 1000000000) + ((jlong)tinfo.user_time.microseconds * (jlong)1000);
}
-#elif !defined(_ALLBSD_SOURCE)
- if (user_sys_cpu_time && os::Bsd::supports_fast_thread_cpu_time()) {
- return os::Bsd::fast_thread_cpu_time(thread_cpu_clockid(thread));
- } else {
- return slow_thread_cpu_time(thread, user_sys_cpu_time);
- }
#endif
}
-#ifndef _ALLBSD_SOURCE
-//
-// -1 on error.
-//
-
-static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
- static bool proc_pid_cpu_avail = true;
- static bool proc_task_unchecked = true;
- static const char *proc_stat_path = "/proc/%d/stat";
- pid_t tid = thread->osthread()->thread_id();
- int i;
- char *s;
- char stat[2048];
- int statlen;
- char proc_name[64];
- int count;
- long sys_time, user_time;
- char string[64];
- char cdummy;
- int idummy;
- long ldummy;
- FILE *fp;
-
- // We first try accessing /proc/<pid>/cpu since this is faster to
- // process. If this file is not present (bsd kernels 2.5 and above)
- // then we open /proc/<pid>/stat.
- if ( proc_pid_cpu_avail ) {
- sprintf(proc_name, "/proc/%d/cpu", tid);
- fp = fopen(proc_name, "r");
- if ( fp != NULL ) {
- count = fscanf( fp, "%s %lu %lu\n", string, &user_time, &sys_time);
- fclose(fp);
- if ( count != 3 ) return -1;
-
- if (user_sys_cpu_time) {
- return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
- } else {
- return (jlong)user_time * (1000000000 / clock_tics_per_sec);
- }
- }
- else proc_pid_cpu_avail = false;
- }
-
- // The /proc/<tid>/stat aggregates per-process usage on
- // new Bsd kernels 2.6+ where NPTL is supported.
- // The /proc/self/task/<tid>/stat still has the per-thread usage.
- // See bug 6328462.
- // There can be no directory /proc/self/task on kernels 2.4 with NPTL
- // and possibly in some other cases, so we check its availability.
- if (proc_task_unchecked && os::Bsd::is_NPTL()) {
- // This is executed only once
- proc_task_unchecked = false;
- fp = fopen("/proc/self/task", "r");
- if (fp != NULL) {
- proc_stat_path = "/proc/self/task/%d/stat";
- fclose(fp);
- }
- }
-
- sprintf(proc_name, proc_stat_path, tid);
- fp = fopen(proc_name, "r");
- if ( fp == NULL ) return -1;
- statlen = fread(stat, 1, 2047, fp);
- stat[statlen] = '\0';
- fclose(fp);
-
- // Skip pid and the command string. Note that we could be dealing with
- // weird command names, e.g. user could decide to rename java launcher
- // to "java 1.4.2 :)", then the stat file would look like
- // 1234 (java 1.4.2 :)) R ... ...
- // We don't really need to know the command string, just find the last
- // occurrence of ")" and then start parsing from there. See bug 4726580.
- s = strrchr(stat, ')');
- i = 0;
- if (s == NULL ) return -1;
-
- // Skip blank chars
- do s++; while (isspace(*s));
-
- count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
- &cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,
- &ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
- &user_time, &sys_time);
- if ( count != 13 ) return -1;
- if (user_sys_cpu_time) {
- return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
- } else {
- return (jlong)user_time * (1000000000 / clock_tics_per_sec);
- }
-}
-#endif
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits
@@ -5236,10 +3955,8 @@
bool os::is_thread_cpu_time_supported() {
#ifdef __APPLE__
return true;
-#elif defined(_ALLBSD_SOURCE)
+#else
return false;
-#else
- return true;
#endif
}
--- a/hotspot/src/os/bsd/vm/os_bsd.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/os/bsd/vm/os_bsd.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -56,19 +56,6 @@
static int sigflags[MAXSIGNUM];
static int (*_clock_gettime)(clockid_t, struct timespec *);
-#ifndef _ALLBSD_SOURCE
- static int (*_pthread_getcpuclockid)(pthread_t, clockid_t *);
-
- static address _initial_thread_stack_bottom;
- static uintptr_t _initial_thread_stack_size;
-
- static const char *_glibc_version;
- static const char *_libpthread_version;
-
- static bool _is_floating_stack;
- static bool _is_NPTL;
- static bool _supports_fast_thread_cpu_time;
-#endif
static GrowableArray<int>* _cpu_to_node;
@@ -76,28 +63,14 @@
static julong _physical_memory;
static pthread_t _main_thread;
-#ifndef _ALLBSD_SOURCE
- static Mutex* _createThread_lock;
-#endif
static int _page_size;
static julong available_memory();
static julong physical_memory() { return _physical_memory; }
static void initialize_system_info();
-#ifndef _ALLBSD_SOURCE
- static void set_glibc_version(const char *s) { _glibc_version = s; }
- static void set_libpthread_version(const char *s) { _libpthread_version = s; }
-#endif
-
static bool supports_variable_stack_size();
-#ifndef _ALLBSD_SOURCE
- static void set_is_NPTL() { _is_NPTL = true; }
- static void set_is_BsdThreads() { _is_NPTL = false; }
- static void set_is_floating_stack() { _is_floating_stack = true; }
-#endif
-
static void rebuild_cpu_to_node_map();
static GrowableArray<int>* cpu_to_node() { return _cpu_to_node; }
@@ -106,25 +79,10 @@
public:
static void init_thread_fpu_state();
-#ifndef _ALLBSD_SOURCE
- static int get_fpu_control_word();
- static void set_fpu_control_word(int fpu_control);
-#endif
static pthread_t main_thread(void) { return _main_thread; }
-#ifndef _ALLBSD_SOURCE
- // returns kernel thread id (similar to LWP id on Solaris), which can be
- // used to access /proc
- static pid_t gettid();
- static void set_createThread_lock(Mutex* lk) { _createThread_lock = lk; }
- static Mutex* createThread_lock(void) { return _createThread_lock; }
-#endif
static void hotspot_sigmask(Thread* thread);
-#ifndef _ALLBSD_SOURCE
- static address initial_thread_stack_bottom(void) { return _initial_thread_stack_bottom; }
- static uintptr_t initial_thread_stack_size(void) { return _initial_thread_stack_size; }
-#endif
static bool is_initial_thread(void);
static int page_size(void) { return _page_size; }
@@ -161,23 +119,6 @@
static struct sigaction *get_chained_signal_action(int sig);
static bool chained_handler(int sig, siginfo_t* siginfo, void* context);
-#ifndef _ALLBSD_SOURCE
- // GNU libc and libpthread version strings
- static const char *glibc_version() { return _glibc_version; }
- static const char *libpthread_version() { return _libpthread_version; }
-
- // NPTL or BsdThreads?
- static bool is_BsdThreads() { return !_is_NPTL; }
- static bool is_NPTL() { return _is_NPTL; }
-
- // NPTL is always floating stack. BsdThreads could be using floating
- // stack or fixed stack.
- static bool is_floating_stack() { return _is_floating_stack; }
-
- static void libpthread_init();
- static bool libnuma_init();
- static void* libnuma_dlsym(void* handle, const char* name);
-#endif
// Minimum stack size a thread can be created with (allowing
// the VM to completely create the thread and enter user code)
static size_t min_stack_allowed;
@@ -186,22 +127,9 @@
static size_t default_stack_size(os::ThreadType thr_type);
static size_t default_guard_size(os::ThreadType thr_type);
-#ifndef _ALLBSD_SOURCE
- static void capture_initial_stack(size_t max_size);
-
- // Stack overflow handling
- static bool manually_expand_stack(JavaThread * t, address addr);
- static int max_register_window_saves_before_flushing();
-#endif
-
// Real-time clock functions
static void clock_init(void);
-#ifndef _ALLBSD_SOURCE
- // fast POSIX clocks support
- static void fast_thread_clock_init(void);
-#endif
-
static inline bool supports_monotonic_clock() {
return _clock_gettime != NULL;
}
@@ -210,18 +138,6 @@
return _clock_gettime ? _clock_gettime(clock_id, tp) : -1;
}
-#ifndef _ALLBSD_SOURCE
- static int pthread_getcpuclockid(pthread_t tid, clockid_t *clock_id) {
- return _pthread_getcpuclockid ? _pthread_getcpuclockid(tid, clock_id) : -1;
- }
-
- static bool supports_fast_thread_cpu_time() {
- return _supports_fast_thread_cpu_time;
- }
-
- static jlong fast_thread_cpu_time(clockid_t clockid);
-#endif
-
// Stack repair handling
// none present
--- a/hotspot/src/os/windows/vm/os_windows.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -22,7 +22,7 @@
*
*/
-// Must be at least Windows 2000 or XP to use VectoredExceptions and IsDebuggerPresent
+// Must be at least Windows 2000 or XP to use IsDebuggerPresent
#define _WIN32_WINNT 0x500
// no precompiled headers
@@ -110,10 +110,6 @@
static FILETIME process_user_time;
static FILETIME process_kernel_time;
-#ifdef _WIN64
-PVOID topLevelVectoredExceptionHandler = NULL;
-#endif
-
#ifdef _M_IA64
#define __CPU__ ia64
#elif _M_AMD64
@@ -136,12 +132,6 @@
case DLL_PROCESS_DETACH:
if(ForceTimeHighResolution)
timeEndPeriod(1L);
-#ifdef _WIN64
- if (topLevelVectoredExceptionHandler != NULL) {
- RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
- topLevelVectoredExceptionHandler = NULL;
- }
-#endif
break;
default:
break;
@@ -408,20 +398,14 @@
}
- if (UseVectoredExceptions) {
- // If we are using vectored exception we don't need to set a SEH
- thread->run();
- }
- else {
- // Install a win32 structured exception handler around every thread created
- // by VM, so VM can genrate error dump when an exception occurred in non-
- // Java thread (e.g. VM thread).
- __try {
- thread->run();
- } __except(topLevelExceptionFilter(
- (_EXCEPTION_POINTERS*)_exception_info())) {
- // Nothing to do.
- }
+ // Install a win32 structured exception handler around every thread created
+ // by VM, so the VM can generate an error dump when an exception occurs in a non-
+ // Java thread (e.g. VM thread).
+ __try {
+ thread->run();
+ } __except(topLevelExceptionFilter(
+ (_EXCEPTION_POINTERS*)_exception_info())) {
+ // Nothing to do.
}
// One less thread is executing
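The retained path wraps every VM-created thread in a Win32 structured exception handler; a minimal illustration of the pattern (EXCEPTION_EXECUTE_HANDLER stands in for the real topLevelExceptionFilter, and risky_work is a placeholder body):

    #include <windows.h>

    static void risky_work() { /* thread body goes here */ }

    static DWORD WINAPI thread_start(LPVOID arg) {
      (void) arg;
      __try {                                   // MSVC-specific SEH keywords
        risky_work();
      } __except(EXCEPTION_EXECUTE_HANDLER) {
        // a production VM would generate an error dump here
      }
      return 0;
    }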
@@ -2489,16 +2473,6 @@
}
#endif
-#ifdef _WIN64
- // Windows will sometimes generate an access violation
- // when we call malloc. Since we use VectoredExceptions
- // on 64 bit platforms, we see this exception. We must
- // pass this exception on so Windows can recover.
- // We check to see if the pc of the fault is in NTDLL.DLL
- // if so, we pass control on to Windows for handling.
- if (UseVectoredExceptions && _addr_in_ntdll(pc)) return EXCEPTION_CONTINUE_SEARCH;
-#endif
-
// Stack overflow or null pointer exception in native code.
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
exceptionInfo->ContextRecord);
@@ -2527,30 +2501,8 @@
}
if (exception_code != EXCEPTION_BREAKPOINT) {
-#ifndef _WIN64
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
exceptionInfo->ContextRecord);
-#else
- // Itanium Windows uses a VectoredExceptionHandler
- // Which means that C++ programatic exception handlers (try/except)
- // will get here. Continue the search for the right except block if
- // the exception code is not a fatal code.
- switch ( exception_code ) {
- case EXCEPTION_ACCESS_VIOLATION:
- case EXCEPTION_STACK_OVERFLOW:
- case EXCEPTION_ILLEGAL_INSTRUCTION:
- case EXCEPTION_ILLEGAL_INSTRUCTION_2:
- case EXCEPTION_INT_OVERFLOW:
- case EXCEPTION_INT_DIVIDE_BY_ZERO:
- case EXCEPTION_UNCAUGHT_CXX_EXCEPTION:
- { report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
- exceptionInfo->ContextRecord);
- }
- break;
- default:
- break;
- }
-#endif
}
return EXCEPTION_CONTINUE_SEARCH;
}
@@ -3706,18 +3658,6 @@
// Setup Windows Exceptions
- // On Itanium systems, Structured Exception Handling does not
- // work since stack frames must be walkable by the OS. Since
- // much of our code is dynamically generated, and we do not have
- // proper unwind .xdata sections, the system simply exits
- // rather than delivering the exception. To work around
- // this we use VectorExceptions instead.
-#ifdef _WIN64
- if (UseVectoredExceptions) {
- topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelExceptionFilter);
- }
-#endif
-
// for debugging float code generation bugs
if (ForceFloatExceptions) {
#ifndef _WIN64
--- a/hotspot/src/os_cpu/bsd_x86/vm/bytes_bsd_x86.inline.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/os_cpu/bsd_x86/vm/bytes_bsd_x86.inline.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -25,10 +25,6 @@
#ifndef OS_CPU_BSD_X86_VM_BYTES_BSD_X86_INLINE_HPP
#define OS_CPU_BSD_X86_VM_BYTES_BSD_X86_INLINE_HPP
-#ifndef _ALLBSD_SOURCE
-#include <byteswap.h>
-#endif
-
#ifdef __APPLE__
#include <libkern/OSByteOrder.h>
#endif
--- a/hotspot/src/os_cpu/bsd_x86/vm/globals_bsd_x86.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/os_cpu/bsd_x86/vm/globals_bsd_x86.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -48,7 +48,5 @@
// Used on 64 bit platforms for UseCompressedOops base address or CDS
define_pd_global(uintx, HeapBaseMinAddress, 2*G);
-// Only used on 64 bit Windows platforms
-define_pd_global(bool, UseVectoredExceptions, false);
#endif // OS_CPU_BSD_X86_VM_GLOBALS_BSD_X86_HPP
--- a/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -76,7 +76,7 @@
# include <ucontext.h>
#endif
-#if defined(_ALLBSD_SOURCE) && !defined(__APPLE__) && !defined(__NetBSD__)
+#if !defined(__APPLE__) && !defined(__NetBSD__)
# include <pthread_np.h>
#endif
@@ -489,23 +489,6 @@
// to handle_unexpected_exception way down below.
thread->disable_stack_red_zone();
tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
-#ifndef _ALLBSD_SOURCE
- } else {
- // Accessing stack address below sp may cause SEGV if current
- // thread has MAP_GROWSDOWN stack. This should only happen when
- // current thread was created by user code with MAP_GROWSDOWN flag
- // and then attached to VM. See notes in os_bsd.cpp.
- if (thread->osthread()->expanding_stack() == 0) {
- thread->osthread()->set_expanding_stack();
- if (os::Bsd::manually_expand_stack(thread, addr)) {
- thread->osthread()->clear_expanding_stack();
- return 1;
- }
- thread->osthread()->clear_expanding_stack();
- } else {
- fatal("recursive segv. expanding stack.");
- }
-#endif
}
}
}
@@ -744,61 +727,21 @@
ShouldNotReachHere();
}
-#ifdef _ALLBSD_SOURCE
// From solaris_i486.s ported to bsd_i486.s
extern "C" void fixcw();
-#endif
void os::Bsd::init_thread_fpu_state(void) {
#ifndef AMD64
-# ifdef _ALLBSD_SOURCE
// Set fpu to 53 bit precision. This happens too early to use a stub.
fixcw();
-# else
- // set fpu to 53 bit precision
- set_fpu_control_word(0x27f);
-# endif
#endif // !AMD64
}
-#ifndef _ALLBSD_SOURCE
-int os::Bsd::get_fpu_control_word(void) {
-#ifdef AMD64
- return 0;
-#else
- int fpu_control;
- _FPU_GETCW(fpu_control);
- return fpu_control & 0xffff;
-#endif // AMD64
-}
-
-void os::Bsd::set_fpu_control_word(int fpu_control) {
-#ifndef AMD64
- _FPU_SETCW(fpu_control);
-#endif // !AMD64
-}
-#endif
// Check that the bsd kernel version is 2.4 or higher since earlier
// versions do not support SSE without patches.
bool os::supports_sse() {
-#if defined(AMD64) || defined(_ALLBSD_SOURCE)
return true;
-#else
- struct utsname uts;
- if( uname(&uts) != 0 ) return false; // uname fails?
- char *minor_string;
- int major = strtol(uts.release,&minor_string,10);
- int minor = strtol(minor_string+1,NULL,10);
- bool result = (major > 2 || (major==2 && minor >= 4));
-#ifndef PRODUCT
- if (PrintMiscellaneous && Verbose) {
- tty->print("OS version is %d.%d, which %s support SSE/SSE2\n",
- major,minor, result ? "DOES" : "does NOT");
- }
-#endif
- return result;
-#endif // AMD64
}
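The deleted fallback decided SSE support by parsing the kernel release reported by uname(2); a sketch of that version check, shown only as background for the old Linux-derived logic:

    #include <stdlib.h>
    #include <sys/utsname.h>

    static bool kernel_is_2_4_or_newer() {
      struct utsname uts;
      if (uname(&uts) != 0) return false;       // uname failed; assume no SSE
      char* minor_string;
      int major = (int) strtol(uts.release, &minor_string, 10);
      int minor = (int) strtol(minor_string + 1, NULL, 10);
      return major > 2 || (major == 2 && minor >= 4);
    }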
bool os::is_allocatable(size_t bytes) {
@@ -836,46 +779,7 @@
#define GET_GS() ({int gs; __asm__ volatile("movw %%gs, %w0":"=q"(gs)); gs&0xffff;})
#endif
-#ifdef _ALLBSD_SOURCE
bool os::Bsd::supports_variable_stack_size() { return true; }
-#else
-// Test if pthread library can support variable thread stack size. BsdThreads
-// in fixed stack mode allocates 2M fixed slot for each thread. BsdThreads
-// in floating stack mode and NPTL support variable stack size.
-bool os::Bsd::supports_variable_stack_size() {
- if (os::Bsd::is_NPTL()) {
- // NPTL, yes
- return true;
-
- } else {
- // Note: We can't control default stack size when creating a thread.
- // If we use non-default stack size (pthread_attr_setstacksize), both
- // floating stack and non-floating stack BsdThreads will return the
- // same value. This makes it impossible to implement this function by
- // detecting thread stack size directly.
- //
- // An alternative approach is to check %gs. Fixed-stack BsdThreads
- // do not use %gs, so its value is 0. Floating-stack BsdThreads use
- // %gs (either as LDT selector or GDT selector, depending on kernel)
- // to access thread specific data.
- //
- // Note that %gs is a reserved glibc register since early 2001, so
- // applications are not allowed to change its value (Ulrich Drepper from
- // Redhat confirmed that all known offenders have been modified to use
- // either %fs or TSD). In the worst case scenario, when VM is embedded in
- // a native application that plays with %gs, we might see non-zero %gs
- // even BsdThreads is running in fixed stack mode. As the result, we'll
- // return true and skip _thread_safety_check(), so we may not be able to
- // detect stack-heap collisions. But otherwise it's harmless.
- //
-#ifdef __GNUC__
- return (GET_GS() != 0);
-#else
- return false;
-#endif
- }
-}
-#endif
#endif // AMD64
// return default stack size for thr_type
@@ -943,7 +847,7 @@
*bottom = (address)((char *)ss.ss_sp - ss.ss_size);
*size = ss.ss_size;
-#elif defined(_ALLBSD_SOURCE)
+#else
pthread_attr_t attr;
int rslt = pthread_attr_init(&attr);
@@ -963,33 +867,6 @@
}
pthread_attr_destroy(&attr);
-#else
- if (os::Bsd::is_initial_thread()) {
- // initial thread needs special handling because pthread_getattr_np()
- // may return bogus value.
- *bottom = os::Bsd::initial_thread_stack_bottom();
- *size = os::Bsd::initial_thread_stack_size();
- } else {
- pthread_attr_t attr;
-
- int rslt = pthread_getattr_np(pthread_self(), &attr);
-
- // JVM needs to know exact stack location, abort if it fails
- if (rslt != 0) {
- if (rslt == ENOMEM) {
- vm_exit_out_of_memory(0, "pthread_getattr_np");
- } else {
- fatal(err_msg("pthread_getattr_np failed with errno = %d", rslt));
- }
- }
-
- if (pthread_attr_getstack(&attr, (void **)bottom, size) != 0) {
- fatal("Can not locate current stack attributes!");
- }
-
- pthread_attr_destroy(&attr);
-
- }
#endif
assert(os::current_stack_pointer() >= *bottom &&
os::current_stack_pointer() < *bottom + *size, "just checking");
--- a/hotspot/src/os_cpu/bsd_zero/vm/globals_bsd_zero.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/os_cpu/bsd_zero/vm/globals_bsd_zero.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -41,7 +41,6 @@
define_pd_global(intx, CompilerThreadStackSize, 0);
define_pd_global(uintx, JVMInvokeMethodSlack, 8192);
-define_pd_global(bool, UseVectoredExceptions, false);
// Used on 64 bit platforms for UseCompressedOops base address or CDS
define_pd_global(uintx, HeapBaseMinAddress, 2*G);
--- a/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -23,7 +23,7 @@
*
*/
-#if defined(_ALLBSD_SOURCE) && !defined(__APPLE__) && !defined(__NetBSD__)
+#if !defined(__APPLE__) && !defined(__NetBSD__)
#include <pthread.h>
# include <pthread_np.h> /* For pthread_attr_get_np */
#endif
@@ -178,26 +178,6 @@
thread->disable_stack_red_zone();
ShouldNotCallThis();
}
-#ifndef _ALLBSD_SOURCE
- else {
- // Accessing stack address below sp may cause SEGV if
- // current thread has MAP_GROWSDOWN stack. This should
- // only happen when current thread was created by user
- // code with MAP_GROWSDOWN flag and then attached to VM.
- // See notes in os_bsd.cpp.
- if (thread->osthread()->expanding_stack() == 0) {
- thread->osthread()->set_expanding_stack();
- if (os::Bsd::manually_expand_stack(thread, addr)) {
- thread->osthread()->clear_expanding_stack();
- return true;
- }
- thread->osthread()->clear_expanding_stack();
- }
- else {
- fatal("recursive segv. expanding stack.");
- }
- }
-#endif
}
}
@@ -266,16 +246,6 @@
// Nothing to do
}
-#ifndef _ALLBSD_SOURCE
-int os::Bsd::get_fpu_control_word() {
- ShouldNotCallThis();
-}
-
-void os::Bsd::set_fpu_control_word(int fpu) {
- ShouldNotCallThis();
-}
-#endif
-
bool os::is_allocatable(size_t bytes) {
#ifdef _LP64
return true;
@@ -339,7 +309,7 @@
stack_top = (address) ss.ss_sp;
stack_bytes = ss.ss_size;
stack_bottom = stack_top - stack_bytes;
-#elif defined(_ALLBSD_SOURCE)
+#else
pthread_attr_t attr;
int rslt = pthread_attr_init(&attr);
@@ -362,67 +332,6 @@
pthread_attr_destroy(&attr);
stack_top = stack_bottom + stack_bytes;
-#else /* Linux */
- pthread_attr_t attr;
- int res = pthread_getattr_np(pthread_self(), &attr);
- if (res != 0) {
- if (res == ENOMEM) {
- vm_exit_out_of_memory(0, "pthread_getattr_np");
- }
- else {
- fatal(err_msg("pthread_getattr_np failed with errno = " INT32_FORMAT,
- res));
- }
- }
-
- res = pthread_attr_getstack(&attr, (void **) &stack_bottom, &stack_bytes);
- if (res != 0) {
- fatal(err_msg("pthread_attr_getstack failed with errno = " INT32_FORMAT,
- res));
- }
- stack_top = stack_bottom + stack_bytes;
-
- // The block of memory returned by pthread_attr_getstack() includes
- // guard pages where present. We need to trim these off.
- size_t page_bytes = os::Bsd::page_size();
- assert(((intptr_t) stack_bottom & (page_bytes - 1)) == 0, "unaligned stack");
-
- size_t guard_bytes;
- res = pthread_attr_getguardsize(&attr, &guard_bytes);
- if (res != 0) {
- fatal(err_msg(
- "pthread_attr_getguardsize failed with errno = " INT32_FORMAT, res));
- }
- int guard_pages = align_size_up(guard_bytes, page_bytes) / page_bytes;
- assert(guard_bytes == guard_pages * page_bytes, "unaligned guard");
-
-#ifdef IA64
- // IA64 has two stacks sharing the same area of memory, a normal
- // stack growing downwards and a register stack growing upwards.
- // Guard pages, if present, are in the centre. This code splits
- // the stack in two even without guard pages, though in theory
- // there's nothing to stop us allocating more to the normal stack
- // or more to the register stack if one or the other were found
- // to grow faster.
- int total_pages = align_size_down(stack_bytes, page_bytes) / page_bytes;
- stack_bottom += (total_pages - guard_pages) / 2 * page_bytes;
-#endif // IA64
-
- stack_bottom += guard_bytes;
-
- pthread_attr_destroy(&attr);
-
- // The initial thread has a growable stack, and the size reported
- // by pthread_attr_getstack is the maximum size it could possibly
- // be given what currently mapped. This can be huge, so we cap it.
- if (os::Bsd::is_initial_thread()) {
- stack_bytes = stack_top - stack_bottom;
-
- if (stack_bytes > JavaThread::stack_size_at_create())
- stack_bytes = JavaThread::stack_size_at_create();
-
- stack_bottom = stack_top - stack_bytes;
- }
#endif
assert(os::current_stack_pointer() >= stack_bottom, "should do");
--- a/hotspot/src/os_cpu/linux_sparc/vm/globals_linux_sparc.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/os_cpu/linux_sparc/vm/globals_linux_sparc.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -35,7 +35,5 @@
// Used on 64 bit platforms for UseCompressedOops base address or CDS
define_pd_global(uintx, HeapBaseMinAddress, CONST64(4)*G);
-// Only used on 64 bit Windows platforms
-define_pd_global(bool, UseVectoredExceptions, false);
#endif // OS_CPU_LINUX_SPARC_VM_GLOBALS_LINUX_SPARC_HPP
--- a/hotspot/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/os_cpu/linux_x86/vm/globals_linux_x86.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -46,7 +46,5 @@
// Used on 64 bit platforms for UseCompressedOops base address or CDS
define_pd_global(uintx,HeapBaseMinAddress, 2*G);
-// Only used on 64 bit Windows platforms
-define_pd_global(bool, UseVectoredExceptions, false);
#endif // OS_CPU_LINUX_X86_VM_GLOBALS_LINUX_X86_HPP
--- a/hotspot/src/os_cpu/linux_zero/vm/globals_linux_zero.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/os_cpu/linux_zero/vm/globals_linux_zero.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -41,7 +41,6 @@
define_pd_global(intx, CompilerThreadStackSize, 0);
define_pd_global(uintx, JVMInvokeMethodSlack, 8192);
-define_pd_global(bool, UseVectoredExceptions, false);
// Used on 64 bit platforms for UseCompressedOops base address or CDS
define_pd_global(uintx, HeapBaseMinAddress, 2*G);
--- a/hotspot/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -39,8 +39,6 @@
#else
define_pd_global(uintx, HeapBaseMinAddress, 2*G);
#endif
-// Only used on 64 bit Windows platforms
-define_pd_global(bool, UseVectoredExceptions, false);
--- a/hotspot/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -45,7 +45,5 @@
// Used on 64 bit platforms for UseCompressedOops base address or CDS
define_pd_global(uintx,HeapBaseMinAddress, 256*M);
-// Only used on 64 bit Windows platforms
-define_pd_global(bool, UseVectoredExceptions, false);
#endif // OS_CPU_SOLARIS_X86_VM_GLOBALS_SOLARIS_X86_HPP
--- a/hotspot/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/os_cpu/windows_x86/vm/globals_windows_x86.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -47,7 +47,5 @@
// Used on 64 bit platforms for UseCompressedOops base address or CDS
define_pd_global(uintx, HeapBaseMinAddress, 2*G);
-// Only used on 64 bit Windows platforms
-define_pd_global(bool, UseVectoredExceptions, false);
#endif // OS_CPU_WINDOWS_X86_VM_GLOBALS_WINDOWS_X86_HPP
--- a/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -175,9 +175,6 @@
PRUNTIME_FUNCTION prt;
PUNWIND_INFO_EH_ONLY punwind;
- // If we are using Vectored Exceptions we don't need this registration
- if (UseVectoredExceptions) return true;
-
BufferBlob* blob = BufferBlob::create("CodeCache Exception Handler", sizeof(DynamicCodeData));
CodeBuffer cb(blob);
MacroAssembler* masm = new MacroAssembler(&cb);
--- a/hotspot/src/share/vm/asm/codeBuffer.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/asm/codeBuffer.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -758,7 +758,7 @@
}
}
- if (dest->blob() == NULL) {
+ if (dest->blob() == NULL && dest_filled != NULL) {
// Destination is a final resting place, not just another buffer.
// Normalize uninitialized bytes in the final padding.
Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -115,6 +115,7 @@
/* Java runtime version access */ \
template(sun_misc_Version, "sun/misc/Version") \
template(java_runtime_name_name, "java_runtime_name") \
+ template(java_runtime_version_name, "java_runtime_version") \
\
/* class file format tags */ \
template(tag_source_file, "SourceFile") \
--- a/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -320,6 +320,7 @@
void bang_stack_shadow_pages(bool native_call);
void generate_all();
+ void initialize_method_handle_entries();
public:
AbstractInterpreterGenerator(StubQueue* _code);
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -235,10 +235,6 @@
#endif
#endif
-// JavaStack Implementation
-#define MORE_STACK(count) \
- (topOfStack -= ((count) * Interpreter::stackElementWords))
-
#define UPDATE_PC(opsize) {pc += opsize; }
/*
@@ -575,7 +571,7 @@
/* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer,
-/* 0xE8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
+/* 0xE8 */ &&opc_invokehandle,&&opc_default, &&opc_default, &&opc_default,
/* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
/* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
@@ -1773,7 +1769,7 @@
oop obj;
if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
- Klass* k = (Klass*) cache->f1();
+ Klass* k = cache->f1_as_klass();
obj = k->java_mirror();
MORE_STACK(1); // Assume single slot push
} else {
@@ -1885,7 +1881,7 @@
--count;
}
if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
- Klass* k = (Klass*) cache->f1();
+ Klass* k = cache->f1_as_klass();
obj = k->java_mirror();
} else {
--count;
@@ -2190,6 +2186,7 @@
}
CASE(_invokedynamic): {
+
if (!EnableInvokeDynamic) {
// We should not encounter this bytecode if !EnableInvokeDynamic.
// The verifier will stop it. However, if we get past the verifier,
@@ -2199,26 +2196,64 @@
ShouldNotReachHere();
}
- int index = Bytes::get_native_u4(pc+1);
+ u4 index = Bytes::get_native_u4(pc+1);
+ ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
// We are resolved if the resolved_references field contains a non-null object (CallSite, etc.)
// This kind of CP cache entry does not need to match the flags byte, because
// there is a 1-1 relation between bytecode type and CP entry type.
- ConstantPool* constants = METHOD->constants();
- oop result = constants->resolved_references()->obj_at(index);
- if (result == NULL) {
+ if (! cache->is_resolved((Bytecodes::Code) opcode)) {
CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD),
handle_exception);
- result = THREAD->vm_result();
+ cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
+ }
+
+ Method* method = cache->f1_as_method();
+ VERIFY_OOP(method);
+
+ if (cache->has_appendix()) {
+ ConstantPool* constants = METHOD->constants();
+ SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
+ MORE_STACK(1);
+ }
+
+ istate->set_msg(call_method);
+ istate->set_callee(method);
+ istate->set_callee_entry_point(method->from_interpreted_entry());
+ istate->set_bcp_advance(5);
+
+ UPDATE_PC_AND_RETURN(0); // I'll be back...
+ }
+
+ CASE(_invokehandle): {
+
+ if (!EnableInvokeDynamic) {
+ ShouldNotReachHere();
}
- VERIFY_OOP(result);
- oop method_handle = java_lang_invoke_CallSite::target(result);
- CHECK_NULL(method_handle);
-
- istate->set_msg(call_method_handle);
- istate->set_callee((Method*) method_handle);
- istate->set_bcp_advance(5);
+ u2 index = Bytes::get_native_u2(pc+1);
+ ConstantPoolCacheEntry* cache = cp->entry_at(index);
+
+ if (! cache->is_resolved((Bytecodes::Code) opcode)) {
+ CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD),
+ handle_exception);
+ cache = cp->entry_at(index);
+ }
+
+ Method* method = cache->f1_as_method();
+
+ VERIFY_OOP(method);
+
+ if (cache->has_appendix()) {
+ ConstantPool* constants = METHOD->constants();
+ SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
+ MORE_STACK(1);
+ }
+
+ istate->set_msg(call_method);
+ istate->set_callee(method);
+ istate->set_callee_entry_point(method->from_interpreted_entry());
+ istate->set_bcp_advance(3);
UPDATE_PC_AND_RETURN(0); // I'll be back...
}
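
Both new interpreter cases above share one shape: look up the constant-pool cache entry for the call, resolve it lazily through the runtime, push the resolved adapter's appendix (when present) as an extra trailing operand, and then hand the target Method* back to the frame manager as an ordinary call_method request. The only asymmetry is the operand size: invokedynamic reads a four-byte cache index and advances the bcp by 5, invokehandle a two-byte index with an advance of 3. The following is a self-contained model of that resolve-then-dispatch shape, not HotSpot code; CallSiteCacheEntry, Target and resolve() are invented stand-ins for the ConstantPoolCacheEntry machinery.

// Standalone model of the lazily-resolved call-site dispatch used by the new
// _invokedynamic/_invokehandle cases. All names here are illustrative; they
// are not HotSpot types.
#include <cstdint>
#include <cstdio>
#include <vector>

struct Target {
    const char* name;
    void (*entry)(const std::vector<intptr_t>& stack);
};

struct CallSiteCacheEntry {
    bool     resolved = false;
    Target*  target   = nullptr;   // plays the role of f1_as_method()
    intptr_t appendix = 0;         // 0 means "no appendix"

    bool has_appendix() const { return appendix != 0; }
};

static void print_args(const std::vector<intptr_t>& stack) {
    std::printf("called with %zu operand(s), top = %ld\n",
                stack.size(), stack.empty() ? 0L : (long)stack.back());
}

static Target the_target = { "linked_adapter", print_args };

// Resolution fills in the target and (sometimes) an extra appendix argument.
static void resolve(CallSiteCacheEntry& e) {
    e.target   = &the_target;
    e.appendix = 42;               // pretend the linker produced an appendix
    e.resolved = true;
}

// The dispatch shape: resolve on first use, push appendix, then call.
static void invoke_through_cache(CallSiteCacheEntry& e,
                                 std::vector<intptr_t>& operand_stack) {
    if (!e.resolved) resolve(e);               // lazy, one-time linkage
    if (e.has_appendix())
        operand_stack.push_back(e.appendix);   // hidden trailing argument
    e.target->entry(operand_stack);            // ordinary call from here on
}

int main() {
    CallSiteCacheEntry site;
    std::vector<intptr_t> stack = { 7 };
    invoke_through_cache(site, stack);         // resolves, pushes 42, calls
    invoke_through_cache(site, stack);         // already resolved this time
    return 0;
}

Once the callee is delivered via call_method, the frame manager no longer needs the dedicated call_method_handle path, which is why that message and its handler are deleted elsewhere in this changeset.
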
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -50,6 +50,10 @@
#ifdef CC_INTERP
+// JavaStack Implementation
+#define MORE_STACK(count) \
+ (topOfStack -= ((count) * Interpreter::stackElementWords))
+
// CVM definitions find hotspot equivalents...
union VMJavaVal64 {
@@ -107,7 +111,6 @@
rethrow_exception, // unwinding and throwing exception
// requests to frame manager from C++ interpreter
call_method, // request for new frame from interpreter, manager responds with method_entry
- call_method_handle, // like the above, except the callee is a method handle
return_from_method, // request from interpreter to unwind, manager responds with method_continue
more_monitors, // need a new monitor
throwing_exception, // unwind stack and rethrow
--- a/hotspot/src/share/vm/interpreter/cppInterpreter.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/interpreter/cppInterpreter.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -117,7 +117,6 @@
method_entry(empty);
method_entry(accessor);
method_entry(abstract);
- method_entry(method_handle);
method_entry(java_lang_math_sin );
method_entry(java_lang_math_cos );
method_entry(java_lang_math_tan );
@@ -125,7 +124,12 @@
method_entry(java_lang_math_sqrt );
method_entry(java_lang_math_log );
method_entry(java_lang_math_log10 );
+ method_entry(java_lang_math_pow );
+ method_entry(java_lang_math_exp );
method_entry(java_lang_ref_reference_get);
+
+ initialize_method_handle_entries();
+
Interpreter::_native_entry_begin = Interpreter::code()->code_end();
method_entry(native);
method_entry(native_synchronized);
--- a/hotspot/src/share/vm/interpreter/interpreter.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/interpreter/interpreter.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -464,3 +464,11 @@
}
}
}
+
+void AbstractInterpreterGenerator::initialize_method_handle_entries() {
+ // method handle entry kinds are generated later in MethodHandlesAdapterGenerator::generate:
+ for (int i = Interpreter::method_handle_invoke_FIRST; i <= Interpreter::method_handle_invoke_LAST; i++) {
+ Interpreter::MethodKind kind = (Interpreter::MethodKind) i;
+ Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract];
+ }
+}
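
The new helper simply parks every method-handle invoke kind on the abstract-method entry, so a call that arrives before MethodHandlesAdapterGenerator has produced the real adapters fails the way an abstract call does instead of jumping through an uninitialized slot. Hoisting it into AbstractInterpreterGenerator lets the C++ interpreter (hunk above) and the template interpreter (hunk below) share the same loop. A self-contained sketch of that placeholder-then-patch table pattern, with invented names (entry_table, abstract_entry, and so on):

// Standalone sketch of the placeholder-then-patch entry table pattern.
// Names are invented for illustration; this is not the HotSpot entry table.
#include <cstdio>

enum MethodKind { k_normal, k_mh_invoke_first, k_mh_invoke_last, k_count };

typedef void (*Entry)();

static void abstract_entry()  { std::printf("error: not invocable yet\n"); }
static void real_mh_entry()   { std::printf("method handle adapter\n"); }
static void normal_entry()    { std::printf("plain interpreted call\n"); }

static Entry entry_table[k_count];

// Analogue of initialize_method_handle_entries(): park the MH kinds on the
// abstract entry so an early call fails cleanly instead of jumping to null.
static void initialize_method_handle_entries() {
    for (int i = k_mh_invoke_first; i <= k_mh_invoke_last; i++)
        entry_table[i] = abstract_entry;
}

// Analogue of the later adapter-generation pass that installs the real code.
static void generate_method_handle_entries() {
    for (int i = k_mh_invoke_first; i <= k_mh_invoke_last; i++)
        entry_table[i] = real_mh_entry;
}

int main() {
    entry_table[k_normal] = normal_entry;
    initialize_method_handle_entries();
    entry_table[k_mh_invoke_first]();   // placeholder: prints the error path
    generate_method_handle_entries();
    entry_table[k_mh_invoke_first]();   // now the real adapter
    return 0;
}
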
--- a/hotspot/src/share/vm/interpreter/templateInterpreter.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/interpreter/templateInterpreter.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -373,11 +373,7 @@
method_entry(java_lang_math_pow )
method_entry(java_lang_ref_reference_get)
- // method handle entry kinds are generated later in MethodHandlesAdapterGenerator::generate:
- for (int i = Interpreter::method_handle_invoke_FIRST; i <= Interpreter::method_handle_invoke_LAST; i++) {
- Interpreter::MethodKind kind = (Interpreter::MethodKind) i;
- Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract];
- }
+ initialize_method_handle_entries();
// all native method kinds (must be one contiguous block)
Interpreter::_native_entry_begin = Interpreter::code()->code_end();
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -742,6 +742,8 @@
uint gc_count = 0;
uint full_gc_count = 0;
+ assert(!Heap_lock->owned_by_self(), "Should not be holding the Heap_lock");
+
do {
MetaWord* result = NULL;
if (GC_locker::is_active_and_needs_gc()) {
@@ -756,7 +758,6 @@
}
JavaThread* jthr = JavaThread::current();
if (!jthr->in_critical()) {
- MutexUnlocker mul(Heap_lock);
// Wait for JNI critical section to be exited
GC_locker::stall_until_clear();
// The GC invoked by the last thread leaving the critical
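
This hunk swaps a MutexUnlocker for an assertion: the callers of this allocation path are expected not to hold the Heap_lock at all, so there is nothing to drop before stalling on the GC locker, and blocking here while owning the lock would stop the very thread that has to clear the blocker. A self-contained illustration of that rule, using std::mutex and a condition variable rather than the VM's Heap_lock and GC_locker:

// Standalone illustration of "never block while holding the lock the
// unblocking thread needs". Not HotSpot code; heap_lock/blocker are
// invented stand-ins for Heap_lock and GC_locker.
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

static std::mutex              heap_lock;
static std::mutex              blocker_mutex;
static std::condition_variable blocker_cv;
static bool                    blocker_active = true;

// Analogue of GC_locker::stall_until_clear(): wait for the blocker to drop.
static void stall_until_clear(bool caller_owns_heap_lock) {
    // The point the new assert makes: the caller must not own heap_lock here,
    // otherwise the thread that clears the blocker (and also wants the heap
    // lock) could never make progress.
    assert(!caller_owns_heap_lock && "must not hold heap_lock while stalling");
    std::unique_lock<std::mutex> lk(blocker_mutex);
    blocker_cv.wait(lk, [] { return !blocker_active; });
}

int main() {
    std::thread clearer([] {
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        {
            std::lock_guard<std::mutex> g(heap_lock);   // needs the heap lock
            std::lock_guard<std::mutex> b(blocker_mutex);
            blocker_active = false;                     // then clears the blocker
        }
        blocker_cv.notify_all();
    });

    stall_until_clear(/*caller_owns_heap_lock=*/false);  // safe: lock not held
    clearer.join();
    return 0;
}
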
--- a/hotspot/src/share/vm/prims/jvmti.xml Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/prims/jvmti.xml Wed Jul 05 18:28:20 2017 +0200
@@ -1,7 +1,7 @@
<?xml version="1.0" encoding="ISO-8859-1"?>
<?xml-stylesheet type="text/xsl" href="jvmti.xsl"?>
<!--
- Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
This code is free software; you can redistribute it and/or modify it
@@ -358,7 +358,7 @@
<specification label="JVM(TM) Tool Interface"
majorversion="1"
minorversion="2"
- microversion="1">
+ microversion="2">
<title subtitle="Version">
<tm>JVM</tm> Tool Interface
</title>
@@ -405,7 +405,7 @@
interfaces are more appropriate than <jvmti/> for many tools.
For more information on the Java Platform Debugger Architecture,
see the
- <externallink id="http://java.sun.com/products/jpda/">Java
+ <externallink id="http://docs.oracle.com/javase/7/docs/technotes/guides/jpda/architecture.html">Java
Platform Debugger Architecture website</externallink>.
</intro>
@@ -693,7 +693,7 @@
An agent creates a <jvmti/> environment
by passing a <jvmti/> version
as the interface ID to the JNI Invocation API function
- <externallink id="http://java.sun.com/javase/6/docs/technotes/guides/jni/spec/invocation.html#GetEnv"><code>GetEnv</code></externallink>.
+ <externallink id="http://docs.oracle.com/javase/7/docs/technotes/guides/jni/spec/invocation.html#GetEnv"><code>GetEnv</code></externallink>.
See <internallink id="jvmtiEnvAccess">Accessing <jvmti/> Functions</internallink>
for more details on the creation and use of
<jvmti/> environments.
@@ -797,7 +797,7 @@
Modified UTF-8 differs
from standard UTF-8 in the representation of supplementary characters
and of the null character. See the
- <externallink id="http://java.sun.com/javase/6/docs/technotes/guides/jni/spec/types.html#wp16542">
+ <externallink id="http://docs.oracle.com/javase/7/docs/technotes/guides/jni/spec/types.html#wp16542">
Modified UTF-8 Strings</externallink>
section of the JNI specification for details.
</intro>
@@ -827,7 +827,7 @@
by calling <jvmti/> functions.
Access to <jvmti/> functions is by use of an interface pointer
in the same manner as
- <externallink id="http://java.sun.com/javase/6/docs/technotes/guides/jni/spec/design.html">Java
+ <externallink id="http://docs.oracle.com/javase/7/docs/technotes/guides/jni/spec/design.html">Java
Native Interface (JNI) functions</externallink> are accessed.
The <jvmti/> interface pointer is called the
<i>environment pointer</i>.
@@ -919,7 +919,7 @@
local references--these local references are created
during the <jvmti/> call.
Local references are a resource that must be managed (see the
- <externallink id="http://java.sun.com/javase/6/docs/guide/jni/spec/functions.html#wp18654">JNI Documentation</externallink>).
+ <externallink id="http://docs.oracle.com/javase/7/docs/technotes/guides/jni/spec/functions.html#wp18654">JNI Documentation</externallink>).
When threads return from native code all local references
are freed. Note that some threads, including typical
agent threads, will never return from native code.
@@ -954,7 +954,7 @@
<jvmti/> function.
See the
<externallink
- id="http://java.sun.com/javase/6/docs/technotes/guides/jni/spec/design.html#wp770"
+ id="http://docs.oracle.com/javase/7/docs/technotes/guides/jni/spec/design.html#wp770"
>Java Exceptions</externallink>
section of the JNI specification for information on handling exceptions.
</intro>
@@ -2024,7 +2024,7 @@
<p/>
Upon execution of <code>proc</code>, the new thread will be attached to the
VM--see the JNI documentation on
- <externallink id="http://java.sun.com/javase/6/docs/technotes/guides/jni/spec/invocation.html#wp1060"
+ <externallink id="http://docs.oracle.com/javase/7/docs/technotes/guides/jni/spec/invocation.html#wp1060"
>Attaching to the VM</externallink>.
</description>
<origin>jvmdiClone</origin>
@@ -4010,7 +4010,7 @@
</inptr>
<description>
Details about the reference.
- Set when the <paramlink id="reference_kind"/> is
+ Set when the <datalink id="jvmtiHeapReferenceCallback.reference_kind">reference_kind</datalink> is
<datalink id="JVMTI_HEAP_REFERENCE_FIELD"/>,
<datalink id="JVMTI_HEAP_REFERENCE_STATIC_FIELD"/>,
<datalink id="JVMTI_HEAP_REFERENCE_ARRAY_ELEMENT"/>,
@@ -4378,7 +4378,7 @@
do not control which objects are visited but they do control which
objects and primitive values are reported by the callbacks.
For example, if the only callback that was set is
- <paramlink id="array_primitive_value_callback"/> and <code>klass</code>
+ <fieldlink id="array_primitive_value_callback" struct="jvmtiHeapCallbacks"/> and <code>klass</code>
is set to the array of bytes class, then only arrays of byte will be
reported.
The table below summarizes this:
@@ -4414,7 +4414,7 @@
</tr>
<tr>
<th align="left">
- <fieldlink id="object_reference_callback" struct="jvmtiHeapCallbacks"/>
+ <fieldlink id="array_primitive_value_callback" struct="jvmtiHeapCallbacks"/>
in <paramlink id="callbacks"/> set
</th>
<td>
@@ -4570,7 +4570,7 @@
do not control which objects are visited but they do control which
objects and primitive values are reported by the callbacks.
For example, if the only callback that was set is
- <paramlink id="array_primitive_value_callback"/> and <code>klass</code>
+ <fieldlink id="array_primitive_value_callback" struct="jvmtiHeapCallbacks"/> and <code>klass</code>
is set to the array of bytes class, then only arrays of byte will be
reported. The table below summarizes this (contrast this with
<functionlink id="FollowReferences"/>):
@@ -4606,7 +4606,7 @@
</tr>
<tr>
<th align="left">
- <fieldlink id="object_callback" struct="jvmtiHeapCallbacks"/>
+ <fieldlink id="array_primitive_value_callback" struct="jvmtiHeapCallbacks"/>
in <paramlink id="callbacks"/> set
</th>
<td>
@@ -6478,7 +6478,7 @@
<synopsis>Get Class Signature</synopsis>
<description>
For the class indicated by <code>klass</code>, return the
- <externallink id="http://java.sun.com/javase/6/docs/guide/jni/spec/types.html#wp16432">JNI
+ <externallink id="http://docs.oracle.com/javase/7/docs/technotes/guides/jni/spec/types.html#wp16432">JNI
type signature</externallink>
and the generic signature of the class.
For example, <code>java.util.List</code> is <code>"Ljava/util/List;"</code>
@@ -8763,7 +8763,7 @@
Provides the ability to intercept and resend
Java Native Interface (JNI) function calls
by manipulating the JNI function table.
- See <externallink id="http://java.sun.com/javase/6/docs/guide/jni/spec/functions.html">JNI
+ See <externallink id="http://docs.oracle.com/javase/7/docs/technotes/guides/jni/spec/functions.html">JNI
Functions</externallink> in the <i>Java Native Interface Specification</i>.
<p/>
The following example illustrates intercepting the
@@ -10446,7 +10446,7 @@
for a class. The segment is typically a directory or JAR file.
<p/>
In the live phase the <paramlink id="segment"/> may be used to specify any platform-dependent
- path to a <externallink id="http://java.sun.com/javase/6/docs/guide/jar/jar.html">
+ path to a <externallink id="http://docs.oracle.com/javase/7/docs/technotes/guides/jar/jar.html">
JAR file</externallink>. The agent should take care that the JAR file does not
contain any classes or resources other than those to be defined by the bootstrap
class loader for the purposes of instrumentation.
@@ -10494,7 +10494,7 @@
for a class. The segment is typically a directory or JAR file.
<p/>
In the live phase the <paramlink id="segment"/> is a platform-dependent path to a <externallink
- id="http://java.sun.com/javase/6/docs/guide/jar/jar.html">JAR file</externallink> to be
+ id="http://docs.oracle.com/javase/7/docs/technotes/guides/jar/jar.html">JAR file</externallink> to be
searched after the system class loader unsuccessfully searches for a class. The agent should
take care that the JAR file does not contain any classes or resources other than those to be
defined by the system class loader for the purposes of instrumentation.
@@ -13128,6 +13128,12 @@
Unsigned 8 bits.
</description>
</basetype>
+ <basetype id="jchar">
+ <description>
+ Holds a Java programming language <code>char</code>.
+ Unsigned 16 bits.
+ </description>
+ </basetype>
<basetype id="jint">
<description>
Holds a Java programming language <code>int</code>.
@@ -13285,7 +13291,7 @@
<description>
Typedef for the JNI function table <code>JNINativeInterface</code>
defined in the
- <externallink id="http://java.sun.com/javase/6/docs/guide/jni/spec/functions.html#wp23720">JNI Specification</externallink>.
+ <externallink id="http://docs.oracle.com/javase/7/docs/technotes/guides/jni/spec/functions.html#wp23720">JNI Specification</externallink>.
The JNI reference implementation defines this with an underscore.
</description>
</basetype>
@@ -14252,6 +14258,9 @@
<change date="6 August 2006" version="1.1.102">
Add ResourceExhaustedEvent.
</change>
+ <change date="11 October 2012" version="1.2.2">
+ Fixed the "HTTP" and "Missing Anchor" errors reported by the LinkCheck tool.
+ </change>
</changehistory>
</specification>
--- a/hotspot/src/share/vm/prims/jvmtiEnvBase.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiEnvBase.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -69,7 +69,7 @@
enum {
JDK15_JVMTI_VERSION = JVMTI_VERSION_1_0 + 33, /* version: 1.0.33 */
JDK16_JVMTI_VERSION = JVMTI_VERSION_1_1 + 102, /* version: 1.1.102 */
- JDK17_JVMTI_VERSION = JVMTI_VERSION_1_2 + 1 /* version: 1.2.1 */
+ JDK17_JVMTI_VERSION = JVMTI_VERSION_1_2 + 2 /* version: 1.2.2 */
};
static jvmtiPhase get_phase() { return _phase; }
--- a/hotspot/src/share/vm/runtime/arguments.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -257,6 +257,7 @@
{ "MaxPermHeapExpansion", JDK_Version::jdk(8), JDK_Version::jdk(9) },
{ "CMSRevisitStackSize", JDK_Version::jdk(8), JDK_Version::jdk(9) },
{ "PrintRevisitStats", JDK_Version::jdk(8), JDK_Version::jdk(9) },
+ { "UseVectoredExceptions", JDK_Version::jdk(8), JDK_Version::jdk(9) },
#ifdef PRODUCT
{ "DesiredMethodLimit",
JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
@@ -2568,7 +2569,9 @@
FLAG_SET_CMDLINE(uintx, MaxNewSize, NewSize);
}
+#ifndef _ALLBSD_SOURCE // UseLargePages is not yet supported on BSD.
FLAG_SET_DEFAULT(UseLargePages, true);
+#endif
// Increase some data structure sizes for efficiency
FLAG_SET_CMDLINE(uintx, BaseFootPrintEstimate, MaxHeapSize);
@@ -3133,6 +3136,10 @@
UNSUPPORTED_OPTION(UseG1GC, "G1 GC");
#endif
+#ifdef _ALLBSD_SOURCE // UseLargePages is not yet supported on BSD.
+ UNSUPPORTED_OPTION(UseLargePages, "-XX:+UseLargePages");
+#endif
+
#if !INCLUDE_ALTERNATE_GCS
if (UseParallelGC) {
warning("Parallel GC is not supported in this VM. Using Serial GC.");
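
UseVectoredExceptions joins the obsolete-flag table with the usual two versions: the release in which the option becomes obsolete (still accepted on the command line, but ignored with a warning) and the release in which it expires (rejected as unrecognized). A self-contained sketch of how such a table is typically consulted; ObsoleteFlag, classify() and the release numbers here are illustrative, not the HotSpot parser:

// Standalone sketch of an obsolete-flag table lookup. Names are invented;
// this is not the HotSpot argument parser.
#include <cstdio>
#include <cstring>

struct ObsoleteFlag {
    const char* name;
    int obsoleted_in;   // accepted and ignored starting with this release
    int expired_in;     // rejected starting with this release
};

static const ObsoleteFlag obsolete_flags[] = {
    { "CMSRevisitStackSize",   8, 9 },
    { "UseVectoredExceptions", 8, 9 },
};

enum Disposition { kProcess, kWarnAndIgnore, kReject };

static Disposition classify(const char* flag, int current_release) {
    for (const ObsoleteFlag& f : obsolete_flags) {
        if (std::strcmp(f.name, flag) != 0) continue;
        if (current_release >= f.expired_in)   return kReject;
        if (current_release >= f.obsoleted_in) return kWarnAndIgnore;
    }
    return kProcess;
}

int main() {
    const char* flag = "UseVectoredExceptions";
    switch (classify(flag, 8)) {
        case kWarnAndIgnore: std::printf("warning: ignoring obsolete flag %s\n", flag); break;
        case kReject:        std::printf("error: unrecognized flag %s\n", flag);        break;
        case kProcess:       std::printf("processing %s normally\n", flag);             break;
    }
    return 0;
}

With the table entry in place, the flag definition itself can be deleted, which is what the globals.hpp hunk below does.
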
--- a/hotspot/src/share/vm/runtime/globals.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -857,9 +857,6 @@
develop(bool, BreakAtWarning, false, \
"Execute breakpoint upon encountering VM warning") \
\
- product_pd(bool, UseVectoredExceptions, \
- "Temp Flag - Use Vectored Exceptions rather than SEH (Windows Only)") \
- \
develop(bool, TraceVMOperation, false, \
"Trace vm operations") \
\
--- a/hotspot/src/share/vm/runtime/java.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/runtime/java.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -688,6 +688,7 @@
JDK_Version JDK_Version::_current;
const char* JDK_Version::_runtime_name;
+const char* JDK_Version::_runtime_version;
void JDK_Version::initialize() {
jdk_version_info info;
--- a/hotspot/src/share/vm/runtime/java.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/runtime/java.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -75,6 +75,7 @@
static JDK_Version _current;
static const char* _runtime_name;
+ static const char* _runtime_version;
// In this class, we promote the minor version of release to be the
// major version for releases >= 5 in anticipation of the JDK doing the
@@ -189,6 +190,13 @@
_runtime_name = name;
}
+ static const char* runtime_version() {
+ return _runtime_version;
+ }
+ static void set_runtime_version(const char* version) {
+ _runtime_version = version;
+ }
+
// Convenience methods for queries on the current major/minor version
static bool is_jdk12x_version() {
return current().compare_major(2) == 0;
--- a/hotspot/src/share/vm/runtime/thread.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/runtime/thread.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -1042,6 +1042,7 @@
}
char java_runtime_name[128] = "";
+char java_runtime_version[128] = "";
// extract the JRE name from sun.misc.Version.java_runtime_name
static const char* get_java_runtime_name(TRAPS) {
@@ -1064,6 +1065,27 @@
}
}
+// extract the JRE version from sun.misc.Version.java_runtime_version
+static const char* get_java_runtime_version(TRAPS) {
+ Klass* k = SystemDictionary::find(vmSymbols::sun_misc_Version(),
+ Handle(), Handle(), CHECK_AND_CLEAR_NULL);
+ fieldDescriptor fd;
+ bool found = k != NULL &&
+ InstanceKlass::cast(k)->find_local_field(vmSymbols::java_runtime_version_name(),
+ vmSymbols::string_signature(), &fd);
+ if (found) {
+ oop name_oop = k->java_mirror()->obj_field(fd.offset());
+ if (name_oop == NULL)
+ return NULL;
+ const char* name = java_lang_String::as_utf8_string(name_oop,
+ java_runtime_version,
+ sizeof(java_runtime_version));
+ return name;
+ } else {
+ return NULL;
+ }
+}
+
// General purpose hook into Java code, run once when the VM is initialized.
// The Java library method itself may be changed independently from the VM.
static void call_postVMInitHook(TRAPS) {
@@ -3473,6 +3495,7 @@
// get the Java runtime name after java.lang.System is initialized
JDK_Version::set_runtime_name(get_java_runtime_name(THREAD));
+ JDK_Version::set_runtime_version(get_java_runtime_version(THREAD));
} else {
warning("java.lang.System not initialized");
}
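
get_java_runtime_version() mirrors the existing get_java_runtime_name(): find sun.misc.Version, locate its static java_runtime_version String field, and cache the UTF-8 form in a fixed buffer so the error reporter can print it later without allocating. For comparison, the same field can be read from agent code through plain JNI; the helper below is that external analogue (an illustration, not the VM-internal code above), and it assumes a JDK in which sun/misc/Version still has this field.

// Agent-side analogue of get_java_runtime_version(): read the static
// sun.misc.Version.java_runtime_version String through JNI. This is an
// illustration of the same lookup done externally, not the VM-internal code.
#include <jni.h>
#include <cstdio>

// Returns true and fills 'buf' on success; clears any pending exception and
// returns false if the class or field is missing (e.g. on newer JDKs).
static bool get_java_runtime_version(JNIEnv* env, char* buf, size_t buflen) {
    jclass version = env->FindClass("sun/misc/Version");
    if (version == NULL) { env->ExceptionClear(); return false; }

    jfieldID fid = env->GetStaticFieldID(version, "java_runtime_version",
                                         "Ljava/lang/String;");
    if (fid == NULL) { env->ExceptionClear(); return false; }

    jstring value = (jstring) env->GetStaticObjectField(version, fid);
    if (value == NULL) return false;

    const char* utf = env->GetStringUTFChars(value, NULL);
    if (utf == NULL) return false;
    std::snprintf(buf, buflen, "%s", utf);
    env->ReleaseStringUTFChars(value, utf);
    return true;
}
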
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -2474,7 +2474,7 @@
/* frame */ \
/**********************/ \
\
- X86_ONLY(declare_constant(frame::entry_frame_call_wrapper_offset)) \
+ NOT_ZERO(X86_ONLY(declare_constant(frame::entry_frame_call_wrapper_offset))) \
declare_constant(frame::pc_return_offset) \
\
/*************/ \
--- a/hotspot/src/share/vm/utilities/macros.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/utilities/macros.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -282,6 +282,22 @@
#define NOT_WIN64(code) code
#endif
+#if defined(ZERO)
+#define ZERO_ONLY(code) code
+#define NOT_ZERO(code)
+#else
+#define ZERO_ONLY(code)
+#define NOT_ZERO(code) code
+#endif
+
+#if defined(SHARK)
+#define SHARK_ONLY(code) code
+#define NOT_SHARK(code)
+#else
+#define SHARK_ONLY(code)
+#define NOT_SHARK(code) code
+#endif
+
#if defined(IA32) || defined(AMD64)
#define X86
#define X86_ONLY(code) code
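
The new ZERO and SHARK macro pairs follow the existing ONLY/NOT convention in this header: one side expands to its argument, the other to nothing, so a single source line can be compiled in or out per build flavor; that is how the vmStructs.cpp hunk above keeps the x86-only frame constant out of Zero builds. A minimal self-contained demonstration (build it with and without -DZERO):

// Minimal demonstration of the conditional no-op macro pattern added to
// macros.hpp. Build twice to see both expansions:
//   g++ demo.cpp -o demo          (NOT_ZERO code kept)
//   g++ -DZERO demo.cpp -o demo   (NOT_ZERO code compiled away)
#include <cstdio>

#if defined(ZERO)
#define ZERO_ONLY(code) code
#define NOT_ZERO(code)
#else
#define ZERO_ONLY(code)
#define NOT_ZERO(code) code
#endif

int main() {
    NOT_ZERO(std::printf("non-Zero build: x86-only constants stay in the table\n");)
    ZERO_ONLY(std::printf("Zero build: NOT_ZERO(...) lines vanish entirely\n");)
    return 0;
}
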
--- a/hotspot/src/share/vm/utilities/taskqueue.hpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/utilities/taskqueue.hpp Wed Jul 05 18:28:20 2017 +0200
@@ -496,9 +496,7 @@
}
}
- bool steal_1_random(uint queue_num, int* seed, E& t);
bool steal_best_of_2(uint queue_num, int* seed, E& t);
- bool steal_best_of_all(uint queue_num, int* seed, E& t);
void register_queue(uint i, T* q);
@@ -538,46 +536,6 @@
}
template<class T, MEMFLAGS F> bool
-GenericTaskQueueSet<T, F>::steal_best_of_all(uint queue_num, int* seed, E& t) {
- if (_n > 2) {
- int best_k;
- uint best_sz = 0;
- for (uint k = 0; k < _n; k++) {
- if (k == queue_num) continue;
- uint sz = _queues[k]->size();
- if (sz > best_sz) {
- best_sz = sz;
- best_k = k;
- }
- }
- return best_sz > 0 && _queues[best_k]->pop_global(t);
- } else if (_n == 2) {
- // Just try the other one.
- int k = (queue_num + 1) % 2;
- return _queues[k]->pop_global(t);
- } else {
- assert(_n == 1, "can't be zero.");
- return false;
- }
-}
-
-template<class T, MEMFLAGS F> bool
-GenericTaskQueueSet<T, F>::steal_1_random(uint queue_num, int* seed, E& t) {
- if (_n > 2) {
- uint k = queue_num;
- while (k == queue_num) k = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
- return _queues[2]->pop_global(t);
- } else if (_n == 2) {
- // Just try the other one.
- int k = (queue_num + 1) % 2;
- return _queues[k]->pop_global(t);
- } else {
- assert(_n == 1, "can't be zero.");
- return false;
- }
-}
-
-template<class T, MEMFLAGS F> bool
GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, int* seed, E& t) {
if (_n > 2) {
uint k1 = queue_num;
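
Deleting steal_1_random and steal_best_of_all leaves steal_best_of_2 as the only stealing policy: pick two victims at random and take from whichever currently holds more work, the usual "power of two choices" compromise between a blind random pick and a full scan of every queue. The sketch below is a self-contained model of that heuristic over plain std::deque, not the lock-free GenericTaskQueue code.

// Standalone model of the "best of two choices" steal heuristic that the
// patch keeps (steal_best_of_2). Plain deques and std::mt19937 stand in for
// the lock-free GenericTaskQueue machinery.
#include <cstdio>
#include <deque>
#include <random>
#include <vector>

typedef int Task;

static bool steal_best_of_2(std::vector<std::deque<Task> >& queues,
                            size_t thief, std::mt19937& rng, Task& out) {
    const size_t n = queues.size();
    if (n < 2) return false;                 // nobody else to steal from

    size_t victim_idx;
    if (n == 2) {
        victim_idx = 1 - thief;              // only one possible victim
    } else {
        std::uniform_int_distribution<size_t> pick(0, n - 1);
        size_t k1 = thief, k2 = thief;
        while (k1 == thief)             k1 = pick(rng);
        while (k2 == thief || k2 == k1) k2 = pick(rng);
        // The "two choices" part: prefer whichever victim looks fuller.
        victim_idx = queues[k1].size() > queues[k2].size() ? k1 : k2;
    }

    std::deque<Task>& victim = queues[victim_idx];
    if (victim.empty()) return false;
    out = victim.front();                    // a real queue pops its shared end
    victim.pop_front();
    return true;
}

int main() {
    std::vector<std::deque<Task> > queues(4);
    for (int i = 0; i < 8; i++) queues[2].push_back(i);   // one busy worker

    std::mt19937 rng(12345);
    Task t;
    if (steal_best_of_2(queues, /*thief=*/0, rng, t))
        std::printf("stole task %d\n", t);
    else
        std::printf("nothing to steal this round\n");
    return 0;
}
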
--- a/hotspot/src/share/vm/utilities/vmError.cpp Wed Nov 07 15:32:13 2012 -0800
+++ b/hotspot/src/share/vm/utilities/vmError.cpp Wed Jul 05 18:28:20 2017 +0200
@@ -453,7 +453,9 @@
JDK_Version::current().to_string(buf, sizeof(buf));
const char* runtime_name = JDK_Version::runtime_name() != NULL ?
JDK_Version::runtime_name() : "";
- st->print_cr("# JRE version: %s (%s)", runtime_name, buf);
+ const char* runtime_version = JDK_Version::runtime_version() != NULL ?
+ JDK_Version::runtime_version() : "";
+ st->print_cr("# JRE version: %s (%s) (build %s)", runtime_name, buf, runtime_version);
st->print_cr("# Java VM: %s (%s %s %s %s)",
Abstract_VM_Version::vm_name(),
Abstract_VM_Version::vm_release(),