# HG changeset patch # User prr # Date 1519670194 28800 # Node ID 206a6f728ce5561d07c67ad3b9ea71ab697682d5 # Parent bec86eb4a71a683b48da2db2494b5ef23d1b3bc9# Parent 0b65c64c9db990a2fb56a8d8e37e9077014ee36b Merge diff -r bec86eb4a71a -r 206a6f728ce5 .hgtags --- a/.hgtags Fri Feb 23 12:30:03 2018 +0530 +++ b/.hgtags Mon Feb 26 10:36:34 2018 -0800 @@ -470,3 +470,4 @@ 4b62b815b4f49970b91a952929cf50115c263cb3 jdk-10+42 107413b070b92c88bde6230ceb4a19b579781068 jdk-10+43 dfa46cfe56346884a61efdc30dc50f7505d66761 jdk-11+1 +03ae177c26b016353e5ea1cab6ffd051dfa086ca jdk-11+2 diff -r bec86eb4a71a -r 206a6f728ce5 make/autoconf/basics.m4 --- a/make/autoconf/basics.m4 Fri Feb 23 12:30:03 2018 +0530 +++ b/make/autoconf/basics.m4 Mon Feb 26 10:36:34 2018 -0800 @@ -263,7 +263,7 @@ READLINK_TESTED=yes fi - if test "x$READLINK" != x && "x$READLINK_ISGNU" != x; then + if test "x$READLINK" != x && test "x$READLINK_ISGNU" != x; then $1=`$READLINK -f [$]$1` else # Save the current directory for restoring afterwards diff -r bec86eb4a71a -r 206a6f728ce5 make/common/TextFileProcessing.gmk --- a/make/common/TextFileProcessing.gmk Fri Feb 23 12:30:03 2018 +0530 +++ b/make/common/TextFileProcessing.gmk Mon Feb 26 10:36:34 2018 -0800 @@ -153,11 +153,11 @@ endif # Convert the REPLACEMENTS syntax ( A => B ; C => D ; ...) to a sed command - # line (-e "s/A/B/" -e "s/C/D/" ...), basically by replacing '=>' with '/' - # and ';' with '/" -e "s/', and adjusting for edge cases. - $1_REPLACEMENTS_COMMAND_LINE := $(SED) -e 's$$($1_SEP)$$(subst $$(SPACE);$$(SPACE),$$($1_SEP)' \ - -e 's$$($1_SEP),$$(subst $$(SPACE)=>$$(SPACE),$$($1_SEP),$$(subst $$(SPACE)=>$$(SPACE);$$(SPACE),$$($1_SEP)$$($1_SEP)' \ - -e 's$$($1_SEP),$$(strip $$($1_REPLACEMENTS)))))$$($1_SEP)' + # line (-e "s/A/B/g" -e "s/C/D/g" ...), basically by replacing '=>' with '/' + # and ';' with '/g" -e "s/', and adjusting for edge cases. + $1_REPLACEMENTS_COMMAND_LINE := $(SED) -e 's$$($1_SEP)$$(subst $$(SPACE);$$(SPACE),$$($1_SEP)g' \ + -e 's$$($1_SEP),$$(subst $$(SPACE)=>$$(SPACE),$$($1_SEP),$$(subst $$(SPACE)=>$$(SPACE);$$(SPACE),$$($1_SEP)$$($1_SEP)g' \ + -e 's$$($1_SEP),$$(strip $$($1_REPLACEMENTS)))))$$($1_SEP)g' else # We don't have any replacements, just pipe the file through cat. 
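The TextFileProcessing.gmk hunk above adds the global flag to each generated substitution. As a hypothetical illustration (the names are invented), a replacement list of the form ( @A@ => 1 ; @B@ => 2 ) now expands to sed -e "s/@A@/1/g" -e "s/@B@/2/g", so every occurrence on a line is rewritten rather than only the first. The basics.m4 hunk is an unrelated shell fix: it adds the missing second "test" so the && chain evaluates a condition instead of trying to execute "x$READLINK_ISGNU" as a command.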
$1_REPLACEMENTS_COMMAND_LINE := $(CAT) diff -r bec86eb4a71a -r 206a6f728ce5 make/mapfiles/libjava/mapfile-vers --- a/make/mapfiles/libjava/mapfile-vers Fri Feb 23 12:30:03 2018 +0530 +++ b/make/mapfiles/libjava/mapfile-vers Mon Feb 26 10:36:34 2018 -0800 @@ -74,11 +74,12 @@ JNU_ThrowStringIndexOutOfBoundsException; JNU_ToString; - Java_java_io_FileDescriptor_cleanupClose0; + Java_java_io_FileCleanable_cleanupClose0; Java_java_io_FileDescriptor_close0; Java_java_io_FileDescriptor_initIDs; Java_java_io_FileDescriptor_sync; Java_java_io_FileDescriptor_getAppend; + Java_java_io_FileDescriptor_getHandle; Java_java_io_FileInputStream_available0; Java_java_io_FileInputStream_initIDs; Java_java_io_FileInputStream_open0; @@ -142,7 +143,6 @@ Java_java_lang_StackStreamFactory_checkStackWalkModes; Java_java_lang_StackStreamFactory_00024AbstractStackWalker_callStackWalk; Java_java_lang_StackStreamFactory_00024AbstractStackWalker_fetchStackFrames; - Java_java_lang_Shutdown_runAllFinalizers; Java_java_lang_StrictMath_IEEEremainder; Java_java_lang_StrictMath_acos; Java_java_lang_StrictMath_asin; diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp --- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -277,6 +277,8 @@ // Add in the index add(result, result, tmp); load_heap_oop(result, Address(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT))); + // The resulting oop is null if the reference is not yet resolved. + // It is Universe::the_null_sentinel() if the reference resolved to NULL via condy. } void InterpreterMacroAssembler::load_resolved_klass_at_offset( @@ -399,6 +401,13 @@ str(val, Address(esp, Interpreter::expr_offset_in_bytes(n))); } +void InterpreterMacroAssembler::load_float(Address src) { + ldrs(v0, src); +} + +void InterpreterMacroAssembler::load_double(Address src) { + ldrd(v0, src); +} void InterpreterMacroAssembler::prepare_to_jump_from_interpreted() { // set sender sp diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp --- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -158,6 +158,10 @@ void load_ptr(int n, Register val); void store_ptr(int n, Register val); +// Load float value from 'address'. The value is loaded onto the FPU register v0. + void load_float(Address src); + void load_double(Address src); + // Generate a subtype check: branch to ok_is_subtype if sub_klass is // a subtype of super_klass. 
void gen_subtype_check( Register sub_klass, Label &ok_is_subtype ); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -2056,9 +2056,14 @@ } void MacroAssembler::unimplemented(const char* what) { - char* b = new char[1024]; - jio_snprintf(b, 1024, "unimplemented: %s", what); - stop(b); + const char* buf = NULL; + { + ResourceMark rm; + stringStream ss; + ss.print("unimplemented: %s", what); + buf = code_string(ss.as_string()); + } + stop(buf); } // If a constant does not fit in an immediate field, generate some diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/cpu/aarch64/templateTable_aarch64.cpp --- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -370,7 +370,7 @@ void TemplateTable::ldc(bool wide) { transition(vtos, vtos); - Label call_ldc, notFloat, notClass, Done; + Label call_ldc, notFloat, notClass, notInt, Done; if (wide) { __ get_unsigned_2_byte_index_at_bcp(r1, 1); @@ -417,20 +417,19 @@ __ b(Done); __ bind(notFloat); -#ifdef ASSERT - { - Label L; - __ cmp(r3, JVM_CONSTANT_Integer); - __ br(Assembler::EQ, L); - // String and Object are rewritten to fast_aldc - __ stop("unexpected tag type in ldc"); - __ bind(L); - } -#endif - // itos JVM_CONSTANT_Integer only + + __ cmp(r3, JVM_CONSTANT_Integer); + __ br(Assembler::NE, notInt); + + // itos __ adds(r1, r2, r1, Assembler::LSL, 3); __ ldrw(r0, Address(r1, base_offset)); __ push_i(r0); + __ b(Done); + + __ bind(notInt); + condy_helper(Done); + __ bind(Done); } @@ -441,6 +440,8 @@ Register result = r0; Register tmp = r1; + Register rarg = r2; + int index_size = wide ? sizeof(u2) : sizeof(u1); Label resolved; @@ -455,12 +456,27 @@ address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); // first time invocation - must resolve first - __ mov(tmp, (int)bytecode()); - __ call_VM(result, entry, tmp); + __ mov(rarg, (int)bytecode()); + __ call_VM(result, entry, rarg); __ bind(resolved); + { // Check for the null sentinel. + // If we just called the VM, that already did the mapping for us, + // but it's harmless to retry. 
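The sentinel check beginning here is worth a word of explanation. A NULL entry in the resolved-references array means "not yet resolved", so resolve_ldc cannot use NULL to report a condy constant that genuinely resolved to null; it returns Universe::the_null_sentinel() instead, and the interpreter maps that back to a real null before pushing the reference. A minimal sketch of the mapping (illustration only, not HotSpot code):

  // If resolution produced the shared sentinel object, the constant's real
  // value is the null reference; otherwise the resolved oop is used as-is.
  oop map_sentinel_to_null(oop resolved, oop null_sentinel) {
    return (resolved == null_sentinel) ? NULL : resolved;
  }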
+ Label notNull; + + // Stash null_sentinel address to get its value later + __ movptr(rarg, (uintptr_t)Universe::the_null_sentinel_addr()); + __ ldr(tmp, Address(rarg)); + __ cmp(result, tmp); + __ br(Assembler::NE, notNull); + __ mov(result, 0); // NULL object reference + __ bind(notNull); + } + if (VerifyOops) { + // Safe to call with 0 result __ verify_oop(result); } } @@ -468,7 +484,7 @@ void TemplateTable::ldc2_w() { transition(vtos, vtos); - Label Long, Done; + Label notDouble, notLong, Done; __ get_unsigned_2_byte_index_at_bcp(r0, 1); __ get_cpool_and_tags(r1, r2); @@ -479,22 +495,143 @@ __ lea(r2, Address(r2, r0, Address::lsl(0))); __ load_unsigned_byte(r2, Address(r2, tags_offset)); __ cmpw(r2, (int)JVM_CONSTANT_Double); - __ br(Assembler::NE, Long); + __ br(Assembler::NE, notDouble); + // dtos __ lea (r2, Address(r1, r0, Address::lsl(3))); __ ldrd(v0, Address(r2, base_offset)); __ push_d(); __ b(Done); - __ bind(Long); + __ bind(notDouble); + __ cmpw(r2, (int)JVM_CONSTANT_Long); + __ br(Assembler::NE, notLong); + // ltos __ lea(r0, Address(r1, r0, Address::lsl(3))); __ ldr(r0, Address(r0, base_offset)); __ push_l(); + __ b(Done); + + __ bind(notLong); + condy_helper(Done); __ bind(Done); } +void TemplateTable::condy_helper(Label& Done) +{ + Register obj = r0; + Register rarg = r1; + Register flags = r2; + Register off = r3; + + address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); + + __ mov(rarg, (int) bytecode()); + __ call_VM(obj, entry, rarg); + + __ get_vm_result_2(flags, rthread); + + // VMr = obj = base address to find primitive value to push + // VMr2 = flags = (tos, off) using format of CPCE::_flags + __ mov(off, flags); + __ andw(off, off, ConstantPoolCacheEntry::field_index_mask); + + const Address field(obj, off); + + // What sort of thing are we loading? + // x86 uses a shift and mask or wings it with a shift plus assert + // the mask is not needed. 
aarch64 just uses bitfield extract + __ ubfxw(flags, flags, ConstantPoolCacheEntry::tos_state_shift, + ConstantPoolCacheEntry::tos_state_bits); + + switch (bytecode()) { + case Bytecodes::_ldc: + case Bytecodes::_ldc_w: + { + // tos in (itos, ftos, stos, btos, ctos, ztos) + Label notInt, notFloat, notShort, notByte, notChar, notBool; + __ cmpw(flags, itos); + __ br(Assembler::NE, notInt); + // itos + __ ldrw(r0, field); + __ push(itos); + __ b(Done); + + __ bind(notInt); + __ cmpw(flags, ftos); + __ br(Assembler::NE, notFloat); + // ftos + __ load_float(field); + __ push(ftos); + __ b(Done); + + __ bind(notFloat); + __ cmpw(flags, stos); + __ br(Assembler::NE, notShort); + // stos + __ load_signed_short(r0, field); + __ push(stos); + __ b(Done); + + __ bind(notShort); + __ cmpw(flags, btos); + __ br(Assembler::NE, notByte); + // btos + __ load_signed_byte(r0, field); + __ push(btos); + __ b(Done); + + __ bind(notByte); + __ cmpw(flags, ctos); + __ br(Assembler::NE, notChar); + // ctos + __ load_unsigned_short(r0, field); + __ push(ctos); + __ b(Done); + + __ bind(notChar); + __ cmpw(flags, ztos); + __ br(Assembler::NE, notBool); + // ztos + __ load_signed_byte(r0, field); + __ push(ztos); + __ b(Done); + + __ bind(notBool); + break; + } + + case Bytecodes::_ldc2_w: + { + Label notLong, notDouble; + __ cmpw(flags, ltos); + __ br(Assembler::NE, notLong); + // ltos + __ ldr(r0, field); + __ push(ltos); + __ b(Done); + + __ bind(notLong); + __ cmpw(flags, dtos); + __ br(Assembler::NE, notDouble); + // dtos + __ load_double(field); + __ push(dtos); + __ b(Done); + + __ bind(notDouble); + break; + } + + default: + ShouldNotReachHere(); + } + + __ stop("bad ldc/condy"); +} + void TemplateTable::locals_index(Register reg, int offset) { __ ldrb(reg, at_bcp(offset)); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/cpu/ppc/copy_ppc.hpp --- a/src/hotspot/cpu/ppc/copy_ppc.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/cpu/ppc/copy_ppc.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2013 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -32,11 +32,11 @@ // Inline functions for memory copy and fill. 
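The condy_helper added above decodes the CPCE::_flags word returned by resolve_ldc via get_vm_result_2: the low bits carry the field offset within the returned holder, and the tos_state bits select which load/push pair to use. A rough decoding using the same constants as the hunks (a sketch, not HotSpot code):

  static int field_offset_of(int flags) {
    return flags & ConstantPoolCacheEntry::field_index_mask;        // low bits: offset
  }
  static TosState tos_state_of(int flags) {
    return (TosState)((flags >> ConstantPoolCacheEntry::tos_state_shift)
                      & ((1 << ConstantPoolCacheEntry::tos_state_bits) - 1));
  }

The switch on the bytecode then pushes the value with the matching-width load (itos, ftos, stos, btos, ctos, ztos for ldc/ldc_w; ltos, dtos for ldc2_w), and any other state falls through to the stop("bad ldc/condy") guard.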
-static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { (void)memmove(to, from, count * HeapWordSize); } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { switch (count) { case 8: to[7] = from[7]; case 7: to[6] = from[6]; @@ -52,7 +52,7 @@ } } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { switch (count) { case 8: to[7] = from[7]; case 7: to[6] = from[6]; @@ -70,25 +70,25 @@ } } -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { (void)memmove(to, from, count * HeapWordSize); } -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { (void)memmove(to, from, count); } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { (void)memmove(to, from, count); } // Template for atomic, element-wise copy. template -static void copy_conjoint_atomic(T* from, T* to, size_t count) { +static void copy_conjoint_atomic(const T* from, T* to, size_t count) { if (from > to) { while (count-- > 0) { // Copy forwards @@ -104,44 +104,44 @@ } } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { // TODO: contribute optimized version. copy_conjoint_atomic(from, to, count); } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { // TODO: contribute optimized version. copy_conjoint_atomic(from, to, count); } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { copy_conjoint_atomic(from, to, count); } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { copy_conjoint_atomic(from, to, count); } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { pd_conjoint_bytes_atomic(from, to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { // TODO: contribute optimized version. - pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count); + pd_conjoint_jshorts_atomic((const jshort*)from, (jshort*)to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { // TODO: contribute optimized version. 
- pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); } static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) { diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp --- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -492,6 +492,8 @@ // Add in the index. add(result, tmp, result); load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result, is_null); + // The resulting oop is null if the reference is not yet resolved. + // It is Universe::the_null_sentinel() if the reference resolved to NULL via condy. } // load cpool->resolved_klass_at(index) diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/cpu/ppc/templateTable_ppc_64.cpp --- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -314,7 +314,7 @@ Rcpool = R3_ARG1; transition(vtos, vtos); - Label notInt, notClass, exit; + Label notInt, notFloat, notClass, exit; __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags. if (wide) { // Read index. @@ -356,13 +356,16 @@ __ align(32, 12); __ bind(notInt); -#ifdef ASSERT - // String and Object are rewritten to fast_aldc __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float); - __ asm_assert_eq("unexpected type", 0x8765); -#endif + __ bne(CCR0, notFloat); __ lfsx(F15_ftos, Rcpool, Rscratch1); __ push(ftos); + __ b(exit); + + __ align(32, 12); + // assume the tag is for condy; if not, the VM runtime will tell us + __ bind(notFloat); + condy_helper(exit); __ align(32, 12); __ bind(exit); @@ -380,6 +383,19 @@ // non-null object (CallSite, etc.) __ get_cache_index_at_bcp(Rscratch, 1, index_size); // Load index. __ load_resolved_reference_at_index(R17_tos, Rscratch, &is_null); + + // Convert null sentinel to NULL. + int simm16_rest = __ load_const_optimized(Rscratch, Universe::the_null_sentinel_addr(), R0, true); + __ ld(Rscratch, simm16_rest, Rscratch); + __ cmpld(CCR0, R17_tos, Rscratch); + if (VM_Version::has_isel()) { + __ isel_0(R17_tos, CCR0, Assembler::equal); + } else { + Label not_sentinel; + __ bne(CCR0, not_sentinel); + __ li(R17_tos, 0); + __ bind(not_sentinel); + } __ verify_oop(R17_tos); __ dispatch_epilog(atos, Bytecodes::length_for(bytecode())); @@ -395,7 +411,7 @@ void TemplateTable::ldc2_w() { transition(vtos, vtos); - Label Llong, Lexit; + Label not_double, not_long, exit; Register Rindex = R11_scratch1, Rcpool = R12_scratch2, @@ -410,23 +426,129 @@ __ addi(Rtag, Rtag, tags_offset); __ lbzx(Rtag, Rtag, Rindex); - __ sldi(Rindex, Rindex, LogBytesPerWord); + __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double); - __ bne(CCR0, Llong); - // A double can be placed at word-aligned locations in the constant pool. - // Check out Conversions.java for an example. 
- // Also ConstantPool::header_size() is 20, which makes it very difficult - // to double-align double on the constant pool. SG, 11/7/97 + __ bne(CCR0, not_double); __ lfdx(F15_ftos, Rcpool, Rindex); __ push(dtos); - __ b(Lexit); - - __ bind(Llong); + __ b(exit); + + __ bind(not_double); + __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Long); + __ bne(CCR0, not_long); __ ldx(R17_tos, Rcpool, Rindex); __ push(ltos); - - __ bind(Lexit); + __ b(exit); + + __ bind(not_long); + condy_helper(exit); + + __ align(32, 12); + __ bind(exit); +} + +void TemplateTable::condy_helper(Label& Done) { + const Register obj = R31; + const Register off = R11_scratch1; + const Register flags = R12_scratch2; + const Register rarg = R4_ARG2; + __ li(rarg, (int)bytecode()); + call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg); + __ get_vm_result_2(flags); + + // VMr = obj = base address to find primitive value to push + // VMr2 = flags = (tos, off) using format of CPCE::_flags + __ andi(off, flags, ConstantPoolCacheEntry::field_index_mask); + + // What sort of thing are we loading? + __ rldicl(flags, flags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); + + switch (bytecode()) { + case Bytecodes::_ldc: + case Bytecodes::_ldc_w: + { + // tos in (itos, ftos, stos, btos, ctos, ztos) + Label notInt, notFloat, notShort, notByte, notChar, notBool; + __ cmplwi(CCR0, flags, itos); + __ bne(CCR0, notInt); + // itos + __ lwax(R17_tos, obj, off); + __ push(itos); + __ b(Done); + + __ bind(notInt); + __ cmplwi(CCR0, flags, ftos); + __ bne(CCR0, notFloat); + // ftos + __ lfsx(F15_ftos, obj, off); + __ push(ftos); + __ b(Done); + + __ bind(notFloat); + __ cmplwi(CCR0, flags, stos); + __ bne(CCR0, notShort); + // stos + __ lhax(R17_tos, obj, off); + __ push(stos); + __ b(Done); + + __ bind(notShort); + __ cmplwi(CCR0, flags, btos); + __ bne(CCR0, notByte); + // btos + __ lbzx(R17_tos, obj, off); + __ extsb(R17_tos, R17_tos); + __ push(btos); + __ b(Done); + + __ bind(notByte); + __ cmplwi(CCR0, flags, ctos); + __ bne(CCR0, notChar); + // ctos + __ lhzx(R17_tos, obj, off); + __ push(ctos); + __ b(Done); + + __ bind(notChar); + __ cmplwi(CCR0, flags, ztos); + __ bne(CCR0, notBool); + // ztos + __ lbzx(R17_tos, obj, off); + __ push(ztos); + __ b(Done); + + __ bind(notBool); + break; + } + + case Bytecodes::_ldc2_w: + { + Label notLong, notDouble; + __ cmplwi(CCR0, flags, ltos); + __ bne(CCR0, notLong); + // ltos + __ ldx(R17_tos, obj, off); + __ push(ltos); + __ b(Done); + + __ bind(notLong); + __ cmplwi(CCR0, flags, dtos); + __ bne(CCR0, notDouble); + // dtos + __ lfdx(F15_ftos, obj, off); + __ push(dtos); + __ b(Done); + + __ bind(notDouble); + break; + } + + default: + ShouldNotReachHere(); + } + + __ stop("bad ldc/condy"); } // Get the locals index located in the bytecode stream at bcp + offset. diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/cpu/s390/copy_s390.hpp --- a/src/hotspot/cpu/s390/copy_s390.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/cpu/s390/copy_s390.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -73,7 +73,7 @@ #undef USE_INLINE_ASM -static void copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { if (from > to) { while (count-- > 0) { // Copy forwards @@ -89,7 +89,7 @@ } } -static void copy_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void copy_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { if (from > to) { while (count-- > 0) { // Copy forwards @@ -105,7 +105,7 @@ } } -static bool has_destructive_overlap(char* from, char* to, size_t byte_count) { +static bool has_destructive_overlap(const char* from, char* to, size_t byte_count) { return (from < to) && ((to-from) < (ptrdiff_t)byte_count); } @@ -662,7 +662,7 @@ // D I S J O I N T C O P Y I N G // //*************************************// -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { // JVM2008: very frequent, some tests frequent. // Copy HeapWord (=DW) aligned storage. Use MVCLE in inline-asm code. @@ -740,13 +740,13 @@ #endif } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { // JVM2008: < 4k calls. assert(((((size_t)from) & 0x07L) | (((size_t)to) & 0x07L)) == 0, "No atomic copy w/o aligned data"); pd_aligned_disjoint_words(from, to, count); // Rare calls -> just delegate. } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { // JVM2008: very rare. pd_aligned_disjoint_words(from, to, count); // Rare calls -> just delegate. } @@ -756,7 +756,7 @@ // C O N J O I N T C O P Y I N G // //*************************************// -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { // JVM2008: between some and lower end of frequent. #ifdef USE_INLINE_ASM @@ -836,13 +836,13 @@ #endif } -static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { // Just delegate. HeapWords are optimally aligned anyway. pd_aligned_conjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { #ifdef USE_INLINE_ASM size_t count_in = count; @@ -866,16 +866,16 @@ // C O N J O I N T A T O M I C C O P Y I N G // //**************************************************// -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { // Call arraycopy stubs to do the job. pd_conjoint_bytes(from, to, count); // bytes are always accessed atomically. } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { #ifdef USE_INLINE_ASM size_t count_in = count; - if (has_destructive_overlap((char*)from, (char*)to, count_in*BytesPerShort)) { + if (has_destructive_overlap((const char*)from, (char*)to, count_in*BytesPerShort)) { // Use optimizations from shared code where no z-specific optimization exists. 
copy_conjoint_jshorts_atomic(from, to, count); } else { @@ -890,11 +890,11 @@ #endif } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { #ifdef USE_INLINE_ASM size_t count_in = count; - if (has_destructive_overlap((char*)from, (char*)to, count_in*BytesPerInt)) { + if (has_destructive_overlap((const char*)from, (char*)to, count_in*BytesPerInt)) { switch (count_in) { case 4: COPY4_ATOMIC_4(to,from) return; @@ -922,7 +922,7 @@ #endif } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { #ifdef USE_INLINE_ASM size_t count_in = count; @@ -970,11 +970,11 @@ } } else - pd_aligned_disjoint_words((HeapWord*)from, (HeapWord*)to, count_in); // rare calls -> just delegate. + pd_aligned_disjoint_words((const HeapWord*)from, (HeapWord*)to, count_in); // rare calls -> just delegate. #endif } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { #ifdef USE_INLINE_ASM size_t count_in = count; @@ -1011,24 +1011,24 @@ #endif } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { pd_conjoint_bytes_atomic(from, to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count); +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jshorts_atomic((const jshort*)from, (jshort*)to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); } //**********************************************// diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/cpu/s390/interp_masm_s390.cpp --- a/src/hotspot/cpu/s390/interp_masm_s390.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -389,6 +389,8 @@ #endif z_agr(result, index); // Address of indexed array element. load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result); + // The resulting oop is null if the reference is not yet resolved. + // It is Universe::the_null_sentinel() if the reference resolved to NULL via condy. 
} // load cpool->resolved_klass_at(index) diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/cpu/s390/templateTable_s390.cpp --- a/src/hotspot/cpu/s390/templateTable_s390.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/cpu/s390/templateTable_s390.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -450,7 +450,7 @@ void TemplateTable::ldc(bool wide) { transition(vtos, vtos); - Label call_ldc, notFloat, notClass, Done; + Label call_ldc, notFloat, notClass, notInt, Done; const Register RcpIndex = Z_tmp_1; const Register Rtags = Z_ARG2; @@ -500,22 +500,17 @@ __ z_bru(Done); __ bind(notFloat); -#ifdef ASSERT - { - Label L; - - __ z_cli(0, Raddr_type, JVM_CONSTANT_Integer); - __ z_bre(L); - // String and Object are rewritten to fast_aldc. - __ stop("unexpected tag type in ldc"); - - __ bind(L); - } -#endif + __ z_cli(0, Raddr_type, JVM_CONSTANT_Integer); + __ z_brne(notInt); // itos __ mem2reg_opt(Z_tos, Address(Z_tmp_2, RcpOffset, base_offset), false); __ push_i(Z_tos); + __ z_bru(Done); + + // assume the tag is for condy; if not, the VM runtime will tell us + __ bind(notInt); + condy_helper(Done); __ bind(Done); } @@ -528,15 +523,23 @@ const Register index = Z_tmp_2; int index_size = wide ? sizeof(u2) : sizeof(u1); - Label L_resolved; + Label L_do_resolve, L_resolved; // We are resolved if the resolved reference cache entry contains a // non-null object (CallSite, etc.). __ get_cache_index_at_bcp(index, 1, index_size); // Load index. __ load_resolved_reference_at_index(Z_tos, index); __ z_ltgr(Z_tos, Z_tos); + __ z_bre(L_do_resolve); + + // Convert null sentinel to NULL. + __ load_const_optimized(Z_R1_scratch, (intptr_t)Universe::the_null_sentinel_addr()); + __ z_cg(Z_tos, Address(Z_R1_scratch)); __ z_brne(L_resolved); - + __ clear_reg(Z_tos); + __ z_bru(L_resolved); + + __ bind(L_do_resolve); // First time invocation - must resolve first. address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); __ load_const_optimized(Z_ARG1, (int)bytecode()); @@ -548,7 +551,7 @@ void TemplateTable::ldc2_w() { transition(vtos, vtos); - Label Long, Done; + Label notDouble, notLong, Done; // Z_tmp_1 = index of cp entry __ get_2_byte_integer_at_bcp(Z_tmp_1, 1, InterpreterMacroAssembler::Unsigned); @@ -566,21 +569,132 @@ // Check type. __ z_cli(0, Z_tos, JVM_CONSTANT_Double); - __ z_brne(Long); - + __ z_brne(notDouble); // dtos __ mem2freg_opt(Z_ftos, Address(Z_tmp_2, Z_tmp_1, base_offset)); __ push_d(); __ z_bru(Done); - __ bind(Long); + __ bind(notDouble); + __ z_cli(0, Z_tos, JVM_CONSTANT_Long); + __ z_brne(notLong); // ltos __ mem2reg_opt(Z_tos, Address(Z_tmp_2, Z_tmp_1, base_offset)); __ push_l(); + __ z_bru(Done); + + __ bind(notLong); + condy_helper(Done); __ bind(Done); } +void TemplateTable::condy_helper(Label& Done) { + const Register obj = Z_tmp_1; + const Register off = Z_tmp_2; + const Register flags = Z_ARG1; + const Register rarg = Z_ARG2; + __ load_const_optimized(rarg, (int)bytecode()); + call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg); + __ get_vm_result_2(flags); + + // VMr = obj = base address to find primitive value to push + // VMr2 = flags = (tos, off) using format of CPCE::_flags + assert(ConstantPoolCacheEntry::field_index_mask == 0xffff, "or use other instructions"); + __ z_llghr(off, flags); + const Address field(obj, off); + + // What sort of thing are we loading? + __ z_srl(flags, ConstantPoolCacheEntry::tos_state_shift); + // Make sure we don't need to mask flags for tos_state after the above shift. 
+ ConstantPoolCacheEntry::verify_tos_state_shift(); + + switch (bytecode()) { + case Bytecodes::_ldc: + case Bytecodes::_ldc_w: + { + // tos in (itos, ftos, stos, btos, ctos, ztos) + Label notInt, notFloat, notShort, notByte, notChar, notBool; + __ z_cghi(flags, itos); + __ z_brne(notInt); + // itos + __ z_l(Z_tos, field); + __ push(itos); + __ z_bru(Done); + + __ bind(notInt); + __ z_cghi(flags, ftos); + __ z_brne(notFloat); + // ftos + __ z_le(Z_ftos, field); + __ push(ftos); + __ z_bru(Done); + + __ bind(notFloat); + __ z_cghi(flags, stos); + __ z_brne(notShort); + // stos + __ z_lh(Z_tos, field); + __ push(stos); + __ z_bru(Done); + + __ bind(notShort); + __ z_cghi(flags, btos); + __ z_brne(notByte); + // btos + __ z_lb(Z_tos, field); + __ push(btos); + __ z_bru(Done); + + __ bind(notByte); + __ z_cghi(flags, ctos); + __ z_brne(notChar); + // ctos + __ z_llh(Z_tos, field); + __ push(ctos); + __ z_bru(Done); + + __ bind(notChar); + __ z_cghi(flags, ztos); + __ z_brne(notBool); + // ztos + __ z_lb(Z_tos, field); + __ push(ztos); + __ z_bru(Done); + + __ bind(notBool); + break; + } + + case Bytecodes::_ldc2_w: + { + Label notLong, notDouble; + __ z_cghi(flags, ltos); + __ z_brne(notLong); + // ltos + __ z_lg(Z_tos, field); + __ push(ltos); + __ z_bru(Done); + + __ bind(notLong); + __ z_cghi(flags, dtos); + __ z_brne(notDouble); + // dtos + __ z_ld(Z_ftos, field); + __ push(dtos); + __ z_bru(Done); + + __ bind(notDouble); + break; + } + + default: + ShouldNotReachHere(); + } + + __ stop("bad ldc/condy"); +} + void TemplateTable::locals_index(Register reg, int offset) { __ z_llgc(reg, at_bcp(offset)); __ z_lcgr(reg); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/cpu/sparc/copy_sparc.hpp --- a/src/hotspot/cpu/sparc/copy_sparc.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/cpu/sparc/copy_sparc.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,11 +27,11 @@ // Inline functions for memory copy and fill. 
-static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { (void)memmove(to, from, count * HeapWordSize); } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { switch (count) { case 8: to[7] = from[7]; case 7: to[6] = from[6]; @@ -47,7 +47,7 @@ } } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { switch (count) { case 8: to[7] = from[7]; case 7: to[6] = from[6]; @@ -65,23 +65,23 @@ } } -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { (void)memmove(to, from, count * HeapWordSize); } -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { (void)memmove(to, from, count); } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { (void)memmove(to, from, count); } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { if (from > to) { while (count-- > 0) { // Copy forwards @@ -97,7 +97,7 @@ } } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { if (from > to) { while (count-- > 0) { // Copy forwards @@ -113,12 +113,12 @@ } } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { // Do better than this: inline memmove body NEEDS CLEANUP if (from > to) { while (count-- > 0) { @@ -135,24 +135,24 @@ } } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { pd_conjoint_bytes_atomic(from, to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count); +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jshorts_atomic((const jshort*)from, (jshort*)to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, 
HeapWord* to, size_t count) { - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); } static void pd_fill_to_words(HeapWord* tohw, size_t count, juint value) { diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/cpu/sparc/macroAssembler_sparc.cpp --- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1411,9 +1411,14 @@ void MacroAssembler::unimplemented(const char* what) { - char* b = new char[1024]; - jio_snprintf(b, 1024, "unimplemented: %s", what); - stop(b); + const char* buf = NULL; + { + ResourceMark rm; + stringStream ss; + ss.print("unimplemented: %s", what); + buf = code_string(ss.as_string()); + } + stop(buf); } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/cpu/x86/macroAssembler_x86.cpp --- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -3660,9 +3660,14 @@ } void MacroAssembler::unimplemented(const char* what) { - char* b = new char[1024]; - jio_snprintf(b, 1024, "unimplemented: %s", what); - stop(b); + const char* buf = NULL; + { + ResourceMark rm; + stringStream ss; + ss.print("unimplemented: %s", what); + buf = code_string(ss.as_string()); + } + stop(buf); } #ifdef _LP64 diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/cpu/zero/copy_zero.hpp --- a/src/hotspot/cpu/zero/copy_zero.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/cpu/zero/copy_zero.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright 2007 Red Hat, Inc. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -28,11 +28,11 @@ // Inline functions for memory copy and fill. 
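The unimplemented() hunks above (sparc and x86, matching the earlier aarch64 change) all replace a leaked 1024-byte C-heap buffer with a message built in resource memory and copied into the code buffer. Restated as a sketch with the intent spelled out (same calls as the hunks, shown out of their real context):

  const char* buf = NULL;
  {
    ResourceMark rm;                       // frees the stringStream's backing store on exit
    stringStream ss;
    ss.print("unimplemented: %s", what);   // format in resource memory, no fixed-size buffer
    buf = code_string(ss.as_string());     // durable copy owned by the code buffer
  }
  stop(buf);                               // safe: 'buf' outlives the ResourceMark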
-static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { memmove(to, from, count * HeapWordSize); } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { switch (count) { case 8: to[7] = from[7]; case 7: to[6] = from[6]; @@ -49,7 +49,7 @@ } } -static void pd_disjoint_words_atomic(HeapWord* from, +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { switch (count) { @@ -70,73 +70,73 @@ } } -static void pd_aligned_conjoint_words(HeapWord* from, +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { memmove(to, from, count * HeapWordSize); } -static void pd_aligned_disjoint_words(HeapWord* from, +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { memmove(to, from, count); } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { memmove(to, from, count); } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { _Copy_conjoint_jshorts_atomic(from, to, count); } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { _Copy_conjoint_jints_atomic(from, to, count); } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { _Copy_conjoint_jlongs_atomic(from, to, count); } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { #ifdef _LP64 assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); - _Copy_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + _Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); #else assert(BytesPerInt == BytesPerOop, "jints and oops must be the same size"); - _Copy_conjoint_jints_atomic((jint*)from, (jint*)to, count); + _Copy_conjoint_jints_atomic((const jint*)from, (jint*)to, count); #endif // _LP64 } -static void pd_arrayof_conjoint_bytes(HeapWord* from, +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_bytes(from, to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jshorts(from, to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jints(from, to, count); } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jlongs(from, to, count); } -static void pd_arrayof_conjoint_oops(HeapWord* from, +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { 
#ifdef _LP64 diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/os/linux/decoder_linux.cpp --- a/src/hotspot/os/linux/decoder_linux.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/os/linux/decoder_linux.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,6 +24,7 @@ #include "jvm.h" #include "utilities/decoder_elf.hpp" +#include "utilities/elfFile.hpp" #include @@ -50,3 +51,38 @@ return false; } +// Returns true if the elf file is marked NOT to require an executable stack, +// or if the file could not be opened. +// Returns false if the elf file requires an executable stack, the stack flag +// is not set at all, or if the file can not be read. +bool ElfFile::specifies_noexecstack(const char* filepath) { + if (filepath == NULL) return true; + + FILE* file = fopen(filepath, "r"); + if (file == NULL) return true; + + // AARCH64 defaults to noexecstack. All others default to execstack. + bool result = AARCH64_ONLY(true) NOT_AARCH64(false); + + // Read file header + Elf_Ehdr head; + if (fread(&head, sizeof(Elf_Ehdr), 1, file) == 1 && + is_elf_file(head) && + fseek(file, head.e_phoff, SEEK_SET) == 0) { + + // Read program header table + Elf_Phdr phdr; + for (int index = 0; index < head.e_phnum; index ++) { + if (fread((void*)&phdr, sizeof(Elf_Phdr), 1, file) != 1) { + result = false; + break; + } + if (phdr.p_type == PT_GNU_STACK) { + result = (phdr.p_flags == (PF_R | PF_W)); + break; + } + } + } + fclose(file); + return result; +} diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/os/windows/globals_windows.hpp --- a/src/hotspot/os/windows/globals_windows.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/os/windows/globals_windows.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,10 +37,7 @@ notproduct, \ range, \ constraint, \ - writeable) \ - \ - product(bool, UseUTCFileTimestamp, true, \ - "Adjust the timestamp returned from stat() to be UTC") + writeable) // diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/os_cpu/bsd_x86/copy_bsd_x86.inline.hpp --- a/src/hotspot/os_cpu/bsd_x86/copy_bsd_x86.inline.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/os_cpu/bsd_x86/copy_bsd_x86.inline.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
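The new ElfFile::specifies_noexecstack above scans the program header table for PT_GNU_STACK and treats a read/write-only entry as "no executable stack required"; with no entry at all, AArch64 defaults to non-executable and other platforms to executable. The core test, pulled out for clarity (names taken from the hunk, sketch only):

  // A PT_GNU_STACK entry whose flags are exactly PF_R | PF_W (no PF_X)
  // marks the object as not requiring an executable stack.
  bool noexecstack_from(const Elf_Phdr& phdr) {
    return phdr.p_type == PT_GNU_STACK && phdr.p_flags == (PF_R | PF_W);
  }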
* * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef OS_CPU_BSD_X86_VM_COPY_BSD_X86_INLINE_HPP #define OS_CPU_BSD_X86_VM_COPY_BSD_X86_INLINE_HPP -static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 (void)memmove(to, from, count * HeapWordSize); #else @@ -70,7 +70,7 @@ #endif // AMD64 } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 switch (count) { case 8: to[7] = from[7]; @@ -108,7 +108,7 @@ #endif // AMD64 } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 switch (count) { case 8: to[7] = from[7]; @@ -132,15 +132,15 @@ #endif // AMD64 } -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_conjoint_words(from, to, count); } -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { #ifdef AMD64 (void)memmove(to, from, count); #else @@ -219,25 +219,25 @@ #endif // AMD64 } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { pd_conjoint_bytes(from, to, count); } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { _Copy_conjoint_jshorts_atomic(from, to, count); } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { #ifdef AMD64 _Copy_conjoint_jints_atomic(from, to, count); #else assert(HeapWordSize == BytesPerInt, "heapwords and jints must be the same size"); // pd_conjoint_words is word-atomic in this implementation. - pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count); + pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count); #endif // AMD64 } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { #ifdef AMD64 _Copy_conjoint_jlongs_atomic(from, to, count); #else @@ -262,47 +262,47 @@ #endif // AMD64 } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { #ifdef AMD64 assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); - _Copy_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + _Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); #else assert(HeapWordSize == BytesPerOop, "heapwords and oops must be the same size"); // pd_conjoint_words is word-atomic in this implementation. 
- pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count); + pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count); #endif // AMD64 } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_bytes(from, to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jshorts(from, to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 _Copy_arrayof_conjoint_jints(from, to, count); #else - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); #endif // AMD64 } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 _Copy_arrayof_conjoint_jlongs(from, to, count); #else - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); #endif // AMD64 } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); _Copy_arrayof_conjoint_jlongs(from, to, count); #else - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); #endif // AMD64 } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/os_cpu/linux_aarch64/copy_linux_aarch64.inline.hpp --- a/src/hotspot/os_cpu/linux_aarch64/copy_linux_aarch64.inline.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/os_cpu/linux_aarch64/copy_linux_aarch64.inline.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -99,7 +99,7 @@ : "memory", "cc"); \ } -static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { __asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory"); if (__builtin_expect(count <= 8, 1)) { COPY_SMALL(from, to, count); @@ -108,7 +108,7 @@ _Copy_conjoint_words(from, to, count); } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { if (__builtin_constant_p(count)) { memcpy(to, from, count * sizeof(HeapWord)); return; @@ -121,7 +121,7 @@ _Copy_disjoint_words(from, to, count); } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { __asm volatile( "prfm pldl1strm, [%[s], #0];" :: [s]"r"(from) : "memory"); if (__builtin_expect(count <= 8, 1)) { COPY_SMALL(from, to, count); @@ -130,56 +130,56 @@ _Copy_disjoint_words(from, to, count); } -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_conjoint_words(from, to, count); } -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { (void)memmove(to, from, count); } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { pd_conjoint_bytes(from, to, count); } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { _Copy_conjoint_jshorts_atomic(from, to, count); } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { _Copy_conjoint_jints_atomic(from, to, count); } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { _Copy_conjoint_jlongs_atomic(from, to, count); } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); - _Copy_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + _Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_bytes(from, to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jshorts(from, to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { 
_Copy_arrayof_conjoint_jints(from, to, count); } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jlongs(from, to, count); } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { assert(!UseCompressedOops, "foo!"); assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); _Copy_arrayof_conjoint_jlongs(from, to, count); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/os_cpu/linux_arm/copy_linux_arm.inline.hpp --- a/src/hotspot/os_cpu/linux_arm/copy_linux_arm.inline.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/os_cpu/linux_arm/copy_linux_arm.inline.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef OS_CPU_LINUX_ARM_VM_COPY_LINUX_ARM_INLINE_HPP #define OS_CPU_LINUX_ARM_VM_COPY_LINUX_ARM_INLINE_HPP -static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AARCH64 _Copy_conjoint_words(from, to, count * HeapWordSize); #else @@ -34,7 +34,7 @@ #endif } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AARCH64 _Copy_disjoint_words(from, to, count * HeapWordSize); #else @@ -42,27 +42,27 @@ #endif // AARCH64 } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_conjoint_words(from, to, count); } -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { memmove(to, from, count); } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { pd_conjoint_bytes(from, to, count); } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { #ifdef AARCH64 _Copy_conjoint_jshorts_atomic(from, to, count * BytesPerShort); #else @@ -70,58 +70,58 @@ #endif } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { #ifdef AARCH64 _Copy_conjoint_jints_atomic(from, to, count * BytesPerInt); #else assert(HeapWordSize == BytesPerInt, "heapwords and jints must be the same size"); // pd_conjoint_words is word-atomic in this implementation. 
- pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count); + pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count); #endif } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { #ifdef AARCH64 assert(HeapWordSize == BytesPerLong, "64-bit architecture"); - pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count); + pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count); #else _Copy_conjoint_jlongs_atomic(to, from, count * BytesPerLong); #endif } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { #ifdef AARCH64 if (UseCompressedOops) { assert(BytesPerHeapOop == BytesPerInt, "compressed oops"); - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); } else { assert(BytesPerHeapOop == BytesPerLong, "64-bit architecture"); - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); } #else assert(BytesPerHeapOop == BytesPerInt, "32-bit architecture"); - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); #endif } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_bytes_atomic((void*)from, (void*)to, count); +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_bytes_atomic((const void*)from, (void*)to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count); +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jshorts_atomic((const jshort*)from, (jshort*)to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); } #endif // OS_CPU_LINUX_ARM_VM_COPY_LINUX_ARM_INLINE_HPP diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/os_cpu/linux_x86/copy_linux_x86.inline.hpp --- a/src/hotspot/os_cpu/linux_x86/copy_linux_x86.inline.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/os_cpu/linux_x86/copy_linux_x86.inline.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef OS_CPU_LINUX_X86_VM_COPY_LINUX_X86_INLINE_HPP #define OS_CPU_LINUX_X86_VM_COPY_LINUX_X86_INLINE_HPP -static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 (void)memmove(to, from, count * HeapWordSize); #else @@ -70,7 +70,7 @@ #endif // AMD64 } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 switch (count) { case 8: to[7] = from[7]; @@ -108,7 +108,7 @@ #endif // AMD64 } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 switch (count) { case 8: to[7] = from[7]; @@ -132,15 +132,15 @@ #endif // AMD64 } -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_conjoint_words(from, to, count); } -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { #ifdef AMD64 (void)memmove(to, from, count); #else @@ -219,25 +219,25 @@ #endif // AMD64 } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { pd_conjoint_bytes(from, to, count); } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { _Copy_conjoint_jshorts_atomic(from, to, count); } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { #ifdef AMD64 _Copy_conjoint_jints_atomic(from, to, count); #else assert(HeapWordSize == BytesPerInt, "heapwords and jints must be the same size"); // pd_conjoint_words is word-atomic in this implementation. - pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count); + pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count); #endif // AMD64 } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { #ifdef AMD64 _Copy_conjoint_jlongs_atomic(from, to, count); #else @@ -262,47 +262,47 @@ #endif // AMD64 } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { #ifdef AMD64 assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); - _Copy_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + _Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); #else assert(HeapWordSize == BytesPerOop, "heapwords and oops must be the same size"); // pd_conjoint_words is word-atomic in this implementation. 
- pd_conjoint_words((HeapWord*)from, (HeapWord*)to, count); + pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count); #endif // AMD64 } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_bytes(from, to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jshorts(from, to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 _Copy_arrayof_conjoint_jints(from, to, count); #else - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); #endif // AMD64 } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 _Copy_arrayof_conjoint_jlongs(from, to, count); #else - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); #endif // AMD64 } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); _Copy_arrayof_conjoint_jlongs(from, to, count); #else - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); #endif // AMD64 } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/os_cpu/solaris_x86/copy_solaris_x86.inline.hpp --- a/src/hotspot/os_cpu/solaris_x86/copy_solaris_x86.inline.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/os_cpu/solaris_x86/copy_solaris_x86.inline.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,11 +25,11 @@ #ifndef OS_CPU_SOLARIS_X86_VM_COPY_SOLARIS_X86_INLINE_HPP #define OS_CPU_SOLARIS_X86_VM_COPY_SOLARIS_X86_INLINE_HPP -static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { (void)memmove(to, from, count * HeapWordSize); } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { #ifndef AMD64 (void)memcpy(to, from, count * HeapWordSize); #else @@ -50,7 +50,7 @@ #endif // AMD64 } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { switch (count) { case 8: to[7] = from[7]; case 7: to[6] = from[6]; @@ -68,15 +68,15 @@ } } -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { (void)memmove(to, from, count * HeapWordSize); } -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { #ifdef AMD64 (void)memmove(to, from, count); #else @@ -84,53 +84,53 @@ #endif // AMD64 } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { pd_conjoint_bytes(from, to, count); } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { _Copy_conjoint_jshorts_atomic(from, to, count); } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { _Copy_conjoint_jints_atomic(from, to, count); } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { // Guarantee use of fild/fistp or xmm regs via some asm code, because compilers won't. 
_Copy_conjoint_jlongs_atomic(from, to, count); } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { #ifdef AMD64 assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); - _Copy_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + _Copy_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); #else - _Copy_conjoint_jints_atomic((jint*)from, (jint*)to, count); + _Copy_conjoint_jints_atomic((const jint*)from, (jint*)to, count); #endif // AMD64 } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_bytes(from, to, count); } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jshorts(from, to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { _Copy_arrayof_conjoint_jints(from, to, count); } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 _Copy_arrayof_conjoint_jlongs(from, to, count); #else - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); #endif // AMD64 } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); _Copy_arrayof_conjoint_jlongs(from, to, count); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/os_cpu/windows_x86/copy_windows_x86.inline.hpp --- a/src/hotspot/os_cpu/windows_x86/copy_windows_x86.inline.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/os_cpu/windows_x86/copy_windows_x86.inline.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,11 +25,11 @@ #ifndef OS_CPU_WINDOWS_X86_VM_COPY_WINDOWS_X86_INLINE_HPP #define OS_CPU_WINDOWS_X86_VM_COPY_WINDOWS_X86_INLINE_HPP -static void pd_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { (void)memmove(to, from, count * HeapWordSize); } -static void pd_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 switch (count) { case 8: to[7] = from[7]; @@ -50,7 +50,7 @@ #endif // AMD64 } -static void pd_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { +static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { switch (count) { case 8: to[7] = from[7]; case 7: to[6] = from[6]; @@ -68,23 +68,23 @@ } } -static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { (void)memmove(to, from, count * HeapWordSize); } -static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { +static void pd_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { pd_disjoint_words(from, to, count); } -static void pd_conjoint_bytes(void* from, void* to, size_t count) { +static void pd_conjoint_bytes(const void* from, void* to, size_t count) { (void)memmove(to, from, count); } -static void pd_conjoint_bytes_atomic(void* from, void* to, size_t count) { +static void pd_conjoint_bytes_atomic(const void* from, void* to, size_t count) { pd_conjoint_bytes(from, to, count); } -static void pd_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { +static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { if (from > to) { while (count-- > 0) { // Copy forwards @@ -100,7 +100,7 @@ } } -static void pd_conjoint_jints_atomic(jint* from, jint* to, size_t count) { +static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) { if (from > to) { while (count-- > 0) { // Copy forwards @@ -116,10 +116,10 @@ } } -static void pd_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { +static void pd_conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { #ifdef AMD64 assert(BytesPerLong == BytesPerOop, "jlongs and oops must be the same size"); - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); #else // Guarantee use of fild/fistp or xmm regs via some asm code, because compilers won't. 
__asm { @@ -149,7 +149,7 @@ #endif // AMD64 } -static void pd_conjoint_oops_atomic(oop* from, oop* to, size_t count) { +static void pd_conjoint_oops_atomic(const oop* from, oop* to, size_t count) { // Do better than this: inline memmove body NEEDS CLEANUP if (from > to) { while (count-- > 0) { @@ -166,7 +166,7 @@ } } -static void pd_arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { +static void pd_arrayof_conjoint_bytes(const HeapWord* from, HeapWord* to, size_t count) { #ifdef AMD64 pd_conjoint_bytes_atomic(from, to, count); #else @@ -174,20 +174,20 @@ #endif // AMD64 } -static void pd_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jshorts_atomic((jshort*)from, (jshort*)to, count); +static void pd_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jshorts_atomic((const jshort*)from, (jshort*)to, count); } -static void pd_arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); +static void pd_arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); } -static void pd_arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_jlongs_atomic((jlong*)from, (jlong*)to, count); +static void pd_arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_jlongs_atomic((const jlong*)from, (jlong*)to, count); } -static void pd_arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { - pd_conjoint_oops_atomic((oop*)from, (oop*)to, count); +static void pd_arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { + pd_conjoint_oops_atomic((const oop*)from, (oop*)to, count); } #endif // OS_CPU_WINDOWS_X86_VM_COPY_WINDOWS_X86_INLINE_HPP diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/classfile/compactHashtable.cpp --- a/src/hotspot/share/classfile/compactHashtable.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/classfile/compactHashtable.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -146,27 +146,23 @@ cht->init(base_address, _num_entries, _num_buckets, _compact_buckets->data(), _compact_entries->data()); - if (log_is_enabled(Info, cds, hashtables)) { - ResourceMark rm; - LogMessage(cds, hashtables) msg; - stringStream info_stream; - + LogMessage(cds, hashtables) msg; + if (msg.is_info()) { double avg_cost = 0.0; if (_num_entries > 0) { avg_cost = double(table_bytes)/double(_num_entries); } - info_stream.print_cr("Shared %s table stats -------- base: " PTR_FORMAT, + msg.info("Shared %s table stats -------- base: " PTR_FORMAT, table_name, (intptr_t)base_address); - info_stream.print_cr("Number of entries : %9d", _num_entries); - info_stream.print_cr("Total bytes used : %9d", table_bytes); - info_stream.print_cr("Average bytes per entry : %9.3f", avg_cost); - info_stream.print_cr("Average bucket size : %9.3f", summary.avg()); - info_stream.print_cr("Variance of bucket size : %9.3f", summary.variance()); - info_stream.print_cr("Std. dev. 
of bucket size: %9.3f", summary.sd()); - info_stream.print_cr("Empty buckets : %9d", _num_empty_buckets); - info_stream.print_cr("Value_Only buckets : %9d", _num_value_only_buckets); - info_stream.print_cr("Other buckets : %9d", _num_other_buckets); - msg.info("%s", info_stream.as_string()); + msg.info("Number of entries : %9d", _num_entries); + msg.info("Total bytes used : %9d", table_bytes); + msg.info("Average bytes per entry : %9.3f", avg_cost); + msg.info("Average bucket size : %9.3f", summary.avg()); + msg.info("Variance of bucket size : %9.3f", summary.variance()); + msg.info("Std. dev. of bucket size: %9.3f", summary.sd()); + msg.info("Empty buckets : %9d", _num_empty_buckets); + msg.info("Value_Only buckets : %9d", _num_value_only_buckets); + msg.info("Other buckets : %9d", _num_other_buckets); } } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/classfile/vmSymbols.hpp --- a/src/hotspot/share/classfile/vmSymbols.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/classfile/vmSymbols.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -358,7 +358,6 @@ template(reference_lock_name, "lock") \ template(reference_discovered_name, "discovered") \ template(run_finalization_name, "runFinalization") \ - template(run_finalizers_on_exit_name, "runFinalizersOnExit") \ template(dispatchUncaughtException_name, "dispatchUncaughtException") \ template(loadClass_name, "loadClass") \ template(loadClassInternal_name, "loadClassInternal") \ diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/code/compiledMethod.cpp --- a/src/hotspot/share/code/compiledMethod.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/code/compiledMethod.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -439,11 +439,11 @@ } void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) { - OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock); + OrderAccess::release_store(&_unloading_clock, unloading_clock); } unsigned char CompiledMethod::unloading_clock() { - return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock); + return OrderAccess::load_acquire(&_unloading_clock); } // Processing of oop references should have been sufficient to keep diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/code/dependencyContext.cpp --- a/src/hotspot/share/code/dependencyContext.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/code/dependencyContext.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -270,5 +270,5 @@ #endif //PRODUCT int nmethodBucket::decrement() { - return Atomic::add(-1, (volatile int *)&_count); + return Atomic::sub(1, &_count); } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/cms/cmsHeap.hpp --- a/src/hotspot/share/gc/cms/cmsHeap.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/cms/cmsHeap.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -75,10 +75,6 @@ // supports. Caller does not hold the Heap_lock on entry. void collect(GCCause::Cause cause); - bool card_mark_must_follow_store() const { - return true; - } - void stop(); void safepoint_synchronize_begin(); void safepoint_synchronize_end(); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/collectionSetChooser.cpp --- a/src/hotspot/share/gc/g1/collectionSetChooser.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/collectionSetChooser.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -138,7 +138,7 @@ G1PrintRegionLivenessInfoClosure cl("Post-Sorting"); for (uint i = 0; i < _end; ++i) { HeapRegion* r = regions_at(i); - cl.doHeapRegion(r); + cl.do_heap_region(r); } } verify(); @@ -220,7 +220,7 @@ _g1h(G1CollectedHeap::heap()), _cset_updater(hrSorted, true /* parallel */, chunk_size) { } - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { // Do we have any marking information for this region? if (r->is_marked()) { // We will skip any region that's currently used as an old GC diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1Allocator.cpp --- a/src/hotspot/share/gc/g1/g1Allocator.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1Allocator.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -134,9 +134,6 @@ _old_is_full = true; } -G1PLAB::G1PLAB(size_t gclab_word_size) : - PLAB(gclab_word_size), _retired(true) { } - size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) { // Return the remaining space in the cur alloc region, but not less than // the min TLAB size. @@ -253,7 +250,7 @@ if ((required_in_plab <= plab_word_size) && may_throw_away_buffer(required_in_plab, plab_word_size)) { - G1PLAB* alloc_buf = alloc_buffer(dest, context); + PLAB* alloc_buf = alloc_buffer(dest, context); alloc_buf->retire(); size_t actual_plab_size = 0; @@ -304,7 +301,7 @@ void G1DefaultPLABAllocator::flush_and_retire_stats() { for (uint state = 0; state < InCSetState::Num; state++) { - G1PLAB* const buf = _alloc_buffers[state]; + PLAB* const buf = _alloc_buffers[state]; if (buf != NULL) { G1EvacStats* stats = _g1h->alloc_buffer_stats(state); buf->flush_and_retire_stats(stats); @@ -318,7 +315,7 @@ wasted = 0; undo_wasted = 0; for (uint state = 0; state < InCSetState::Num; state++) { - G1PLAB * const buf = _alloc_buffers[state]; + PLAB * const buf = _alloc_buffers[state]; if (buf != NULL) { wasted += buf->waste(); undo_wasted += buf->undo_waste(); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1Allocator.hpp --- a/src/hotspot/share/gc/g1/g1Allocator.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1Allocator.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -178,39 +178,6 @@ } }; -class G1PLAB: public PLAB { -private: - bool _retired; - -public: - G1PLAB(size_t gclab_word_size); - virtual ~G1PLAB() { - guarantee(_retired, "Allocation buffer has not been retired"); - } - - // The amount of space in words wasted within the PLAB including - // waste due to refills and alignment. 
- size_t wasted() const { return _wasted; } - - virtual void set_buf(HeapWord* buf, size_t word_size) { - PLAB::set_buf(buf, word_size); - _retired = false; - } - - virtual void retire() { - if (_retired) { - return; - } - PLAB::retire(); - _retired = true; - } - - virtual void flush_and_retire_stats(PLABStats* stats) { - PLAB::flush_and_retire_stats(stats); - _retired = true; - } -}; - // Manages the PLABs used during garbage collection. Interface for allocation from PLABs. // Needs to handle multiple contexts, extra alignment in any "survivor" area and some // statistics. @@ -231,7 +198,7 @@ size_t _direct_allocated[InCSetState::Num]; virtual void flush_and_retire_stats() = 0; - virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0; + virtual PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0; // Calculate the survivor space object alignment in bytes. Returns that or 0 if // there are no restrictions on survivor alignment. @@ -292,14 +259,14 @@ // The default PLAB allocator for G1. Keeps the current (single) PLAB for survivor // and old generation allocation. class G1DefaultPLABAllocator : public G1PLABAllocator { - G1PLAB _surviving_alloc_buffer; - G1PLAB _tenured_alloc_buffer; - G1PLAB* _alloc_buffers[InCSetState::Num]; + PLAB _surviving_alloc_buffer; + PLAB _tenured_alloc_buffer; + PLAB* _alloc_buffers[InCSetState::Num]; public: G1DefaultPLABAllocator(G1Allocator* _allocator); - virtual G1PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) { + virtual PLAB* alloc_buffer(InCSetState dest, AllocationContext_t context) { assert(dest.is_valid(), "Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()); assert(_alloc_buffers[dest.value()] != NULL, diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1Allocator.inline.hpp --- a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -47,7 +47,7 @@ inline HeapWord* G1PLABAllocator::plab_allocate(InCSetState dest, size_t word_sz, AllocationContext_t context) { - G1PLAB* buffer = alloc_buffer(dest, context); + PLAB* buffer = alloc_buffer(dest, context); if (_survivor_alignment_bytes == 0 || !dest.is_young()) { return buffer->allocate(word_sz); } else { diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1CardCounts.cpp --- a/src/hotspot/share/gc/g1/g1CardCounts.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1CardCounts.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -144,7 +144,7 @@ HeapRegionClosure(), _card_counts(card_counts) { } - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { _card_counts->clear_region(r); return false; } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1CardLiveData.cpp --- a/src/hotspot/share/gc/g1/g1CardLiveData.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1CardLiveData.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -285,7 +285,7 @@ _mark_bitmap(mark_bitmap), _cm(cm) { } - bool doHeapRegion(HeapRegion* hr) { + bool do_heap_region(HeapRegion* hr) { size_t marked_bytes = _helper.mark_marked_during_marking(_mark_bitmap, hr); if (marked_bytes > 0) { hr->add_to_marked_bytes(marked_bytes); @@ -352,7 +352,7 @@ _helper(live_data, g1h->reserved_region().start()), _gc_timestamp_at_create(live_data->gc_timestamp_at_create()) { } - bool doHeapRegion(HeapRegion* hr) { + bool do_heap_region(HeapRegion* hr) { if (has_been_reclaimed(hr)) { 
_helper.reset_live_data(hr); } @@ -478,7 +478,7 @@ int failures() const { return _failures; } - bool doHeapRegion(HeapRegion* hr) { + bool do_heap_region(HeapRegion* hr) { int failures = 0; // Walk the marking bitmap for this region and set the corresponding bits diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1CollectedHeap.cpp --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1010,7 +1010,7 @@ private: G1HRPrinter* _hr_printer; public: - bool doHeapRegion(HeapRegion* hr) { + bool do_heap_region(HeapRegion* hr) { assert(!hr->is_young(), "not expecting to find young regions"); _hr_printer->post_compaction(hr); return false; @@ -1573,7 +1573,6 @@ } jint G1CollectedHeap::initialize() { - CollectedHeap::pre_initialize(); os::enable_vtime(); // Necessary to satisfy locking discipline assertions. @@ -1917,7 +1916,7 @@ CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) : _gc_time_stamp(gc_time_stamp), _failures(false) { } - virtual bool doHeapRegion(HeapRegion* hr) { + virtual bool do_heap_region(HeapRegion* hr) { unsigned region_gc_time_stamp = hr->get_gc_time_stamp(); if (_gc_time_stamp != region_gc_time_stamp) { log_error(gc, verify)("Region " HR_FORMAT " has GC time stamp = %d, expected %d", HR_FORMAT_PARAMS(hr), @@ -1969,7 +1968,7 @@ size_t _used; public: SumUsedClosure() : _used(0) {} - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { _used += r->used(); return false; } @@ -2188,7 +2187,7 @@ ObjectClosure* _cl; public: IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {} - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { if (!r->is_continues_humongous()) { r->object_iterate(_cl); } @@ -2303,7 +2302,7 @@ outputStream* _st; public: PrintRegionClosure(outputStream* st) : _st(st) {} - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { r->print_on(_st); return false; } @@ -2422,7 +2421,7 @@ size_t _occupied_sum; public: - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { HeapRegionRemSet* hrrs = r->rem_set(); size_t occupied = hrrs->occupied(); _occupied_sum += occupied; @@ -2669,7 +2668,7 @@ _dcq(&JavaThread::dirty_card_queue_set()) { } - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { if (!r->is_starts_humongous()) { return false; } @@ -2745,7 +2744,7 @@ class VerifyRegionRemSetClosure : public HeapRegionClosure { public: - bool doHeapRegion(HeapRegion* hr) { + bool do_heap_region(HeapRegion* hr) { if (!hr->is_archive() && !hr->is_continues_humongous()) { hr->verify_rem_set(); } @@ -2815,7 +2814,7 @@ public: G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { } - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { _hr_printer->cset(r); return false; } @@ -4505,7 +4504,7 @@ _local_free_list("Local Region List for CSet Freeing") { } - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { G1CollectedHeap* g1h = G1CollectedHeap::heap(); assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index()); @@ -4628,7 +4627,7 @@ public: G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { } - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { _work_items[_cur_idx++] = 
WorkItem(r); return false; } @@ -4762,7 +4761,7 @@ _free_region_list(free_region_list), _humongous_objects_reclaimed(0), _humongous_regions_reclaimed(0), _freed_bytes(0) { } - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { if (!r->is_starts_humongous()) { return false; } @@ -4897,7 +4896,7 @@ class G1AbandonCollectionSetClosure : public HeapRegionClosure { public: - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index()); G1CollectedHeap::heap()->clear_in_cset(r); r->set_young_index_in_cset(-1); @@ -4967,7 +4966,7 @@ bool _success; public: NoYoungRegionsClosure() : _success(true) { } - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { if (r->is_young()) { log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young", p2i(r->bottom()), p2i(r->end())); @@ -4997,7 +4996,7 @@ public: TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { } - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { if (r->is_old()) { _old_set->remove(r); } else if(r->is_young()) { @@ -5065,7 +5064,7 @@ } } - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { if (r->is_empty()) { // Add free regions to the free list r->set_free(); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1CollectedHeap.hpp --- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1197,7 +1197,7 @@ } // Iterate over heap regions, in address order, terminating the - // iteration early if the "doHeapRegion" method returns "true". + // iteration early if the "do_heap_region" method returns "true". void heap_region_iterate(HeapRegionClosure* blk) const; // Return the region with the given index. It assumes the index is valid. @@ -1272,36 +1272,8 @@ size_t max_tlab_size() const; size_t unsafe_max_tlab_alloc(Thread* ignored) const; - // Can a compiler initialize a new object without store barriers? - // This permission only extends from the creation of a new object - // via a TLAB up to the first subsequent safepoint. If such permission - // is granted for this heap type, the compiler promises to call - // defer_store_barrier() below on any slow path allocation of - // a new object for which such initializing store barriers will - // have been elided. G1, like CMS, allows this, but should be - // ready to provide a compensating write barrier as necessary - // if that storage came out of a non-young region. The efficiency - // of this implementation depends crucially on being able to - // answer very efficiently in constant time whether a piece of - // storage in the heap comes from a young region or not. - // See ReduceInitialCardMarks. - virtual bool can_elide_tlab_store_barriers() const { - return true; - } - - virtual bool card_mark_must_follow_store() const { - return true; - } - inline bool is_in_young(const oop obj); - // We don't need barriers for initializing stores to objects - // in the young gen: for the SATB pre-barrier, there is no - // pre-value that needs to be remembered; for the remembered-set - // update logging post-barrier, we don't maintain remembered set - // information for young gen objects. - virtual inline bool can_elide_initializing_store_barrier(oop new_obj); - // Returns "true" iff the given word_size is "very large". 
static bool is_humongous(size_t word_size) { // Note this has to be strictly greater-than as the TLABs diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp --- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -241,15 +241,6 @@ return heap_region_containing(obj)->is_young(); } -// We don't need barriers for initializing stores to objects -// in the young gen: for the SATB pre-barrier, there is no -// pre-value that needs to be remembered; for the remembered-set -// update logging post-barrier, we don't maintain remembered set -// information for young gen objects. -inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) { - return is_in_young(new_obj); -} - inline bool G1CollectedHeap::is_obj_dead(const oop obj) const { if (obj == NULL) { return false; diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1CollectionSet.cpp --- a/src/hotspot/share/gc/g1/g1CollectionSet.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1CollectionSet.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -186,9 +186,9 @@ do { HeapRegion* r = G1CollectedHeap::heap()->region_at(_collection_set_regions[cur_pos]); - bool result = cl->doHeapRegion(r); + bool result = cl->do_heap_region(r); if (result) { - cl->incomplete(); + cl->set_incomplete(); return; } cur_pos++; @@ -292,7 +292,7 @@ public: G1VerifyYoungAgesClosure() : HeapRegionClosure(), _valid(true) { } - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { guarantee(r->is_young(), "Region must be young but is %s", r->get_type_str()); SurvRateGroup* group = r->surv_rate_group(); @@ -332,7 +332,7 @@ public: G1PrintCollectionSetClosure(outputStream* st) : HeapRegionClosure(), _st(st) { } - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index()); _st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d", HR_FORMAT_PARAMS(r), @@ -524,7 +524,7 @@ FREE_C_HEAP_ARRAY(int, _heap_region_indices); } - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { const int idx = r->young_index_in_cset(); assert(idx > -1, "Young index must be set for all regions in the incremental collection set but is not for region %u.", r->hrm_index()); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1ConcurrentMark.cpp --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -591,7 +591,7 @@ G1ClearBitmapHRClosure(G1CMBitMap* bitmap, G1ConcurrentMark* cm) : HeapRegionClosure(), _cm(cm), _bitmap(bitmap) { } - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { size_t const chunk_size_in_words = G1ClearBitMapTask::chunk_size() / HeapWordSize; HeapWord* cur = r->bottom(); @@ -638,7 +638,7 @@ } bool is_complete() { - return _cl.complete(); + return _cl.is_complete(); } }; @@ -694,7 +694,7 @@ 
CheckBitmapClearHRClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) { } - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { // This closure can be called concurrently to the mutator, so we must make sure // that the result of the getNextMarkedWordAddress() call is compared to the // value passed to it as limit to detect any found bits. @@ -707,12 +707,12 @@ bool G1ConcurrentMark::next_mark_bitmap_is_clear() { CheckBitmapClearHRClosure cl(_next_mark_bitmap); _g1h->heap_region_iterate(&cl); - return cl.complete(); + return cl.is_complete(); } class NoteStartOfMarkHRClosure: public HeapRegionClosure { public: - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { r->note_start_of_marking(); return false; } @@ -1094,7 +1094,7 @@ const uint old_regions_removed() { return _old_regions_removed; } const uint humongous_regions_removed() { return _humongous_regions_removed; } - bool doHeapRegion(HeapRegion *hr) { + bool do_heap_region(HeapRegion *hr) { _g1->reset_gc_time_stamps(hr); hr->note_end_of_marking(); @@ -1135,7 +1135,7 @@ G1NoteEndOfConcMarkClosure g1_note_end(_g1h, &local_cleanup_list, &hrrs_cleanup_task); _g1h->heap_region_par_iterate_from_worker_offset(&g1_note_end, &_hrclaimer, worker_id); - assert(g1_note_end.complete(), "Shouldn't have yielded!"); + assert(g1_note_end.is_complete(), "Shouldn't have yielded!"); // Now update the lists _g1h->remove_from_old_sets(g1_note_end.old_regions_removed(), g1_note_end.humongous_regions_removed()); @@ -2922,7 +2922,7 @@ "(bytes)", "(bytes)"); } -bool G1PrintRegionLivenessInfoClosure::doHeapRegion(HeapRegion* r) { +bool G1PrintRegionLivenessInfoClosure::do_heap_region(HeapRegion* r) { const char* type = r->get_type_str(); HeapWord* bottom = r->bottom(); HeapWord* end = r->end(); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1ConcurrentMark.hpp --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -848,7 +848,7 @@ // The header and footer are printed in the constructor and // destructor respectively. 
G1PrintRegionLivenessInfoClosure(const char* phase_name); - virtual bool doHeapRegion(HeapRegion* r); + virtual bool do_heap_region(HeapRegion* r); ~G1PrintRegionLivenessInfoClosure(); }; diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1EvacFailure.cpp --- a/src/hotspot/share/gc/g1/g1EvacFailure.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1EvacFailure.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -220,7 +220,7 @@ return rspc.marked_bytes(); } - bool doHeapRegion(HeapRegion *hr) { + bool do_heap_region(HeapRegion *hr) { assert(!hr->is_pinned(), "Unexpected pinned region at index %u", hr->hrm_index()); assert(hr->in_collection_set(), "bad CS"); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp --- a/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1FullGCAdjustTask.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -56,7 +56,7 @@ _bitmap(bitmap), _worker_id(worker_id) { } - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { G1AdjustAndRebuildClosure cl(_worker_id); if (r->is_humongous()) { oop obj = oop(r->humongous_start_region()->bottom()); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp --- a/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1FullGCCompactTask.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -40,7 +40,7 @@ G1ResetHumongousClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) { } - bool doHeapRegion(HeapRegion* current) { + bool do_heap_region(HeapRegion* current) { if (current->is_humongous()) { if (current->is_starts_humongous()) { oop obj = oop(current->bottom()); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp --- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -37,7 +37,7 @@ #include "logging/log.hpp" #include "utilities/ticks.inline.hpp" -bool G1FullGCPrepareTask::G1CalculatePointersClosure::doHeapRegion(HeapRegion* hr) { +bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion* hr) { if (hr->is_humongous()) { oop obj = oop(hr->humongous_start_region()->bottom()); if (_bitmap->is_marked(obj)) { diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp --- a/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1FullGCPrepareTask.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -67,7 +67,7 @@ G1FullGCCompactionPoint* cp); void update_sets(); - bool doHeapRegion(HeapRegion* hr); + bool do_heap_region(HeapRegion* hr); bool freed_regions(); }; diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1HeapTransition.cpp --- a/src/hotspot/share/gc/g1/g1HeapTransition.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1HeapTransition.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -58,7 +58,7 @@ class DetailedUsageClosure: public HeapRegionClosure { public: DetailedUsage _usage; - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { if (r->is_old()) { _usage._old_used += r->used(); _usage._old_region_count++; diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1HeapVerifier.cpp --- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -273,7 +273,7 @@ G1CollectedHeap* _g1h; 
public: VerifyArchivePointerRegionClosure(G1CollectedHeap* g1h) { } - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { if (r->is_archive()) { VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false); r->object_iterate(&verify_oop_pointers); @@ -306,7 +306,7 @@ return _failures; } - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { // For archive regions, verify there are no heap pointers to // non-pinned regions. For all others, verify liveness info. if (r->is_closed_archive()) { @@ -498,7 +498,7 @@ _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm), _old_count(), _humongous_count(), _free_count(){ } - bool doHeapRegion(HeapRegion* hr) { + bool do_heap_region(HeapRegion* hr) { if (hr->is_young()) { // TODO } else if (hr->is_humongous()) { @@ -608,7 +608,7 @@ public: G1VerifyCardTableCleanup(G1HeapVerifier* verifier, G1SATBCardTableModRefBS* ct_bs) : _verifier(verifier), _ct_bs(ct_bs) { } - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { if (r->is_survivor()) { _verifier->verify_dirty_region(r); } else { @@ -654,7 +654,7 @@ G1HeapVerifier* _verifier; public: G1VerifyDirtyYoungListClosure(G1HeapVerifier* verifier) : HeapRegionClosure(), _verifier(verifier) { } - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { _verifier->verify_dirty_region(r); return false; } @@ -721,7 +721,7 @@ bool failures() { return _failures; } - virtual bool doHeapRegion(HeapRegion* hr) { + virtual bool do_heap_region(HeapRegion* hr) { bool result = _verifier->verify_bitmaps(_caller, hr); if (!result) { _failures = true; @@ -744,7 +744,7 @@ public: G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { } - virtual bool doHeapRegion(HeapRegion* hr) { + virtual bool do_heap_region(HeapRegion* hr) { uint i = hr->hrm_index(); InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i); if (hr->is_humongous()) { diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1ParScanThreadState.cpp --- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -206,7 +206,7 @@ oop const old, size_t word_sz, uint age, HeapWord * const obj_ptr, const AllocationContext_t context) const { - G1PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context); + PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context); if (alloc_buf->contains(obj_ptr)) { _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age, dest_state.value() == InCSetState::Old, diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1RemSet.cpp --- a/src/hotspot/share/gc/g1/g1RemSet.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1RemSet.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -127,7 +127,7 @@ public: G1ResetScanTopClosure(HeapWord** scan_top) : _scan_top(scan_top) { } - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { uint hrm_index = r->hrm_index(); if (!r->in_collection_set() && r->is_old_or_humongous()) { _scan_top[hrm_index] = r->top(); @@ -204,7 +204,7 @@ if (_iter_states[region] != Unclaimed) { return false; } - jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_states[region]), Unclaimed); + G1RemsetIterState res = Atomic::cmpxchg(Claimed, &_iter_states[region], Unclaimed); return (res == Unclaimed); } @@ -214,7 +214,7 @@ if (iter_is_complete(region)) { return false; } - jint res = Atomic::cmpxchg(Complete, (jint*)(&_iter_states[region]), Claimed); + G1RemsetIterState res = Atomic::cmpxchg(Complete, &_iter_states[region], Claimed); return (res == Claimed); } @@ -349,7 +349,7 @@ _scan_state->add_dirty_region(region_idx_for_card); } -bool G1ScanRSForRegionClosure::doHeapRegion(HeapRegion* r) { +bool G1ScanRSForRegionClosure::do_heap_region(HeapRegion* r) { assert(r->in_collection_set(), "should only be called on elements of CS."); uint region_idx = r->hrm_index(); @@ -522,7 +522,7 @@ _g1h(G1CollectedHeap::heap()), _live_data(live_data) { } - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { if (!r->is_continues_humongous()) { r->rem_set()->scrub(_live_data); } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1RemSet.hpp --- a/src/hotspot/share/gc/g1/g1RemSet.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1RemSet.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -176,7 +176,7 @@ CodeBlobClosure* code_root_cl, uint worker_i); - bool doHeapRegion(HeapRegion* r); + bool do_heap_region(HeapRegion* r); double strong_code_root_scan_time_sec() { return _strong_code_root_scan_time_sec; diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1RemSetSummary.cpp --- a/src/hotspot/share/gc/g1/g1RemSetSummary.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1RemSetSummary.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -252,7 +252,7 @@ _max_rs_mem_sz(0), _max_code_root_mem_sz(0) {} - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { HeapRegionRemSet* hrrs = r->rem_set(); // HeapRegionRemSet::mem_size() includes the diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp --- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -131,6 +131,7 @@ } void G1SATBCardTableLoggingModRefBS::initialize(G1RegionToSpaceMapper* mapper) { + initialize_deferred_card_mark_barriers(); mapper->set_mapping_changed_listener(&_listener); _byte_map_size = mapper->reserved().byte_size(); @@ -213,3 +214,14 @@ } } } + +bool G1SATBCardTableModRefBS::is_in_young(oop obj) const { + volatile jbyte* p = byte_for((void*)obj); + return *p == g1_young_card_val(); +} + +void G1SATBCardTableLoggingModRefBS::flush_deferred_barriers(JavaThread* thread) { + CardTableModRefBS::flush_deferred_barriers(thread); + thread->satb_mark_queue().flush(); + thread->dirty_card_queue().flush(); +} diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp --- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -92,6 +92,8 @@ jbyte val = _byte_map[card_index]; return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val(); } + + virtual bool is_in_young(oop obj) const; }; template<> @@ -145,13 +147,19 @@ // above no longer applies. void invalidate(MemRegion mr); - void write_region_work(MemRegion mr) { invalidate(mr); } + void write_region(MemRegion mr) { invalidate(mr); } void write_ref_array_work(MemRegion mr) { invalidate(mr); } template void write_ref_field_post(T* field, oop new_val); void write_ref_field_post_slow(volatile jbyte* byte); + virtual void flush_deferred_barriers(JavaThread* thread); + + virtual bool card_mark_must_follow_store() const { + return true; + } + // Callbacks for runtime accesses. 
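The G1SATBCardTableModRefBS hunks above add is_in_young(), card_mark_must_follow_store() and flush_deferred_barriers() to the barrier set, so the collector-specific barrier set rather than the heap answers these policy questions. A toy standalone sketch of that ownership, with made-up ToyBarrierSet/ToyOop names that are not the HotSpot hierarchy:

#include <cassert>

// Toy stand-ins, not the HotSpot classes: the point is only that the
// collector-specific barrier set answers these policy questions now.
struct ToyOop { bool in_young; };

class ToyBarrierSet {
public:
  virtual ~ToyBarrierSet() { }
  virtual bool is_in_young(const ToyOop& obj) const = 0;
  virtual bool card_mark_must_follow_store() const = 0;
};

class ToyG1BarrierSet : public ToyBarrierSet {
public:
  virtual bool is_in_young(const ToyOop& obj) const { return obj.in_young; }
  virtual bool card_mark_must_follow_store() const { return true; }  // mirrors the G1 override above
};

int main() {
  ToyG1BarrierSet bs;
  ToyOop young = { true };
  assert(bs.is_in_young(young) && bs.card_mark_must_follow_store());
  return 0;
}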
template class AccessBarrier: public ModRefBarrierSet::AccessBarrier { diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.inline.hpp --- a/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.inline.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1SATBCardTableModRefBS.inline.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -30,7 +30,7 @@ template inline void G1SATBCardTableModRefBS::write_ref_field_pre(T* field) { - if (HasDecorator::value || + if (HasDecorator::value || HasDecorator::value) { return; } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp --- a/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -79,7 +79,7 @@ G1YoungRemSetSamplingClosure(SuspendibleThreadSetJoiner* sts) : HeapRegionClosure(), _sts(sts), _regions_visited(0), _sampled_rs_lengths(0) { } - virtual bool doHeapRegion(HeapRegion* r) { + virtual bool do_heap_region(HeapRegion* r) { size_t rs_length = r->rem_set()->occupied(); _sampled_rs_lengths += rs_length; @@ -114,7 +114,7 @@ G1CollectionSet* g1cs = g1h->collection_set(); g1cs->iterate(&cl); - if (cl.complete()) { + if (cl.is_complete()) { g1p->revise_young_list_target_length_if_necessary(cl.sampled_rs_lengths()); } } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/heapRegion.hpp --- a/src/hotspot/share/gc/g1/heapRegion.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/heapRegion.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -719,23 +719,23 @@ }; // HeapRegionClosure is used for iterating over regions. -// Terminates the iteration when the "doHeapRegion" method returns "true". +// Terminates the iteration when the "do_heap_region" method returns "true". class HeapRegionClosure : public StackObj { friend class HeapRegionManager; friend class G1CollectionSet; - bool _complete; - void incomplete() { _complete = false; } + bool _is_complete; + void set_incomplete() { _is_complete = false; } public: - HeapRegionClosure(): _complete(true) {} + HeapRegionClosure(): _is_complete(true) {} // Typically called on each region until it returns true. - virtual bool doHeapRegion(HeapRegion* r) = 0; + virtual bool do_heap_region(HeapRegion* r) = 0; // True after iteration if the closure was applied to all heap regions // and returned "false" in all cases. 
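The heapRegion.hpp hunk in progress here renames the per-region hook to do_heap_region() and the completion query to is_complete(). A minimal sketch of a closure written against the new names, assuming it is compiled inside the HotSpot tree; CountRegionsClosure is a hypothetical example, not code from this changeset:

// Illustrative only; assumes the HotSpot include paths are available.
#include "gc/g1/heapRegion.hpp"

// A hypothetical closure using the renamed hook: returning false keeps the
// iteration going, so is_complete() remains true afterwards.
class CountRegionsClosure : public HeapRegionClosure {
  size_t _count;
public:
  CountRegionsClosure() : _count(0) { }
  virtual bool do_heap_region(HeapRegion* r) {
    _count++;
    return false;  // false == do not terminate the iteration early
  }
  size_t count() const { return _count; }
};

// Hypothetical usage:
//   CountRegionsClosure cl;
//   G1CollectedHeap::heap()->heap_region_iterate(&cl);
//   assert(cl.is_complete(), "closure never requested early termination");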
- bool complete() { return _complete; } + bool is_complete() { return _is_complete; } }; #endif // SHARE_VM_GC_G1_HEAPREGION_HPP diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/heapRegionManager.cpp --- a/src/hotspot/share/gc/g1/heapRegionManager.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/heapRegionManager.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -242,9 +242,9 @@ continue; } guarantee(at(i) != NULL, "Tried to access region %u that has a NULL HeapRegion*", i); - bool res = blk->doHeapRegion(at(i)); + bool res = blk->do_heap_region(at(i)); if (res) { - blk->incomplete(); + blk->set_incomplete(); return; } } @@ -353,7 +353,7 @@ if (!hrclaimer->claim_region(index)) { continue; } - bool res = blk->doHeapRegion(r); + bool res = blk->do_heap_region(r); if (res) { return; } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/g1/heapRegionManager.hpp --- a/src/hotspot/share/gc/g1/heapRegionManager.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/g1/heapRegionManager.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -236,8 +236,8 @@ // and not free, and return the number of regions newly committed in commit_count. bool allocate_containing_regions(MemRegion range, size_t* commit_count, WorkGang* pretouch_workers); - // Apply blk->doHeapRegion() on all committed regions in address order, - // terminating the iteration early if doHeapRegion() returns true. + // Apply blk->do_heap_region() on all committed regions in address order, + // terminating the iteration early if do_heap_region() returns true. void iterate(HeapRegionClosure* blk) const; void par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* hrclaimer, const uint start_index) const; diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/parallel/cardTableExtension.cpp --- a/src/hotspot/share/gc/parallel/cardTableExtension.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/parallel/cardTableExtension.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,7 +26,7 @@ #include "gc/parallel/cardTableExtension.hpp" #include "gc/parallel/gcTaskManager.hpp" #include "gc/parallel/objectStartArray.inline.hpp" -#include "gc/parallel/parallelScavengeHeap.hpp" +#include "gc/parallel/parallelScavengeHeap.inline.hpp" #include "gc/parallel/psPromotionManager.inline.hpp" #include "gc/parallel/psScavenge.hpp" #include "gc/parallel/psTasks.hpp" @@ -677,3 +677,7 @@ } return min_start; } + +bool CardTableExtension::is_in_young(oop obj) const { + return ParallelScavengeHeap::heap()->is_in_young(obj); +} diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/parallel/cardTableExtension.hpp --- a/src/hotspot/share/gc/parallel/cardTableExtension.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/parallel/cardTableExtension.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -108,6 +108,13 @@ } #endif // ASSERT + + // ReduceInitialCardMarks support + virtual bool is_in_young(oop obj) const; + + virtual bool card_mark_must_follow_store() const { + return false; + } }; template<> diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,8 +57,6 @@ GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL; jint ParallelScavengeHeap::initialize() { - CollectedHeap::pre_initialize(); - const size_t heap_size = _collector_policy->max_heap_byte_size(); ReservedSpace heap_rs = Universe::reserve_heap(heap_size, _collector_policy->heap_alignment()); @@ -490,13 +488,6 @@ CollectedHeap::resize_all_tlabs(); } -bool ParallelScavengeHeap::can_elide_initializing_store_barrier(oop new_obj) { - // We don't need barriers for stores to objects in the - // young gen and, a fortiori, for initializing stores to - // objects therein. - return is_in_young(new_obj); -} - // This method is used by System.gc() and JVMTI. void ParallelScavengeHeap::collect(GCCause::Cause cause) { assert(!Heap_lock->owned_by_self(), @@ -719,4 +710,3 @@ memory_pools.append(_old_pool); return memory_pools; } - diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -205,21 +205,6 @@ size_t tlab_used(Thread* thr) const; size_t unsafe_max_tlab_alloc(Thread* thr) const; - // Can a compiler initialize a new object without store barriers? - // This permission only extends from the creation of a new object - // via a TLAB up to the first subsequent safepoint. - virtual bool can_elide_tlab_store_barriers() const { - return true; - } - - virtual bool card_mark_must_follow_store() const { - return false; - } - - // Return true if we don't we need a store barrier for - // initializing stores to an object at this address. - virtual bool can_elide_initializing_store_barrier(oop new_obj); - void object_iterate(ObjectClosure* cl); void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/parallel/psParallelCompact.hpp --- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -538,7 +538,7 @@ { assert(_dc_and_los < dc_claimed, "already claimed"); assert(_dc_and_los >= dc_one, "count would go negative"); - Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los); + Atomic::add(dc_mask, &_dc_and_los); } inline HeapWord* ParallelCompactData::RegionData::data_location() const @@ -578,7 +578,7 @@ inline void ParallelCompactData::RegionData::add_live_obj(size_t words) { assert(words <= (size_t)los_mask - live_obj_size(), "overflow"); - Atomic::add((int) words, (volatile int*) &_dc_and_los); + Atomic::add(static_cast(words), &_dc_and_los); } inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr) diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/serial/serialHeap.hpp --- a/src/hotspot/share/gc/serial/serialHeap.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/serial/serialHeap.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -61,10 +61,6 @@ virtual bool is_in_closed_subset(const void* p) const { return is_in(p); } - - virtual bool card_mark_must_follow_store() const { - return false; - } }; #endif // SHARE_VM_GC_CMS_CMSHEAP_HPP diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/shared/barrierSet.hpp --- a/src/hotspot/share/gc/shared/barrierSet.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/shared/barrierSet.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -32,6 +32,8 @@ #include "oops/oopsHierarchy.hpp" #include "utilities/fakeRttiSupport.hpp" +class JavaThread; + // This class provides the interface between a barrier implementation and // the rest of the system. @@ -107,19 +109,19 @@ static void static_write_ref_array_pre(HeapWord* start, size_t count); static void static_write_ref_array_post(HeapWord* start, size_t count); + // Support for optimizing compilers to call the barrier set on slow path allocations + // that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks. + // The allocation is safe to use iff it returns true. If not, the slow-path allocation + // is redone until it succeeds. This can e.g. prevent allocations from the slow path + // to be in old. + virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {} + virtual void flush_deferred_barriers(JavaThread* thread) {} + virtual void make_parsable(JavaThread* thread) {} + protected: virtual void write_ref_array_work(MemRegion mr) = 0; public: - // (For efficiency reasons, this operation is specialized for certain - // barrier types. Semantically, it should be thought of as a call to the - // virtual "_work" function below, which must implement the barrier.) - void write_region(MemRegion mr); - -protected: - virtual void write_region_work(MemRegion mr) = 0; - -public: // Inform the BarrierSet that the the covered heap region that starts // with "base" has been changed to have the given size (possibly from 0, // for initialization.) 
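A minimal, self-contained sketch of the hook pattern the barrierSet.hpp hunk above introduces, using stand-in names (BarrierSetBase, CardTableLike, JavaThreadStub, ensure_parsable) rather than the actual HotSpot classes: the base barrier set exposes empty per-thread virtuals, only card-table-style barrier sets override them, and GC-neutral code can then invoke them unconditionally for every thread.

#include <vector>

struct JavaThreadStub {};                      // stand-in for JavaThread

struct BarrierSetBase {
  virtual ~BarrierSetBase() {}
  // Empty defaults: barrier sets that never defer card marks need no per-thread work.
  virtual void on_slowpath_allocation_exit(JavaThreadStub*, void* /*new_obj*/) {}
  virtual void flush_deferred_barriers(JavaThreadStub*) {}
  virtual void make_parsable(JavaThreadStub*) {}
};

struct CardTableLike : BarrierSetBase {
  void flush_deferred_barriers(JavaThreadStub*) override {
    // A real card-table barrier set would write out this thread's deferred card mark here.
  }
  void make_parsable(JavaThreadStub* t) override {
    flush_deferred_barriers(t);                // parsability implies the deferred mark is flushed
  }
};

// GC-neutral code can prepare every thread without knowing the concrete barrier set:
void ensure_parsable(BarrierSetBase* bs, std::vector<JavaThreadStub*>& threads) {
  for (JavaThreadStub* t : threads) {
    bs->make_parsable(t);
  }
}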
diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/shared/barrierSet.inline.hpp --- a/src/hotspot/share/gc/shared/barrierSet.inline.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/shared/barrierSet.inline.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -52,8 +52,4 @@ write_ref_array_work(MemRegion(aligned_start, aligned_end)); } -inline void BarrierSet::write_region(MemRegion mr) { - write_region_work(mr); -} - #endif // SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/shared/cardTableModRefBS.cpp --- a/src/hotspot/share/gc/shared/cardTableModRefBS.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/shared/cardTableModRefBS.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ #include "logging/log.hpp" #include "memory/virtualspace.hpp" #include "oops/oop.inline.hpp" +#include "runtime/thread.hpp" #include "services/memTracker.hpp" #include "utilities/align.hpp" #include "utilities/macros.hpp" @@ -61,7 +62,8 @@ _committed(NULL), _cur_covered_regions(0), _byte_map(NULL), - byte_map_base(NULL) + byte_map_base(NULL), + _defer_initial_card_mark(false) { assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary"); assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary"); @@ -75,6 +77,7 @@ } void CardTableModRefBS::initialize() { + initialize_deferred_card_mark_barriers(); _guard_index = cards_required(_whole_heap.word_size()) - 1; _last_valid_index = _guard_index - 1; @@ -521,3 +524,112 @@ st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] byte_map_base: " INTPTR_FORMAT, p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(byte_map_base)); } + +// Helper for ReduceInitialCardMarks. For performance, +// compiled code may elide card-marks for initializing stores +// to a newly allocated object along the fast-path. We +// compensate for such elided card-marks as follows: +// (a) Generational, non-concurrent collectors, such as +// GenCollectedHeap(ParNew,DefNew,Tenured) and +// ParallelScavengeHeap(ParallelGC, ParallelOldGC) +// need the card-mark if and only if the region is +// in the old gen, and do not care if the card-mark +// succeeds or precedes the initializing stores themselves, +// so long as the card-mark is completed before the next +// scavenge. For all these cases, we can do a card mark +// at the point at which we do a slow path allocation +// in the old gen, i.e. in this call. +// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires +// in addition that the card-mark for an old gen allocated +// object strictly follow any associated initializing stores. 
+// In these cases, the memRegion remembered below is +// used to card-mark the entire region either just before the next +// slow-path allocation by this thread or just before the next scavenge or +// CMS-associated safepoint, whichever of these events happens first. +// (The implicit assumption is that the object has been fully +// initialized by this point, a fact that we assert when doing the +// card-mark.) +// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a +// G1 concurrent marking is in progress an SATB (pre-write-)barrier +// is used to remember the pre-value of any store. Initializing +// stores will not need this barrier, so we need not worry about +// compensating for the missing pre-barrier here. Turning now +// to the post-barrier, we note that G1 needs a RS update barrier +// which simply enqueues a (sequence of) dirty cards which may +// optionally be refined by the concurrent update threads. Note +// that this barrier need only be applied to a non-young write, +// but, like in CMS, because of the presence of concurrent refinement +// (much like CMS' precleaning), must strictly follow the oop-store. +// Thus, using the same protocol for maintaining the intended +// invariants turns out, serendepitously, to be the same for both +// G1 and CMS. +// +// For any future collector, this code should be reexamined with +// that specific collector in mind, and the documentation above suitably +// extended and updated. +void CardTableModRefBS::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) { + if (!ReduceInitialCardMarks) { + return; + } + // If a previous card-mark was deferred, flush it now. + flush_deferred_card_mark_barrier(thread); + if (new_obj->is_typeArray() || is_in_young(new_obj)) { + // Arrays of non-references don't need a post-barrier. + // The deferred_card_mark region should be empty + // following the flush above. + assert(thread->deferred_card_mark().is_empty(), "Error"); + } else { + MemRegion mr((HeapWord*)new_obj, new_obj->size()); + assert(!mr.is_empty(), "Error"); + if (_defer_initial_card_mark) { + // Defer the card mark + thread->set_deferred_card_mark(mr); + } else { + // Do the card mark + write_region(mr); + } + } +} + +void CardTableModRefBS::initialize_deferred_card_mark_barriers() { + // Used for ReduceInitialCardMarks (when COMPILER2 or JVMCI is used); + // otherwise remains unused. 
+#if defined(COMPILER2) || INCLUDE_JVMCI + _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers() + && (DeferInitialCardMark || card_mark_must_follow_store()); +#else + assert(_defer_initial_card_mark == false, "Who would set it?"); +#endif +} + +void CardTableModRefBS::flush_deferred_card_mark_barrier(JavaThread* thread) { +#if defined(COMPILER2) || INCLUDE_JVMCI + MemRegion deferred = thread->deferred_card_mark(); + if (!deferred.is_empty()) { + assert(_defer_initial_card_mark, "Otherwise should be empty"); + { + // Verify that the storage points to a parsable object in heap + DEBUG_ONLY(oop old_obj = oop(deferred.start());) + assert(!is_in_young(old_obj), + "Else should have been filtered in on_slowpath_allocation_exit()"); + assert(oopDesc::is_oop(old_obj, true), "Not an oop"); + assert(deferred.word_size() == (size_t)(old_obj->size()), + "Mismatch: multiple objects?"); + } + write_region(deferred); + // "Clear" the deferred_card_mark field + thread->set_deferred_card_mark(MemRegion()); + } + assert(thread->deferred_card_mark().is_empty(), "invariant"); +#else + assert(!_defer_initial_card_mark, "Should be false"); + assert(thread->deferred_card_mark().is_empty(), "Should be empty"); +#endif +} + +void CardTableModRefBS::flush_deferred_barriers(JavaThread* thread) { + // The deferred store barriers must all have been flushed to the + // card-table (or other remembered set structure) before GC starts + // processing the card-table (or other remembered set). + flush_deferred_card_mark_barrier(thread); +} diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/shared/cardTableModRefBS.hpp --- a/src/hotspot/share/gc/shared/cardTableModRefBS.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/shared/cardTableModRefBS.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -58,6 +58,10 @@ CT_MR_BS_last_reserved = 16 }; + // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 + // or INCLUDE_JVMCI is being used + bool _defer_initial_card_mark; + // a word's worth (row) of clean card values static const intptr_t clean_card_row = (intptr_t)(-1); @@ -180,8 +184,8 @@ CardTableModRefBS(MemRegion whole_heap, const BarrierSet::FakeRtti& fake_rtti); ~CardTableModRefBS(); - protected: - void write_region_work(MemRegion mr) { + public: + void write_region(MemRegion mr) { dirty_MemRegion(mr); } @@ -314,6 +318,49 @@ void verify_not_dirty_region(MemRegion mr) PRODUCT_RETURN; void verify_dirty_region(MemRegion mr) PRODUCT_RETURN; + // ReduceInitialCardMarks + void initialize_deferred_card_mark_barriers(); + + // If the CollectedHeap was asked to defer a store barrier above, + // this informs it to flush such a deferred store barrier to the + // remembered set. + void flush_deferred_card_mark_barrier(JavaThread* thread); + + // Can a compiler initialize a new object without store barriers? + // This permission only extends from the creation of a new object + // via a TLAB up to the first subsequent safepoint. 
If such permission + // is granted for this heap type, the compiler promises to call + // defer_store_barrier() below on any slow path allocation of + // a new object for which such initializing store barriers will + // have been elided. G1, like CMS, allows this, but should be + // ready to provide a compensating write barrier as necessary + // if that storage came out of a non-young region. The efficiency + // of this implementation depends crucially on being able to + // answer very efficiently in constant time whether a piece of + // storage in the heap comes from a young region or not. + // See ReduceInitialCardMarks. + virtual bool can_elide_tlab_store_barriers() const { + return true; + } + + // If a compiler is eliding store barriers for TLAB-allocated objects, + // we will be informed of a slow-path allocation by a call + // to on_slowpath_allocation_exit() below. Such a call precedes the + // initialization of the object itself, and no post-store-barriers will + // be issued. Some heap types require that the barrier strictly follows + // the initializing stores. (This is currently implemented by deferring the + // barrier until the next slow-path allocation or gc-related safepoint.) + // This interface answers whether a particular barrier type needs the card + // mark to be thus strictly sequenced after the stores. + virtual bool card_mark_must_follow_store() const = 0; + + virtual bool is_in_young(oop obj) const = 0; + + virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj); + virtual void flush_deferred_barriers(JavaThread* thread); + + virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); } + template class AccessBarrier: public ModRefBarrierSet::AccessBarrier {}; }; diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.cpp --- a/src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -121,3 +121,6 @@ } } +bool CardTableModRefBSForCTRS::is_in_young(oop obj) const { + return GenCollectedHeap::heap()->is_in_young(obj); +} diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.hpp --- a/src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/shared/cardTableModRefBSForCTRS.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -43,6 +43,12 @@ void set_CTRS(CardTableRS* rs) { _rs = rs; } + virtual bool card_mark_must_follow_store() const { + return UseConcMarkSweepGC; + } + + virtual bool is_in_young(oop obj) const; + private: CardTableRS* _rs; diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/shared/collectedHeap.cpp --- a/src/hotspot/share/gc/shared/collectedHeap.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -177,8 +177,7 @@ _total_collections(0), _total_full_collections(0), _gc_cause(GCCause::_no_gc), - _gc_lastcause(GCCause::_no_gc), - _defer_initial_card_mark(false) // strengthened by subclass in pre_initialize() below. + _gc_lastcause(GCCause::_no_gc) { const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT)); const size_t elements_per_word = HeapWordSize / sizeof(jint); @@ -239,17 +238,6 @@ BarrierSet::set_bs(barrier_set); } -void CollectedHeap::pre_initialize() { - // Used for ReduceInitialCardMarks (when COMPILER2 is used); - // otherwise remains unused. -#if COMPILER2_OR_JVMCI - _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers() - && (DeferInitialCardMark || card_mark_must_follow_store()); -#else - assert(_defer_initial_card_mark == false, "Who would set it?"); -#endif -} - #ifndef PRODUCT void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) { if (CheckMemoryInitialization && ZapUnusedHeapArea) { @@ -333,28 +321,6 @@ return obj; } -void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) { - MemRegion deferred = thread->deferred_card_mark(); - if (!deferred.is_empty()) { - assert(_defer_initial_card_mark, "Otherwise should be empty"); - { - // Verify that the storage points to a parsable object in heap - DEBUG_ONLY(oop old_obj = oop(deferred.start());) - assert(is_in(old_obj), "Not in allocated heap"); - assert(!can_elide_initializing_store_barrier(old_obj), - "Else should have been filtered in new_store_pre_barrier()"); - assert(oopDesc::is_oop(old_obj, true), "Not an oop"); - assert(deferred.word_size() == (size_t)(old_obj->size()), - "Mismatch: multiple objects?"); - } - BarrierSet* bs = barrier_set(); - bs->write_region(deferred); - // "Clear" the deferred_card_mark field - thread->set_deferred_card_mark(MemRegion()); - } - assert(thread->deferred_card_mark().is_empty(), "invariant"); -} - size_t CollectedHeap::max_tlab_size() const { // TLABs can't be bigger than we can fill with a int[Integer.MAX_VALUE]. // This restriction could be removed by enabling filling with multiple arrays. @@ -370,72 +336,6 @@ return align_down(max_int_size, MinObjAlignment); } -// Helper for ReduceInitialCardMarks. For performance, -// compiled code may elide card-marks for initializing stores -// to a newly allocated object along the fast-path. 
We -// compensate for such elided card-marks as follows: -// (a) Generational, non-concurrent collectors, such as -// GenCollectedHeap(ParNew,DefNew,Tenured) and -// ParallelScavengeHeap(ParallelGC, ParallelOldGC) -// need the card-mark if and only if the region is -// in the old gen, and do not care if the card-mark -// succeeds or precedes the initializing stores themselves, -// so long as the card-mark is completed before the next -// scavenge. For all these cases, we can do a card mark -// at the point at which we do a slow path allocation -// in the old gen, i.e. in this call. -// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires -// in addition that the card-mark for an old gen allocated -// object strictly follow any associated initializing stores. -// In these cases, the memRegion remembered below is -// used to card-mark the entire region either just before the next -// slow-path allocation by this thread or just before the next scavenge or -// CMS-associated safepoint, whichever of these events happens first. -// (The implicit assumption is that the object has been fully -// initialized by this point, a fact that we assert when doing the -// card-mark.) -// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a -// G1 concurrent marking is in progress an SATB (pre-write-)barrier -// is used to remember the pre-value of any store. Initializing -// stores will not need this barrier, so we need not worry about -// compensating for the missing pre-barrier here. Turning now -// to the post-barrier, we note that G1 needs a RS update barrier -// which simply enqueues a (sequence of) dirty cards which may -// optionally be refined by the concurrent update threads. Note -// that this barrier need only be applied to a non-young write, -// but, like in CMS, because of the presence of concurrent refinement -// (much like CMS' precleaning), must strictly follow the oop-store. -// Thus, using the same protocol for maintaining the intended -// invariants turns out, serendepitously, to be the same for both -// G1 and CMS. -// -// For any future collector, this code should be reexamined with -// that specific collector in mind, and the documentation above suitably -// extended and updated. -oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) { - // If a previous card-mark was deferred, flush it now. - flush_deferred_store_barrier(thread); - if (can_elide_initializing_store_barrier(new_obj) || - new_obj->is_typeArray()) { - // Arrays of non-references don't need a pre-barrier. - // The deferred_card_mark region should be empty - // following the flush above. - assert(thread->deferred_card_mark().is_empty(), "Error"); - } else { - MemRegion mr((HeapWord*)new_obj, new_obj->size()); - assert(!mr.is_empty(), "Error"); - if (_defer_initial_card_mark) { - // Defer the card mark - thread->set_deferred_card_mark(mr); - } else { - // Do the card mark - BarrierSet* bs = barrier_set(); - bs->write_region(mr); - } - } - return new_obj; -} - size_t CollectedHeap::filler_array_hdr_size() { return align_object_offset(arrayOopDesc::header_size(T_INT)); // align to Long } @@ -538,24 +438,16 @@ " otherwise concurrent mutator activity may make heap " " unparsable again"); const bool use_tlab = UseTLAB; - const bool deferred = _defer_initial_card_mark; // The main thread starts allocating via a TLAB even before it // has added itself to the threads list at vm boot-up. 
JavaThreadIteratorWithHandle jtiwh; assert(!use_tlab || jtiwh.length() > 0, "Attempt to fill tlabs before main thread has been added" " to threads list is doomed to failure!"); + BarrierSet *bs = barrier_set(); for (; JavaThread *thread = jtiwh.next(); ) { if (use_tlab) thread->tlab().make_parsable(retire_tlabs); -#if COMPILER2_OR_JVMCI - // The deferred store barriers must all have been flushed to the - // card-table (or other remembered set structure) before GC starts - // processing the card-table (or other remembered set). - if (deferred) flush_deferred_store_barrier(thread); -#else - assert(!deferred, "Should be false"); - assert(thread->deferred_card_mark().is_empty(), "Should be empty"); -#endif + bs->make_parsable(thread); } } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/shared/collectedHeap.hpp --- a/src/hotspot/share/gc/shared/collectedHeap.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -101,10 +101,6 @@ GCHeapLog* _gc_heap_log; - // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2 - // or INCLUDE_JVMCI is being used - bool _defer_initial_card_mark; - MemRegion _reserved; protected: @@ -129,13 +125,6 @@ // Constructor CollectedHeap(); - // Do common initializations that must follow instance construction, - // for example, those needing virtual calls. - // This code could perhaps be moved into initialize() but would - // be slightly more awkward because we want the latter to be a - // pure virtual. - void pre_initialize(); - // Create a new tlab. All TLAB allocations must go through this. virtual HeapWord* allocate_new_tlab(size_t size); @@ -408,45 +397,6 @@ return 0; } - // Can a compiler initialize a new object without store barriers? - // This permission only extends from the creation of a new object - // via a TLAB up to the first subsequent safepoint. If such permission - // is granted for this heap type, the compiler promises to call - // defer_store_barrier() below on any slow path allocation of - // a new object for which such initializing store barriers will - // have been elided. - virtual bool can_elide_tlab_store_barriers() const = 0; - - // If a compiler is eliding store barriers for TLAB-allocated objects, - // there is probably a corresponding slow path which can produce - // an object allocated anywhere. The compiler's runtime support - // promises to call this function on such a slow-path-allocated - // object before performing initializations that have elided - // store barriers. Returns new_obj, or maybe a safer copy thereof. - virtual oop new_store_pre_barrier(JavaThread* thread, oop new_obj); - - // Answers whether an initializing store to a new object currently - // allocated at the given address doesn't need a store - // barrier. Returns "true" if it doesn't need an initializing - // store barrier; answers "false" if it does. - virtual bool can_elide_initializing_store_barrier(oop new_obj) = 0; - - // If a compiler is eliding store barriers for TLAB-allocated objects, - // we will be informed of a slow-path allocation by a call - // to new_store_pre_barrier() above. 
Such a call precedes the - // initialization of the object itself, and no post-store-barriers will - // be issued. Some heap types require that the barrier strictly follows - // the initializing stores. (This is currently implemented by deferring the - // barrier until the next slow-path allocation or gc-related safepoint.) - // This interface answers whether a particular heap type needs the card - // mark to be thus strictly sequenced after the stores. - virtual bool card_mark_must_follow_store() const = 0; - - // If the CollectedHeap was asked to defer a store barrier above, - // this informs it to flush such a deferred store barrier to the - // remembered set. - virtual void flush_deferred_store_barrier(JavaThread* thread); - // Perform a collection of the heap; intended for use in implementing // "System.gc". This probably implies as full a collection as the // "CollectedHeap" supports. diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/shared/genCollectedHeap.cpp --- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -71,8 +71,6 @@ } jint GenCollectedHeap::initialize() { - CollectedHeap::pre_initialize(); - // While there are no constraints in the GC code that HeapWordSize // be any particular value, there are multiple other areas in the // system which believe this to be true (e.g. oop->object_size in some diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/shared/genCollectedHeap.hpp --- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -270,22 +270,6 @@ virtual size_t unsafe_max_tlab_alloc(Thread* thr) const; virtual HeapWord* allocate_new_tlab(size_t size); - // Can a compiler initialize a new object without store barriers? - // This permission only extends from the creation of a new object - // via a TLAB up to the first subsequent safepoint. - virtual bool can_elide_tlab_store_barriers() const { - return true; - } - - // We don't need barriers for stores to objects in the - // young gen and, a fortiori, for initializing stores to - // objects therein. This applies to DefNew+Tenured and ParNew+CMS - // only and may need to be re-examined in case other - // kinds of collectors are implemented in the future. - virtual bool can_elide_initializing_store_barrier(oop new_obj) { - return is_in_young(new_obj); - } - // The "requestor" generation is performing some garbage collection // action for which it would be useful to have scratch space. 
The // requestor promises to allocate no more than "max_alloc_words" in any diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/shared/modRefBarrierSet.hpp --- a/src/hotspot/share/gc/shared/modRefBarrierSet.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/shared/modRefBarrierSet.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,6 +45,7 @@ // Causes all refs in "mr" to be assumed to be modified. virtual void invalidate(MemRegion mr) = 0; + virtual void write_region(MemRegion mr) = 0; // The caller guarantees that "mr" contains no references. (Perhaps it's // objects have been moved elsewhere.) diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp --- a/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -73,7 +73,7 @@ if (!HasDecorator::value) { // Optimized covariant case bs->write_ref_array_pre(dst, (int)length, - HasDecorator::value); + HasDecorator::value); Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length); bs->write_ref_array((HeapWord*)dst, length); } else { diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/shared/plab.hpp --- a/src/hotspot/share/gc/shared/plab.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/shared/plab.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -72,7 +72,6 @@ // Initializes the buffer to be empty, but with the given "word_sz". // Must get initialized with "set_buf" for an allocation to succeed. PLAB(size_t word_sz); - virtual ~PLAB() {} static size_t size_required_for_allocation(size_t word_size) { return word_size + AlignmentReserve; } @@ -120,7 +119,7 @@ } // Sets the space of the buffer to be [buf, space+word_sz()). - virtual void set_buf(HeapWord* buf, size_t new_word_sz) { + void set_buf(HeapWord* buf, size_t new_word_sz) { assert(new_word_sz > AlignmentReserve, "Too small"); _word_sz = new_word_sz; @@ -136,11 +135,11 @@ // Flush allocation statistics into the given PLABStats supporting ergonomic // sizing of PLAB's and retire the current buffer. To be called at the end of // GC. - virtual void flush_and_retire_stats(PLABStats* stats); + void flush_and_retire_stats(PLABStats* stats); // Fills in the unallocated portion of the buffer with a garbage object and updates // statistics. To be called during GC. - virtual void retire(); + void retire(); }; // PLAB book-keeping. 
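The modRefBarrierSet.hpp hunk above makes write_region(MemRegion) part of the ModRef interface, and the card-table implementation dirties every card the region spans. A minimal sketch of that card-dirtying step, with simplified stand-in names (dirty_mem_region, card_shift, dirty_card) and the usual 512-byte cards, assuming a non-empty region:

#include <cstdint>
#include <cstddef>

static const int     card_shift = 9;   // 512-byte cards
static const uint8_t dirty_card = 0;   // value used for a dirty card byte

// Dirty every card byte that the non-empty half-open range [start, end) spans.
inline void dirty_mem_region(uint8_t* byte_map_base, const void* start, const void* end) {
  uintptr_t first = reinterpret_cast<uintptr_t>(start) >> card_shift;
  uintptr_t last  = (reinterpret_cast<uintptr_t>(end) - 1) >> card_shift;
  for (uintptr_t c = first; c <= last; c++) {
    byte_map_base[c] = dirty_card;      // one byte per card covering the region
  }
}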
diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/shared/taskqueue.cpp --- a/src/hotspot/share/gc/shared/taskqueue.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/shared/taskqueue.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -153,7 +153,7 @@ ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) { assert(_n_threads > 0, "Initialization is incorrect"); assert(_offered_termination < _n_threads, "Invariant"); - Atomic::inc((int *)&_offered_termination); + Atomic::inc(&_offered_termination); uint yield_count = 0; // Number of hard spin loops done since last yield @@ -228,7 +228,7 @@ #endif if (peek_in_queue_set() || (terminator != NULL && terminator->should_exit_termination())) { - Atomic::dec((int *)&_offered_termination); + Atomic::dec(&_offered_termination); assert(_offered_termination < _n_threads, "Invariant"); return false; } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/shared/taskqueue.inline.hpp --- a/src/hotspot/share/gc/shared/taskqueue.inline.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/shared/taskqueue.inline.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -205,7 +205,7 @@ #if !(defined SPARC || defined IA32 || defined AMD64) OrderAccess::fence(); #endif - uint localBot = OrderAccess::load_acquire((volatile juint*)&_bottom); + uint localBot = OrderAccess::load_acquire(&_bottom); uint n_elems = size(localBot, oldAge.top()); if (n_elems == 0) { return false; diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/gc/shared/workgroup.cpp --- a/src/hotspot/share/gc/shared/workgroup.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/gc/shared/workgroup.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -157,7 +157,7 @@ // Wait for the coordinator to dispatch a task. _start_semaphore->wait(); - uint num_started = (uint) Atomic::add(1, (volatile jint*)&_started); + uint num_started = Atomic::add(1u, &_started); // Subtract one to get a zero-indexed worker id. uint worker_id = num_started - 1; @@ -168,7 +168,7 @@ void worker_done_with_task() { // Mark that the worker is done with the task. // The worker is not allowed to read the state variables after this line. - uint not_finished = (uint) Atomic::add(-1, (volatile jint*)&_not_finished); + uint not_finished = Atomic::sub(1u, &_not_finished); // The last worker signals to the coordinator that all work is completed. 
if (not_finished == 0) { @@ -439,7 +439,7 @@ #ifdef ASSERT if (!res) { assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?"); - Atomic::inc((volatile jint*) &_claimed); + Atomic::inc(&_claimed); } #endif return res; diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/jvmci/jvmciRuntime.cpp --- a/src/hotspot/share/jvmci/jvmciRuntime.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -116,10 +116,7 @@ oop obj = ik->allocate_instance(CHECK); thread->set_vm_result(obj); JRT_BLOCK_END; - - if (ReduceInitialCardMarks) { - new_store_pre_barrier(thread); - } + SharedRuntime::on_slowpath_allocation_exit(thread); JRT_END JRT_BLOCK_ENTRY(void, JVMCIRuntime::new_array(JavaThread* thread, Klass* array_klass, jint length)) @@ -151,29 +148,9 @@ } } JRT_BLOCK_END; - - if (ReduceInitialCardMarks) { - new_store_pre_barrier(thread); - } + SharedRuntime::on_slowpath_allocation_exit(thread); JRT_END -void JVMCIRuntime::new_store_pre_barrier(JavaThread* thread) { - // After any safepoint, just before going back to compiled code, - // we inform the GC that we will be doing initializing writes to - // this object in the future without emitting card-marks, so - // GC may take any compensating steps. - // NOTE: Keep this code consistent with GraphKit::store_barrier. - - oop new_obj = thread->vm_result(); - if (new_obj == NULL) return; - - assert(Universe::heap()->can_elide_tlab_store_barriers(), - "compiler must check this first"); - // GC may decide to give back a safer copy of new_obj. - new_obj = Universe::heap()->new_store_pre_barrier(thread, new_obj); - thread->set_vm_result(new_obj); -} - JRT_ENTRY(void, JVMCIRuntime::new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims)) assert(klass->is_klass(), "not a class"); assert(rank >= 1, "rank must be nonzero"); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/jvmci/jvmciRuntime.hpp --- a/src/hotspot/share/jvmci/jvmciRuntime.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/jvmci/jvmciRuntime.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -154,7 +154,6 @@ static void write_barrier_pre(JavaThread* thread, oopDesc* obj); static void write_barrier_post(JavaThread* thread, void* card); static jboolean validate_object(JavaThread* thread, oopDesc* parent, oopDesc* child); - static void new_store_pre_barrier(JavaThread* thread); // used to throw exceptions from compiled JVMCI code static void throw_and_post_jvmti_exception(JavaThread* thread, const char* exception, const char* message); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/logging/logTag.hpp --- a/src/hotspot/share/logging/logTag.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/logging/logTag.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -60,6 +60,7 @@ LOG_TAG(cset) \ LOG_TAG(data) \ LOG_TAG(datacreation) \ + LOG_TAG(decoder) \ LOG_TAG(defaultmethods) \ LOG_TAG(dump) \ LOG_TAG(ergo) \ diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/memory/allocation.inline.hpp --- a/src/hotspot/share/memory/allocation.inline.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/memory/allocation.inline.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -38,9 +38,9 @@ inline void inc_stat_counter(volatile julong* dest, julong add_value) { #if defined(SPARC) || defined(X86) // Sparc and X86 have atomic jlong (8 bytes) instructions - julong value = Atomic::load((volatile jlong*)dest); + julong value = Atomic::load(dest); value += add_value; - Atomic::store((jlong)value, (volatile jlong*)dest); + Atomic::store(value, dest); #else // possible word-tearing during load/store *dest += add_value; diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/memory/filemap.cpp --- a/src/hotspot/share/memory/filemap.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/memory/filemap.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -410,14 +410,11 @@ // Write the FileMapInfo information to the file. void FileMapInfo::open_for_write() { - _full_path = Arguments::GetSharedArchivePath(); - if (log_is_enabled(Info, cds)) { - ResourceMark rm; - LogMessage(cds) msg; - stringStream info_stream; - info_stream.print_cr("Dumping shared data to file: "); - info_stream.print_cr(" %s", _full_path); - msg.info("%s", info_stream.as_string()); + _full_path = Arguments::GetSharedArchivePath(); + LogMessage(cds) msg; + if (msg.is_info()) { + msg.info("Dumping shared data to file: "); + msg.info(" %s", _full_path); } #ifdef _WINDOWS // On Windows, need WRITE permission to remove the file. diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/memory/metaspace.cpp --- a/src/hotspot/share/memory/metaspace.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/memory/metaspace.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -4372,7 +4372,7 @@ // ChunkManagerReturnTest stresses taking/returning chunks from the ChunkManager. It takes and // returns chunks from/to the ChunkManager while keeping track of the expected ChunkManager // content. 
-class ChunkManagerReturnTestImpl { +class ChunkManagerReturnTestImpl : public CHeapObj { VirtualSpaceNode _vsn; ChunkManager _cm; diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/memory/metaspaceShared.cpp --- a/src/hotspot/share/memory/metaspaceShared.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/memory/metaspaceShared.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -883,13 +883,11 @@ const char *sep = "--------------------+---------------------------+---------------------------+--------------------------"; const char *hdr = " ro_cnt ro_bytes % | rw_cnt rw_bytes % | all_cnt all_bytes %"; - ResourceMark rm; LogMessage(cds) msg; - stringStream info_stream; - info_stream.print_cr("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):"); - info_stream.print_cr("%s", hdr); - info_stream.print_cr("%s", sep); + msg.info("Detailed metadata info (excluding od/st regions; rw stats include md/mc regions):"); + msg.info("%s", hdr); + msg.info("%s", sep); for (int type = 0; type < int(_number_of_types); type ++) { const char *name = type_name((Type)type); int ro_count = _counts[RO][type]; @@ -903,7 +901,7 @@ double rw_perc = percent_of(rw_bytes, rw_all); double perc = percent_of(bytes, ro_all + rw_all); - info_stream.print_cr(fmt_stats, name, + msg.info(fmt_stats, name, ro_count, ro_bytes, ro_perc, rw_count, rw_bytes, rw_perc, count, bytes, perc); @@ -921,8 +919,8 @@ double all_rw_perc = percent_of(all_rw_bytes, rw_all); double all_perc = percent_of(all_bytes, ro_all + rw_all); - info_stream.print_cr("%s", sep); - info_stream.print_cr(fmt_stats, "Total", + msg.info("%s", sep); + msg.info(fmt_stats, "Total", all_ro_count, all_ro_bytes, all_ro_perc, all_rw_count, all_rw_bytes, all_rw_perc, all_count, all_bytes, all_perc); @@ -930,7 +928,6 @@ assert(all_ro_bytes == ro_all, "everything should have been counted"); assert(all_rw_bytes == rw_all, "everything should have been counted"); - msg.info("%s", info_stream.as_string()); #undef fmt_stats } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/memory/universe.cpp --- a/src/hotspot/share/memory/universe.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/memory/universe.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -542,32 +542,6 @@ #undef assert_pll_locked #undef assert_pll_ownership - -static bool has_run_finalizers_on_exit = false; - -void Universe::run_finalizers_on_exit() { - if (has_run_finalizers_on_exit) return; - has_run_finalizers_on_exit = true; - - // Called on VM exit. This ought to be run in a separate thread. 
- log_trace(ref)("Callback to run finalizers on exit"); - { - PRESERVE_EXCEPTION_MARK; - Klass* finalizer_klass = SystemDictionary::Finalizer_klass(); - JavaValue result(T_VOID); - JavaCalls::call_static( - &result, - finalizer_klass, - vmSymbols::run_finalizers_on_exit_name(), - vmSymbols::void_method_signature(), - THREAD - ); - // Ignore any pending exceptions - CLEAR_PENDING_EXCEPTION; - } -} - - // initialize_vtable could cause gc if // 1) we specified true to initialize_vtable and // 2) this ran after gc was enabled diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/memory/universe.hpp --- a/src/hotspot/share/memory/universe.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/memory/universe.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -464,9 +464,6 @@ static bool should_fill_in_stack_trace(Handle throwable); static void check_alignment(uintx size, uintx alignment, const char* name); - // Finalizer support. - static void run_finalizers_on_exit(); - // Iteration // Apply "f" to the addresses of all the direct heap pointers maintained diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/oops/access.hpp --- a/src/hotspot/share/oops/access.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/oops/access.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -155,6 +155,8 @@ // - Accesses on narrowOop* translate to encoded/decoded memory accesses without runtime checks // - Accesses on HeapWord* translate to a runtime check choosing one of the above // - Accesses on other types translate to raw memory accesses without runtime checks +// * AS_DEST_NOT_INITIALIZED: This property can be important to e.g. SATB barriers by +// marking that the previous value is uninitialized nonsense rather than a real value. // * AS_NO_KEEPALIVE: The barrier is used only on oop references and will not keep any involved objects // alive, regardless of the type of reference being accessed. It will however perform the memory access // in a consistent way w.r.t. e.g. concurrent compaction, so that the right field is being accessed, @@ -164,10 +166,12 @@ // responsibility of performing the access and what barriers to be performed to the GC. This is the default. // Note that primitive accesses will only be resolved on the barrier set if the appropriate build-time // decorator for enabling primitive barriers is enabled for the build. -const DecoratorSet AS_RAW = UCONST64(1) << 11; -const DecoratorSet AS_NO_KEEPALIVE = UCONST64(1) << 12; -const DecoratorSet AS_NORMAL = UCONST64(1) << 13; -const DecoratorSet AS_DECORATOR_MASK = AS_RAW | AS_NO_KEEPALIVE | AS_NORMAL; +const DecoratorSet AS_RAW = UCONST64(1) << 11; +const DecoratorSet AS_DEST_NOT_INITIALIZED = UCONST64(1) << 12; +const DecoratorSet AS_NO_KEEPALIVE = UCONST64(1) << 13; +const DecoratorSet AS_NORMAL = UCONST64(1) << 14; +const DecoratorSet AS_DECORATOR_MASK = AS_RAW | AS_DEST_NOT_INITIALIZED | + AS_NO_KEEPALIVE | AS_NORMAL; // === Reference Strength Decorators === // These decorators only apply to accesses on oop-like types (oop/narrowOop). @@ -178,10 +182,10 @@ // * ON_UNKNOWN_OOP_REF: The memory access is performed on a reference of unknown strength. // This could for example come from the unsafe API. 
// * Default (no explicit reference strength specified): ON_STRONG_OOP_REF -const DecoratorSet ON_STRONG_OOP_REF = UCONST64(1) << 14; -const DecoratorSet ON_WEAK_OOP_REF = UCONST64(1) << 15; -const DecoratorSet ON_PHANTOM_OOP_REF = UCONST64(1) << 16; -const DecoratorSet ON_UNKNOWN_OOP_REF = UCONST64(1) << 17; +const DecoratorSet ON_STRONG_OOP_REF = UCONST64(1) << 15; +const DecoratorSet ON_WEAK_OOP_REF = UCONST64(1) << 16; +const DecoratorSet ON_PHANTOM_OOP_REF = UCONST64(1) << 17; +const DecoratorSet ON_UNKNOWN_OOP_REF = UCONST64(1) << 18; const DecoratorSet ON_DECORATOR_MASK = ON_STRONG_OOP_REF | ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF; @@ -196,23 +200,21 @@ // * IN_CONCURRENT_ROOT: The access is performed in an off-heap data structure pointing into the Java heap, // but is notably not scanned during safepoints. This is sometimes a special case for some GCs and // implies that it is also an IN_ROOT. -const DecoratorSet IN_HEAP = UCONST64(1) << 18; -const DecoratorSet IN_HEAP_ARRAY = UCONST64(1) << 19; -const DecoratorSet IN_ROOT = UCONST64(1) << 20; -const DecoratorSet IN_CONCURRENT_ROOT = UCONST64(1) << 21; -const DecoratorSet IN_ARCHIVE_ROOT = UCONST64(1) << 22; +const DecoratorSet IN_HEAP = UCONST64(1) << 19; +const DecoratorSet IN_HEAP_ARRAY = UCONST64(1) << 20; +const DecoratorSet IN_ROOT = UCONST64(1) << 21; +const DecoratorSet IN_CONCURRENT_ROOT = UCONST64(1) << 22; +const DecoratorSet IN_ARCHIVE_ROOT = UCONST64(1) << 23; const DecoratorSet IN_DECORATOR_MASK = IN_HEAP | IN_HEAP_ARRAY | IN_ROOT | IN_CONCURRENT_ROOT | IN_ARCHIVE_ROOT; // == Value Decorators == // * OOP_NOT_NULL: This property can make certain barriers faster such as compressing oops. -const DecoratorSet OOP_NOT_NULL = UCONST64(1) << 23; +const DecoratorSet OOP_NOT_NULL = UCONST64(1) << 24; const DecoratorSet OOP_DECORATOR_MASK = OOP_NOT_NULL; // == Arraycopy Decorators == -// * ARRAYCOPY_DEST_NOT_INITIALIZED: This property can be important to e.g. SATB barriers by -// marking that the previous value uninitialized nonsense rather than a real value. // * ARRAYCOPY_CHECKCAST: This property means that the class of the objects in source // are not guaranteed to be subclasses of the class of the destination array. This requires // a check-cast barrier during the copying operation. If this is not set, it is assumed @@ -222,14 +224,12 @@ // * ARRAYCOPY_ARRAYOF: The copy is in the arrayof form. // * ARRAYCOPY_ATOMIC: The accesses have to be atomic over the size of its elements. // * ARRAYCOPY_ALIGNED: The accesses have to be aligned on a HeapWord. 
-const DecoratorSet ARRAYCOPY_DEST_NOT_INITIALIZED = UCONST64(1) << 24; const DecoratorSet ARRAYCOPY_CHECKCAST = UCONST64(1) << 25; const DecoratorSet ARRAYCOPY_DISJOINT = UCONST64(1) << 26; const DecoratorSet ARRAYCOPY_ARRAYOF = UCONST64(1) << 27; const DecoratorSet ARRAYCOPY_ATOMIC = UCONST64(1) << 28; const DecoratorSet ARRAYCOPY_ALIGNED = UCONST64(1) << 29; -const DecoratorSet ARRAYCOPY_DECORATOR_MASK = ARRAYCOPY_DEST_NOT_INITIALIZED | - ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT | +const DecoratorSet ARRAYCOPY_DECORATOR_MASK = ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT | ARRAYCOPY_DISJOINT | ARRAYCOPY_ARRAYOF | ARRAYCOPY_ATOMIC | ARRAYCOPY_ALIGNED; @@ -343,8 +343,8 @@ template static void verify_primitive_decorators() { - const DecoratorSet primitive_decorators = (AS_DECORATOR_MASK ^ AS_NO_KEEPALIVE) | IN_HEAP | - IN_HEAP_ARRAY; + const DecoratorSet primitive_decorators = (AS_DECORATOR_MASK ^ AS_NO_KEEPALIVE ^ AS_DEST_NOT_INITIALIZED) | + IN_HEAP | IN_HEAP_ARRAY; verify_decorators(); } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/oops/access.inline.hpp --- a/src/hotspot/share/oops/access.inline.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/oops/access.inline.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1060,6 +1060,7 @@ const DecoratorSet barrier_strength_decorators = decorators & AS_DECORATOR_MASK; STATIC_ASSERT(barrier_strength_decorators == 0 || ( // make sure barrier strength decorators are disjoint if set (barrier_strength_decorators ^ AS_NO_KEEPALIVE) == 0 || + (barrier_strength_decorators ^ AS_DEST_NOT_INITIALIZED) == 0 || (barrier_strength_decorators ^ AS_RAW) == 0 || (barrier_strength_decorators ^ AS_NORMAL) == 0 )); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/opto/callnode.cpp --- a/src/hotspot/share/opto/callnode.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/opto/callnode.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1441,8 +1441,10 @@ if (!allow_new_nodes) return NULL; // Create a cast which is control dependent on the initialization to // propagate the fact that the array length must be positive. + InitializeNode* init = initialization(); + assert(init != NULL, "initialization not found"); length = new CastIINode(length, narrow_length_type); - length->set_req(0, initialization()->proj_out_or_null(0)); + length->set_req(0, init->proj_out_or_null(0)); } } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/opto/graphKit.cpp --- a/src/hotspot/share/opto/graphKit.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/opto/graphKit.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -3861,7 +3861,7 @@ if (use_ReduceInitialCardMarks() && obj == just_allocated_object(control())) { // We can skip marks on a freshly-allocated object in Eden. - // Keep this code in sync with new_store_pre_barrier() in runtime.cpp. + // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp. // That routine informs GC to take appropriate compensating steps, // upon a slow-path allocation, so as to make this card-mark // elision safe. @@ -4159,7 +4159,7 @@ * as part of the allocation in the case the allocated object is not located * in the nursery, this would happen for humongous objects. This is similar to * how CMS is required to handle this case, see the comments for the method - * CollectedHeap::new_store_pre_barrier and OptoRuntime::new_store_pre_barrier. + * CardTableModRefBS::on_allocation_slowpath_exit and OptoRuntime::new_deferred_store_barrier. * A deferred card mark is required for these objects and handled in the above * mentioned methods. 
* @@ -4249,7 +4249,7 @@ if (use_ReduceInitialCardMarks() && obj == just_allocated_object(control())) { // We can skip marks on a freshly-allocated object in Eden. - // Keep this code in sync with new_store_pre_barrier() in runtime.cpp. + // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp. // That routine informs GC to take appropriate compensating steps, // upon a slow-path allocation, so as to make this card-mark // elision safe. diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/opto/graphKit.hpp --- a/src/hotspot/share/opto/graphKit.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/opto/graphKit.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -755,8 +755,10 @@ Node* just_allocated_object(Node* current_control); static bool use_ReduceInitialCardMarks() { - return (ReduceInitialCardMarks - && Universe::heap()->can_elide_tlab_store_barriers()); + BarrierSet *bs = Universe::heap()->barrier_set(); + return bs->is_a(BarrierSet::CardTableModRef) + && barrier_set_cast(bs)->can_elide_tlab_store_barriers() + && ReduceInitialCardMarks; } // Sync Ideal and Graph kits. diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/opto/loopnode.cpp --- a/src/hotspot/share/opto/loopnode.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/opto/loopnode.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -2347,7 +2347,7 @@ tty->print(" "); tty->print("Loop: N%d/N%d ",_head->_idx,_tail->_idx); if (_irreducible) tty->print(" IRREDUCIBLE"); - Node* entry = _head->as_Loop()->skip_strip_mined(-1)->in(LoopNode::EntryControl); + Node* entry = _head->is_Loop() ? _head->as_Loop()->skip_strip_mined(-1)->in(LoopNode::EntryControl) : _head->in(LoopNode::EntryControl); Node* predicate = PhaseIdealLoop::find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check); if (predicate != NULL ) { tty->print(" limit_check"); @@ -2398,7 +2398,7 @@ if (Verbose) { tty->print(" body={"); _body.dump_simple(); tty->print(" }"); } - if (_head->as_Loop()->is_strip_mined()) { + if (_head->is_Loop() && _head->as_Loop()->is_strip_mined()) { tty->print(" strip_mined"); } tty->cr(); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/opto/loopopts.cpp --- a/src/hotspot/share/opto/loopopts.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/opto/loopopts.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -693,7 +693,9 @@ Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) { // Store has to be first in the loop body IdealLoopTree *n_loop = get_loop(n_ctrl); - if (n->is_Store() && n_loop != _ltree_root && n_loop->is_loop() && n->in(0) != NULL) { + if (n->is_Store() && n_loop != _ltree_root && + n_loop->is_loop() && n_loop->_head->is_Loop() && + n->in(0) != NULL) { Node* address = n->in(MemNode::Address); Node* value = n->in(MemNode::ValueIn); Node* mem = n->in(MemNode::Memory); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/opto/runtime.cpp --- a/src/hotspot/share/opto/runtime.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/opto/runtime.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -194,23 +194,6 @@ // We failed the fast-path allocation. Now we need to do a scavenge or GC // and try allocation again. -void OptoRuntime::new_store_pre_barrier(JavaThread* thread) { - // After any safepoint, just before going back to compiled code, - // we inform the GC that we will be doing initializing writes to - // this object in the future without emitting card-marks, so - // GC may take any compensating steps. - // NOTE: Keep this code consistent with GraphKit::store_barrier. - - oop new_obj = thread->vm_result(); - if (new_obj == NULL) return; - - assert(Universe::heap()->can_elide_tlab_store_barriers(), - "compiler must check this first"); - // GC may decide to give back a safer copy of new_obj. - new_obj = Universe::heap()->new_store_pre_barrier(thread, new_obj); - thread->set_vm_result(new_obj); -} - // object allocation JRT_BLOCK_ENTRY(void, OptoRuntime::new_instance_C(Klass* klass, JavaThread* thread)) JRT_BLOCK; @@ -244,10 +227,8 @@ deoptimize_caller_frame(thread, HAS_PENDING_EXCEPTION); JRT_BLOCK_END; - if (GraphKit::use_ReduceInitialCardMarks()) { - // inform GC that we won't do card marks for initializing writes. - new_store_pre_barrier(thread); - } + // inform GC that we won't do card marks for initializing writes. + SharedRuntime::on_slowpath_allocation_exit(thread); JRT_END @@ -284,10 +265,8 @@ thread->set_vm_result(result); JRT_BLOCK_END; - if (GraphKit::use_ReduceInitialCardMarks()) { - // inform GC that we won't do card marks for initializing writes. - new_store_pre_barrier(thread); - } + // inform GC that we won't do card marks for initializing writes. + SharedRuntime::on_slowpath_allocation_exit(thread); JRT_END // array allocation without zeroing @@ -314,10 +293,9 @@ thread->set_vm_result(result); JRT_BLOCK_END; - if (GraphKit::use_ReduceInitialCardMarks()) { - // inform GC that we won't do card marks for initializing writes. - new_store_pre_barrier(thread); - } + + // inform GC that we won't do card marks for initializing writes. + SharedRuntime::on_slowpath_allocation_exit(thread); oop result = thread->vm_result(); if ((len > 0) && (result != NULL) && diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/opto/runtime.hpp --- a/src/hotspot/share/opto/runtime.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/opto/runtime.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -163,10 +163,6 @@ static void new_array_C(Klass* array_klass, int len, JavaThread *thread); static void new_array_nozero_C(Klass* array_klass, int len, JavaThread *thread); - // Post-slow-path-allocation, pre-initializing-stores step for - // implementing ReduceInitialCardMarks - static void new_store_pre_barrier(JavaThread* thread); - // Allocate storage for a multi-dimensional arrays // Note: needs to be fixed for arbitrary number of dimensions static void multianewarray2_C(Klass* klass, int len1, int len2, JavaThread *thread); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/prims/whitebox.cpp --- a/src/hotspot/share/prims/whitebox.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/prims/whitebox.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -60,6 +60,7 @@ #include "utilities/align.hpp" #include "utilities/debug.hpp" #include "utilities/exceptions.hpp" +#include "utilities/elfFile.hpp" #include "utilities/macros.hpp" #if INCLUDE_CDS #include "prims/cdsoffsets.hpp" @@ -528,7 +529,7 @@ size_t total_memory() { return _total_memory; } size_t total_memory_to_free() { return _total_memory_to_free; } - bool doHeapRegion(HeapRegion* r) { + bool do_heap_region(HeapRegion* r) { if (r->is_old()) { size_t prev_live = r->marked_bytes(); size_t live = r->live_bytes(); @@ -1911,6 +1912,13 @@ os::print_os_info(tty); WB_END +// Elf decoder +WB_ENTRY(void, WB_DisableElfSectionCache(JNIEnv* env)) +#if !defined(_WINDOWS) && !defined(__APPLE__) && !defined(_AIX) + ElfFile::_do_not_cache_elf_section = true; +#endif +WB_END + #define CC (char*) @@ -2125,6 +2133,7 @@ (void*)&WB_CheckLibSpecifiesNoexecstack}, {CC"isContainerized", CC"()Z", (void*)&WB_IsContainerized }, {CC"printOsInfo", CC"()V", (void*)&WB_PrintOsInfo }, + {CC"disableElfSectionCache", CC"()V", (void*)&WB_DisableElfSectionCache }, }; diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/runtime/arguments.cpp --- a/src/hotspot/share/runtime/arguments.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/runtime/arguments.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -537,6 +537,7 @@ { "SharedReadOnlySize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() }, { "SharedMiscDataSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() }, { "SharedMiscCodeSize", JDK_Version::undefined(), JDK_Version::jdk(10), JDK_Version::undefined() }, + { "UseUTCFileTimestamp", JDK_Version::undefined(), JDK_Version::jdk(11), JDK_Version::jdk(12) }, #ifdef TEST_VERIFY_SPECIAL_JVM_FLAGS { "dep > obs", JDK_Version::jdk(9), JDK_Version::jdk(8), JDK_Version::undefined() }, diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/runtime/fieldDescriptor.cpp --- a/src/hotspot/share/runtime/fieldDescriptor.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/runtime/fieldDescriptor.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -201,6 +201,12 @@ } // Print a hint as to the underlying integer representation. 
This can be wrong for // pointers on an LP64 machine +#ifdef _LP64 + if ((ft == T_OBJECT || ft == T_ARRAY) && UseCompressedOops) { + st->print(" (%x)", obj->int_field(offset())); + } + else // <- intended +#endif if (ft == T_LONG || ft == T_DOUBLE LP64_ONLY(|| !is_java_primitive(ft)) ) { st->print(" (%x %x)", obj->int_field(offset()), obj->int_field(offset()+sizeof(jint))); } else if (as_int < 0 || as_int > 9) { diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/runtime/os.cpp --- a/src/hotspot/share/runtime/os.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/runtime/os.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -85,7 +85,7 @@ julong os::free_bytes = 0; // # of bytes freed #endif -static juint cur_malloc_words = 0; // current size for MallocMaxTestWords +static size_t cur_malloc_words = 0; // current size for MallocMaxTestWords void os_init_globals() { // Called from init_globals(). @@ -629,12 +629,12 @@ // static bool has_reached_max_malloc_test_peak(size_t alloc_size) { if (MallocMaxTestWords > 0) { - jint words = (jint)(alloc_size / BytesPerWord); + size_t words = (alloc_size / BytesPerWord); if ((cur_malloc_words + words) > MallocMaxTestWords) { return true; } - Atomic::add(words, (volatile jint *)&cur_malloc_words); + Atomic::add(words, &cur_malloc_words); } return false; } @@ -1826,8 +1826,7 @@ os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::State from, os::SuspendResume::State to) { - os::SuspendResume::State result = - (os::SuspendResume::State) Atomic::cmpxchg((jint) to, (jint *) &_state, (jint) from); + os::SuspendResume::State result = Atomic::cmpxchg(to, &_state, from); if (result == from) { // success return to; diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/runtime/os.hpp --- a/src/hotspot/share/runtime/os.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/runtime/os.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -27,6 +27,8 @@ #include "jvm.h" #include "jvmtifiles/jvmti.h" +#include "metaprogramming/isRegisteredEnum.hpp" +#include "metaprogramming/integralConstant.hpp" #include "runtime/extendedPC.hpp" #include "runtime/handles.hpp" #include "utilities/macros.hpp" @@ -907,11 +909,11 @@ class SuspendedThreadTask { public: SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {} - virtual ~SuspendedThreadTask() {} void run(); bool is_done() { return _done; } virtual void do_task(const SuspendedThreadTaskContext& context) = 0; protected: + ~SuspendedThreadTask() {} private: void internal_do_task(); Thread* _thread; @@ -1006,6 +1008,10 @@ }; +#ifndef _WINDOWS +template<> struct IsRegisteredEnum : public TrueType {}; +#endif // !_WINDOWS + // Note that "PAUSE" is almost always used with synchronization // so arguably we should provide Atomic::SpinPause() instead // of the global SpinPause() with C linkage. diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/runtime/sharedRuntime.cpp --- a/src/hotspot/share/runtime/sharedRuntime.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/runtime/sharedRuntime.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -3169,3 +3169,16 @@ } return activation; } + +void SharedRuntime::on_slowpath_allocation_exit(JavaThread* thread) { + // After any safepoint, just before going back to compiled code, + // we inform the GC that we will be doing initializing writes to + // this object in the future without emitting card-marks, so + // GC may take any compensating steps. + + oop new_obj = thread->vm_result(); + if (new_obj == NULL) return; + + BarrierSet *bs = Universe::heap()->barrier_set(); + bs->on_slowpath_allocation_exit(thread, new_obj); +} diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/runtime/sharedRuntime.hpp --- a/src/hotspot/share/runtime/sharedRuntime.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/runtime/sharedRuntime.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -211,6 +211,10 @@ static address deoptimize_for_implicit_exception(JavaThread* thread, address pc, CompiledMethod* nm, int deopt_reason); #endif + // Post-slow-path-allocation, pre-initializing-stores step for + // implementing e.g. ReduceInitialCardMarks + static void on_slowpath_allocation_exit(JavaThread* thread); + static void enable_stack_reserved_zone(JavaThread* thread); static frame look_for_reserved_stack_annotated_method(JavaThread* thread, frame fr); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/runtime/stubRoutines.cpp --- a/src/hotspot/share/runtime/stubRoutines.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/runtime/stubRoutines.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -418,7 +418,7 @@ SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy #endif // !PRODUCT assert(count != 0, "count should be non-zero"); - HeapAccess::oop_arraycopy(NULL, NULL, (HeapWord*)src, (HeapWord*)dest, count); + HeapAccess::oop_arraycopy(NULL, NULL, (HeapWord*)src, (HeapWord*)dest, count); JRT_END JRT_LEAF(void, StubRoutines::arrayof_jbyte_copy(HeapWord* src, HeapWord* dest, size_t count)) @@ -462,7 +462,7 @@ SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy #endif // !PRODUCT assert(count != 0, "count should be non-zero"); - HeapAccess::oop_arraycopy(NULL, NULL, src, dest, count); + HeapAccess::oop_arraycopy(NULL, NULL, src, dest, count); JRT_END address StubRoutines::select_fill_function(BasicType t, bool aligned, const char* &name) { diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/runtime/thread.cpp --- a/src/hotspot/share/runtime/thread.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/runtime/thread.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1994,20 +1994,10 @@ JvmtiExport::cleanup_thread(this); } - // We must flush any deferred card marks before removing a thread from - // the list of active threads. 
- Universe::heap()->flush_deferred_store_barrier(this); - assert(deferred_card_mark().is_empty(), "Should have been flushed"); - -#if INCLUDE_ALL_GCS - // We must flush the G1-related buffers before removing a thread - // from the list of active threads. We must do this after any deferred - // card marks have been flushed (above) so that any entries that are - // added to the thread's dirty card queue as a result are not lost. - if (UseG1GC) { - flush_barrier_queues(); - } -#endif // INCLUDE_ALL_GCS + // We must flush any deferred card marks and other various GC barrier + // related buffers (e.g. G1 SATB buffer and G1 dirty card queue buffer) + // before removing a thread from the list of active threads. + BarrierSet::barrier_set()->flush_deferred_barriers(this); log_info(os, thread)("JavaThread %s (tid: " UINTX_FORMAT ").", exit_type == JavaThread::normal_exit ? "exiting" : "detaching", @@ -4202,10 +4192,9 @@ // SystemDictionary::resolve_or_null will return null if there was // an exception. If we cannot load the Shutdown class, just don't // call Shutdown.shutdown() at all. This will mean the shutdown hooks - // and finalizers (if runFinalizersOnExit is set) won't be run. - // Note that if a shutdown hook was registered or runFinalizersOnExit - // was called, the Shutdown class would have already been loaded - // (Runtime.addShutdownHook and runFinalizersOnExit will load it). + // won't be run. Note that if a shutdown hook was registered, + // the Shutdown class would have already been loaded + // (Runtime.addShutdownHook will load it). JavaValue result(T_VOID); JavaCalls::call_static(&result, shutdown_klass, @@ -4228,7 +4217,7 @@ // + Wait until we are the last non-daemon thread to execute // <-- every thing is still working at this moment --> // + Call java.lang.Shutdown.shutdown(), which will invoke Java level -// shutdown hooks, run finalizers if finalization-on-exit +// shutdown hooks // + Call before_exit(), prepare for VM exit // > run VM level shutdown hooks (they are registered through JVM_OnExit(), // currently the only user of this mechanism is File.deleteOnExit()) diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/runtime/thread.inline.hpp --- a/src/hotspot/share/runtime/thread.inline.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/runtime/thread.inline.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -30,24 +30,18 @@ #include "runtime/thread.hpp" inline void Thread::set_suspend_flag(SuspendFlags f) { - assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch"); uint32_t flags; do { flags = _suspend_flags; } - while (Atomic::cmpxchg((jint)(flags | f), - (volatile jint*)&_suspend_flags, - (jint)flags) != (jint)flags); + while (Atomic::cmpxchg((flags | f), &_suspend_flags, flags) != flags); } inline void Thread::clear_suspend_flag(SuspendFlags f) { - assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch"); uint32_t flags; do { flags = _suspend_flags; } - while (Atomic::cmpxchg((jint)(flags & ~f), - (volatile jint*)&_suspend_flags, - (jint)flags) != (jint)flags); + while (Atomic::cmpxchg((flags & ~f), &_suspend_flags, flags) != flags); } inline void Thread::set_has_async_exception() { diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/runtime/vmStructs.cpp --- a/src/hotspot/share/runtime/vmStructs.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/runtime/vmStructs.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -466,6 +466,7 @@ nonstatic_field(CardGeneration, _capacity_at_prologue, size_t) \ nonstatic_field(CardGeneration, _used_at_prologue, size_t) \ \ + nonstatic_field(CardTableModRefBS, _defer_initial_card_mark, bool) \ nonstatic_field(CardTableModRefBS, _whole_heap, const MemRegion) \ nonstatic_field(CardTableModRefBS, _guard_index, const size_t) \ nonstatic_field(CardTableModRefBS, _last_valid_index, const size_t) \ @@ -482,7 +483,6 @@ \ nonstatic_field(CollectedHeap, _reserved, MemRegion) \ nonstatic_field(CollectedHeap, _barrier_set, BarrierSet*) \ - nonstatic_field(CollectedHeap, _defer_initial_card_mark, bool) \ nonstatic_field(CollectedHeap, _is_gc_active, bool) \ nonstatic_field(CollectedHeap, _total_collections, unsigned int) \ \ diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/services/mallocSiteTable.hpp --- a/src/hotspot/share/services/mallocSiteTable.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/services/mallocSiteTable.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -151,7 +151,7 @@ ~AccessLock() { if (_lock_state == SharedLock) { - Atomic::dec((volatile jint*)_lock); + Atomic::dec(_lock); } } // Acquire shared lock. @@ -159,7 +159,7 @@ inline bool sharedLock() { jint res = Atomic::add(1, _lock); if (res < 0) { - Atomic::add(-1, _lock); + Atomic::dec(_lock); return false; } _lock_state = SharedLock; diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/services/mallocTracker.hpp --- a/src/hotspot/share/services/mallocTracker.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/services/mallocTracker.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
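The thread.inline.hpp hunk above keeps the classic compare-and-swap retry loop but lets the templated Atomic::cmpxchg operate directly on the uint32_t flags word instead of casting through jint. A standalone sketch of the same loop shape using std::atomic (hypothetical flag values, not the HotSpot Atomic API):

#include <atomic>
#include <cassert>
#include <cstdint>

std::atomic<uint32_t> suspend_flags(0);

// Set bits in 'suspend_flags' with a CAS retry loop, the same shape as
// Thread::set_suspend_flag in the hunk above.
void set_flag(uint32_t f) {
  uint32_t flags = suspend_flags.load();
  while (!suspend_flags.compare_exchange_weak(flags, flags | f)) {
    // 'flags' has been reloaded with the current value; retry.
  }
}

int main() {
  set_flag(0x04);
  set_flag(0x10);
  assert(suspend_flags.load() == 0x14);
  return 0;
}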
* * This code is free software; you can redistribute it and/or modify it @@ -66,8 +66,6 @@ assert(_size >= sz, "deallocation > allocated"); Atomic::dec(&_count); if (sz > 0) { - // unary minus operator applied to unsigned type, result still unsigned - #pragma warning(suppress: 4146) Atomic::sub(sz, &_size); } } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/services/threadService.cpp --- a/src/hotspot/share/services/threadService.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/services/threadService.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -120,7 +120,7 @@ } void ThreadService::remove_thread(JavaThread* thread, bool daemon) { - Atomic::dec((jint*) &_exiting_threads_count); + Atomic::dec(&_exiting_threads_count); if (thread->is_hidden_from_external_view() || thread->is_jvmti_agent_thread()) { @@ -131,17 +131,17 @@ if (daemon) { _daemon_threads_count->set_value(_daemon_threads_count->get_value() - 1); - Atomic::dec((jint*) &_exiting_daemon_threads_count); + Atomic::dec(&_exiting_daemon_threads_count); } } void ThreadService::current_thread_exiting(JavaThread* jt) { assert(jt == JavaThread::current(), "Called by current thread"); - Atomic::inc((jint*) &_exiting_threads_count); + Atomic::inc(&_exiting_threads_count); oop threadObj = jt->threadObj(); if (threadObj != NULL && java_lang_Thread::is_daemon(threadObj)) { - Atomic::inc((jint*) &_exiting_daemon_threads_count); + Atomic::inc(&_exiting_daemon_threads_count); } } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/utilities/copy.cpp --- a/src/hotspot/share/utilities/copy.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/utilities/copy.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,10 +29,8 @@ // Copy bytes; larger units are filled atomically if everything is aligned. -void Copy::conjoint_memory_atomic(void* from, void* to, size_t size) { - address src = (address) from; - address dst = (address) to; - uintptr_t bits = (uintptr_t) src | (uintptr_t) dst | (uintptr_t) size; +void Copy::conjoint_memory_atomic(const void* from, void* to, size_t size) { + uintptr_t bits = (uintptr_t) from | (uintptr_t) to | (uintptr_t) size; // (Note: We could improve performance by ignoring the low bits of size, // and putting a short cleanup loop after each bulk copy loop. @@ -43,14 +41,14 @@ // which may or may not want to include such optimizations.) if (bits % sizeof(jlong) == 0) { - Copy::conjoint_jlongs_atomic((jlong*) src, (jlong*) dst, size / sizeof(jlong)); + Copy::conjoint_jlongs_atomic((const jlong*) from, (jlong*) to, size / sizeof(jlong)); } else if (bits % sizeof(jint) == 0) { - Copy::conjoint_jints_atomic((jint*) src, (jint*) dst, size / sizeof(jint)); + Copy::conjoint_jints_atomic((const jint*) from, (jint*) to, size / sizeof(jint)); } else if (bits % sizeof(jshort) == 0) { - Copy::conjoint_jshorts_atomic((jshort*) src, (jshort*) dst, size / sizeof(jshort)); + Copy::conjoint_jshorts_atomic((const jshort*) from, (jshort*) to, size / sizeof(jshort)); } else { // Not aligned, so no need to be atomic. 
- Copy::conjoint_jbytes((void*) src, (void*) dst, size); + Copy::conjoint_jbytes((const void*) from, (void*) to, size); } } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/utilities/copy.hpp --- a/src/hotspot/share/utilities/copy.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/utilities/copy.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,28 +31,28 @@ // Assembly code for platforms that need it. extern "C" { - void _Copy_conjoint_words(HeapWord* from, HeapWord* to, size_t count); - void _Copy_disjoint_words(HeapWord* from, HeapWord* to, size_t count); + void _Copy_conjoint_words(const HeapWord* from, HeapWord* to, size_t count); + void _Copy_disjoint_words(const HeapWord* from, HeapWord* to, size_t count); - void _Copy_conjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count); - void _Copy_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count); + void _Copy_conjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count); + void _Copy_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count); - void _Copy_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count); - void _Copy_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count); + void _Copy_aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count); + void _Copy_aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count); - void _Copy_conjoint_bytes(void* from, void* to, size_t count); + void _Copy_conjoint_bytes(const void* from, void* to, size_t count); - void _Copy_conjoint_bytes_atomic (void* from, void* to, size_t count); - void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count); - void _Copy_conjoint_jints_atomic (jint* from, jint* to, size_t count); - void _Copy_conjoint_jlongs_atomic (jlong* from, jlong* to, size_t count); - void _Copy_conjoint_oops_atomic (oop* from, oop* to, size_t count); + void _Copy_conjoint_bytes_atomic (const void* from, void* to, size_t count); + void _Copy_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count); + void _Copy_conjoint_jints_atomic (const jint* from, jint* to, size_t count); + void _Copy_conjoint_jlongs_atomic (const jlong* from, jlong* to, size_t count); + void _Copy_conjoint_oops_atomic (const oop* from, oop* to, size_t count); - void _Copy_arrayof_conjoint_bytes (HeapWord* from, HeapWord* to, size_t count); - void _Copy_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count); - void _Copy_arrayof_conjoint_jints (HeapWord* from, HeapWord* to, size_t count); - void _Copy_arrayof_conjoint_jlongs (HeapWord* from, HeapWord* to, size_t count); - void _Copy_arrayof_conjoint_oops (HeapWord* from, HeapWord* to, size_t count); + void _Copy_arrayof_conjoint_bytes (const HeapWord* from, HeapWord* to, size_t count); + void _Copy_arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count); + void _Copy_arrayof_conjoint_jints (const HeapWord* from, HeapWord* to, size_t count); + void _Copy_arrayof_conjoint_jlongs (const HeapWord* from, HeapWord* to, size_t count); + void _Copy_arrayof_conjoint_oops (const HeapWord* from, HeapWord* to, size_t count); } class Copy : AllStatic { @@ -87,33 +87,33 @@ // HeapWords // Word-aligned words, 
conjoint, not atomic on each word - static void conjoint_words(HeapWord* from, HeapWord* to, size_t count) { + static void conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { assert_params_ok(from, to, LogHeapWordSize); pd_conjoint_words(from, to, count); } // Word-aligned words, disjoint, not atomic on each word - static void disjoint_words(HeapWord* from, HeapWord* to, size_t count) { + static void disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { assert_params_ok(from, to, LogHeapWordSize); assert_disjoint(from, to, count); pd_disjoint_words(from, to, count); } // Word-aligned words, disjoint, atomic on each word - static void disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) { + static void disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) { assert_params_ok(from, to, LogHeapWordSize); assert_disjoint(from, to, count); pd_disjoint_words_atomic(from, to, count); } // Object-aligned words, conjoint, not atomic on each word - static void aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { + static void aligned_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) { assert_params_aligned(from, to); pd_aligned_conjoint_words(from, to, count); } // Object-aligned words, disjoint, not atomic on each word - static void aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { + static void aligned_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) { assert_params_aligned(from, to); assert_disjoint(from, to, count); pd_aligned_disjoint_words(from, to, count); @@ -122,77 +122,77 @@ // bytes, jshorts, jints, jlongs, oops // bytes, conjoint, not atomic on each byte (not that it matters) - static void conjoint_jbytes(void* from, void* to, size_t count) { + static void conjoint_jbytes(const void* from, void* to, size_t count) { pd_conjoint_bytes(from, to, count); } // bytes, conjoint, atomic on each byte (not that it matters) - static void conjoint_jbytes_atomic(void* from, void* to, size_t count) { + static void conjoint_jbytes_atomic(const void* from, void* to, size_t count) { pd_conjoint_bytes(from, to, count); } // jshorts, conjoint, atomic on each jshort - static void conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { + static void conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) { assert_params_ok(from, to, LogBytesPerShort); pd_conjoint_jshorts_atomic(from, to, count); } // jints, conjoint, atomic on each jint - static void conjoint_jints_atomic(jint* from, jint* to, size_t count) { + static void conjoint_jints_atomic(const jint* from, jint* to, size_t count) { assert_params_ok(from, to, LogBytesPerInt); pd_conjoint_jints_atomic(from, to, count); } // jlongs, conjoint, atomic on each jlong - static void conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { + static void conjoint_jlongs_atomic(const jlong* from, jlong* to, size_t count) { assert_params_ok(from, to, LogBytesPerLong); pd_conjoint_jlongs_atomic(from, to, count); } // oops, conjoint, atomic on each oop - static void conjoint_oops_atomic(oop* from, oop* to, size_t count) { + static void conjoint_oops_atomic(const oop* from, oop* to, size_t count) { assert_params_ok(from, to, LogBytesPerHeapOop); pd_conjoint_oops_atomic(from, to, count); } // overloaded for UseCompressedOops - static void conjoint_oops_atomic(narrowOop* from, narrowOop* to, size_t count) { + static void conjoint_oops_atomic(const narrowOop* from, narrowOop* to, size_t count) { assert(sizeof(narrowOop) 
== sizeof(jint), "this cast is wrong"); assert_params_ok(from, to, LogBytesPerInt); - pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); + pd_conjoint_jints_atomic((const jint*)from, (jint*)to, count); } // Copy a span of memory. If the span is an integral number of aligned // longs, words, or ints, copy those units atomically. // The largest atomic transfer unit is 8 bytes, or the largest power // of two which divides all of from, to, and size, whichever is smaller. - static void conjoint_memory_atomic(void* from, void* to, size_t size); + static void conjoint_memory_atomic(const void* from, void* to, size_t size); // bytes, conjoint array, atomic on each byte (not that it matters) - static void arrayof_conjoint_jbytes(HeapWord* from, HeapWord* to, size_t count) { + static void arrayof_conjoint_jbytes(const HeapWord* from, HeapWord* to, size_t count) { pd_arrayof_conjoint_bytes(from, to, count); } // jshorts, conjoint array, atomic on each jshort - static void arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { + static void arrayof_conjoint_jshorts(const HeapWord* from, HeapWord* to, size_t count) { assert_params_ok(from, to, LogBytesPerShort); pd_arrayof_conjoint_jshorts(from, to, count); } // jints, conjoint array, atomic on each jint - static void arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { + static void arrayof_conjoint_jints(const HeapWord* from, HeapWord* to, size_t count) { assert_params_ok(from, to, LogBytesPerInt); pd_arrayof_conjoint_jints(from, to, count); } // jlongs, conjoint array, atomic on each jlong - static void arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { + static void arrayof_conjoint_jlongs(const HeapWord* from, HeapWord* to, size_t count) { assert_params_ok(from, to, LogBytesPerLong); pd_arrayof_conjoint_jlongs(from, to, count); } // oops, conjoint array, atomic on each oop - static void arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { + static void arrayof_conjoint_oops(const HeapWord* from, HeapWord* to, size_t count) { assert_params_ok(from, to, LogBytesPerHeapOop); pd_arrayof_conjoint_oops(from, to, count); } @@ -200,7 +200,7 @@ // Known overlap methods // Copy word-aligned words from higher to lower addresses, not atomic on each word - inline static void conjoint_words_to_lower(HeapWord* from, HeapWord* to, size_t byte_count) { + inline static void conjoint_words_to_lower(const HeapWord* from, HeapWord* to, size_t byte_count) { // byte_count is in bytes to check its alignment assert_params_ok(from, to, LogHeapWordSize); assert_byte_count_ok(byte_count, HeapWordSize); @@ -214,7 +214,7 @@ } // Copy word-aligned words from lower to higher addresses, not atomic on each word - inline static void conjoint_words_to_higher(HeapWord* from, HeapWord* to, size_t byte_count) { + inline static void conjoint_words_to_higher(const HeapWord* from, HeapWord* to, size_t byte_count) { // byte_count is in bytes to check its alignment assert_params_ok(from, to, LogHeapWordSize); assert_byte_count_ok(byte_count, HeapWordSize); @@ -305,7 +305,7 @@ } private: - static bool params_disjoint(HeapWord* from, HeapWord* to, size_t count) { + static bool params_disjoint(const HeapWord* from, HeapWord* to, size_t count) { if (from < to) { return pointer_delta(to, from) >= count; } @@ -314,14 +314,14 @@ // These methods raise a fatal if they detect a problem. 
- static void assert_disjoint(HeapWord* from, HeapWord* to, size_t count) { + static void assert_disjoint(const HeapWord* from, HeapWord* to, size_t count) { #ifdef ASSERT if (!params_disjoint(from, to, count)) basic_fatal("source and dest overlap"); #endif } - static void assert_params_ok(void* from, void* to, intptr_t log_align) { + static void assert_params_ok(const void* from, void* to, intptr_t log_align) { #ifdef ASSERT if (mask_bits((uintptr_t)from, right_n_bits(log_align)) != 0) basic_fatal("not aligned"); @@ -336,7 +336,7 @@ basic_fatal("not word aligned"); #endif } - static void assert_params_aligned(HeapWord* from, HeapWord* to) { + static void assert_params_aligned(const HeapWord* from, HeapWord* to) { #ifdef ASSERT if (mask_bits((uintptr_t)from, BytesPerLong-1) != 0) basic_fatal("not long aligned"); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/utilities/decoder.hpp --- a/src/hotspot/share/utilities/decoder.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/utilities/decoder.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,12 +33,10 @@ class AbstractDecoder : public CHeapObj { public: - virtual ~AbstractDecoder() {} - // status code for decoding native C frame enum decoder_status { not_available = -10, // real decoder is not available - no_error = 0, // successfully decoded frames + no_error = 0, // no error encountered out_of_memory, // out of memory file_invalid, // invalid elf file file_not_found, // could not found symbol file (on windows), such as jvm.pdb or jvm.map @@ -46,6 +44,12 @@ helper_init_error // SymInitialize failed (Windows only) }; +protected: + decoder_status _decoder_status; + +public: + virtual ~AbstractDecoder() {} + // decode an pc address to corresponding function name and an offset from the beginning of // the function // @@ -68,11 +72,8 @@ } static bool is_error(decoder_status status) { - return (status > 0); + return (status > no_error); } - -protected: - decoder_status _decoder_status; }; // Do nothing decoder @@ -96,10 +97,8 @@ virtual bool demangle(const char* symbol, char* buf, int buflen) { return false; } - }; - class Decoder : AllStatic { public: static bool decode(address pc, char* buf, int buflen, int* offset, const char* modulepath = NULL, bool demangle = true); diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/utilities/elfFile.cpp --- a/src/hotspot/share/utilities/elfFile.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/utilities/elfFile.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,60 +31,150 @@ #include #include +#include "logging/log.hpp" #include "memory/allocation.inline.hpp" +#include "memory/resourceArea.hpp" #include "utilities/decoder.hpp" #include "utilities/elfFile.hpp" #include "utilities/elfFuncDescTable.hpp" #include "utilities/elfStringTable.hpp" #include "utilities/elfSymbolTable.hpp" +#include "utilities/ostream.hpp" +// For test only, disable elf section cache and force to read from file directly. +bool ElfFile::_do_not_cache_elf_section = false; + +ElfSection::ElfSection(FILE* fd, const Elf_Shdr& hdr) : _section_data(NULL) { + _stat = load_section(fd, hdr); +} + +ElfSection::~ElfSection() { + if (_section_data != NULL) { + os::free(_section_data); + } +} + +NullDecoder::decoder_status ElfSection::load_section(FILE* const fd, const Elf_Shdr& shdr) { + memcpy((void*)&_section_hdr, (const void*)&shdr, sizeof(shdr)); + + if (ElfFile::_do_not_cache_elf_section) { + log_debug(decoder)("Elf section cache is disabled"); + return NullDecoder::no_error; + } + + _section_data = os::malloc(shdr.sh_size, mtInternal); + // No enough memory for caching. It is okay, we can try to read from + // file instead. + if (_section_data == NULL) return NullDecoder::no_error; -ElfFile::ElfFile(const char* filepath) { - assert(filepath, "null file path"); - memset(&m_elfHdr, 0, sizeof(m_elfHdr)); - m_string_tables = NULL; - m_symbol_tables = NULL; - m_funcDesc_table = NULL; - m_next = NULL; - m_status = NullDecoder::no_error; + MarkedFileReader mfd(fd); + if (mfd.has_mark() && + mfd.set_position(shdr.sh_offset) && + mfd.read(_section_data, shdr.sh_size)) { + return NullDecoder::no_error; + } else { + os::free(_section_data); + _section_data = NULL; + return NullDecoder::file_invalid; + } +} + +bool FileReader::read(void* buf, size_t size) { + assert(buf != NULL, "no buffer"); + assert(size > 0, "no space"); + return fread(buf, size, 1, _fd) == 1; +} + +int FileReader::read_buffer(void* buf, size_t size) { + assert(buf != NULL, "no buffer"); + assert(size > 0, "no space"); + return fread(buf, 1, size, _fd); +} + +bool FileReader::set_position(long offset) { + return fseek(_fd, offset, SEEK_SET) == 0; +} + +MarkedFileReader::MarkedFileReader(FILE* fd) : FileReader(fd) { + _marked_pos = ftell(fd); +} + +MarkedFileReader::~MarkedFileReader() { + if (_marked_pos != -1) { + set_position(_marked_pos); + } +} + +ElfFile::ElfFile(const char* filepath) : + _string_tables(NULL), _symbol_tables(NULL), _funcDesc_table(NULL), + _next(NULL), _status(NullDecoder::no_error), + _shdr_string_table(NULL), _file(NULL), _filepath(NULL) { + memset(&_elfHdr, 0, sizeof(_elfHdr)); int len = strlen(filepath) + 1; - m_filepath = (const char*)os::malloc(len * sizeof(char), mtInternal); - if (m_filepath != NULL) { - strcpy((char*)m_filepath, filepath); - m_file = fopen(filepath, "r"); - if (m_file != NULL) { - load_tables(); - } else { - m_status = NullDecoder::file_not_found; - } - } else { - m_status = NullDecoder::out_of_memory; + _filepath = (char*)os::malloc(len * sizeof(char), mtInternal); + if (_filepath == NULL) { + _status = NullDecoder::out_of_memory; + return; + } + strcpy(_filepath, filepath); + + _status = parse_elf(filepath); + + // we no longer need section header string table + if (_shdr_string_table != NULL) { + delete _shdr_string_table; + _shdr_string_table = NULL; } } ElfFile::~ElfFile() { - if (m_string_tables != NULL) { - delete m_string_tables; + if (_shdr_string_table != NULL) { + delete 
_shdr_string_table; } - if (m_symbol_tables != NULL) { - delete m_symbol_tables; + cleanup_tables(); + + if (_file != NULL) { + fclose(_file); + } + + if (_filepath != NULL) { + os::free((void*)_filepath); } - if (m_file != NULL) { - fclose(m_file); + if (_next != NULL) { + delete _next; + } +} + +void ElfFile::cleanup_tables() { + if (_string_tables != NULL) { + delete _string_tables; + _string_tables = NULL; + } + + if (_symbol_tables != NULL) { + delete _symbol_tables; + _symbol_tables = NULL; } - if (m_filepath != NULL) { - os::free((void*)m_filepath); + if (_funcDesc_table != NULL) { + delete _funcDesc_table; + _funcDesc_table = NULL; } +} - if (m_next != NULL) { - delete m_next; +NullDecoder::decoder_status ElfFile::parse_elf(const char* filepath) { + assert(filepath, "null file path"); + + _file = fopen(filepath, "r"); + if (_file != NULL) { + return load_tables(); + } else { + return NullDecoder::file_not_found; } -}; - +} //Check elf header to ensure the file is valid. bool ElfFile::is_elf_file(Elf_Ehdr& hdr) { @@ -96,116 +186,134 @@ ELFDATANONE != hdr.e_ident[EI_DATA]); } -bool ElfFile::load_tables() { - assert(m_file, "file not open"); - assert(!NullDecoder::is_error(m_status), "already in error"); +NullDecoder::decoder_status ElfFile::load_tables() { + assert(_file, "file not open"); + assert(!NullDecoder::is_error(_status), "already in error"); + FileReader freader(fd()); // read elf file header - if (fread(&m_elfHdr, sizeof(m_elfHdr), 1, m_file) != 1) { - m_status = NullDecoder::file_invalid; - return false; + if (!freader.read(&_elfHdr, sizeof(_elfHdr))) { + return NullDecoder::file_invalid; } - if (!is_elf_file(m_elfHdr)) { - m_status = NullDecoder::file_invalid; - return false; + // Check signature + if (!is_elf_file(_elfHdr)) { + return NullDecoder::file_invalid; } // walk elf file's section headers, and load string tables Elf_Shdr shdr; - if (!fseek(m_file, m_elfHdr.e_shoff, SEEK_SET)) { - if (NullDecoder::is_error(m_status)) return false; + if (!freader.set_position(_elfHdr.e_shoff)) { + return NullDecoder::file_invalid; + } - for (int index = 0; index < m_elfHdr.e_shnum; index ++) { - if (fread((void*)&shdr, sizeof(Elf_Shdr), 1, m_file) != 1) { - m_status = NullDecoder::file_invalid; - return false; - } - if (shdr.sh_type == SHT_STRTAB) { - // string tables - ElfStringTable* table = new (std::nothrow) ElfStringTable(m_file, shdr, index); - if (table == NULL) { - m_status = NullDecoder::out_of_memory; - return false; - } - add_string_table(table); - } else if (shdr.sh_type == SHT_SYMTAB || shdr.sh_type == SHT_DYNSYM) { - // symbol tables - ElfSymbolTable* table = new (std::nothrow) ElfSymbolTable(m_file, shdr); - if (table == NULL) { - m_status = NullDecoder::out_of_memory; - return false; - } - add_symbol_table(table); - } + for (int index = 0; index < _elfHdr.e_shnum; index ++) { + if (!freader.read(&shdr, sizeof(shdr))) { + return NullDecoder::file_invalid; } -#if defined(PPC64) && !defined(ABI_ELFv2) - // Now read the .opd section wich contains the PPC64 function descriptor table. - // The .opd section is only available on PPC64 (see for example: - // http://refspecs.linuxfoundation.org/LSB_3.1.1/LSB-Core-PPC64/LSB-Core-PPC64/specialsections.html) - // so this code should do no harm on other platforms but because of performance reasons we only - // execute it on PPC64 platforms. 
- // Notice that we can only find the .opd section after we have successfully read in the string - // tables in the previous loop, because we need to query the name of each section which is - // contained in one of the string tables (i.e. the one with the index m_elfHdr.e_shstrndx). - - // Reset the file pointer - if (fseek(m_file, m_elfHdr.e_shoff, SEEK_SET)) { - m_status = NullDecoder::file_invalid; - return false; + if (shdr.sh_type == SHT_STRTAB) { + // string tables + ElfStringTable* table = new (std::nothrow) ElfStringTable(fd(), shdr, index); + if (table == NULL) { + return NullDecoder::out_of_memory; + } + if (index == _elfHdr.e_shstrndx) { + assert(_shdr_string_table == NULL, "Only set once"); + _shdr_string_table = table; + } else { + add_string_table(table); + } + } else if (shdr.sh_type == SHT_SYMTAB || shdr.sh_type == SHT_DYNSYM) { + // symbol tables + ElfSymbolTable* table = new (std::nothrow) ElfSymbolTable(fd(), shdr); + if (table == NULL) { + return NullDecoder::out_of_memory; + } + add_symbol_table(table); } - for (int index = 0; index < m_elfHdr.e_shnum; index ++) { - if (fread((void*)&shdr, sizeof(Elf_Shdr), 1, m_file) != 1) { - m_status = NullDecoder::file_invalid; - return false; - } - if (m_elfHdr.e_shstrndx != SHN_UNDEF && shdr.sh_type == SHT_PROGBITS) { - ElfStringTable* string_table = get_string_table(m_elfHdr.e_shstrndx); - if (string_table == NULL) { - m_status = NullDecoder::file_invalid; - return false; - } - char buf[8]; // '8' is enough because we only want to read ".opd" - if (string_table->string_at(shdr.sh_name, buf, sizeof(buf)) && !strncmp(".opd", buf, 4)) { - m_funcDesc_table = new (std::nothrow) ElfFuncDescTable(m_file, shdr, index); - if (m_funcDesc_table == NULL) { - m_status = NullDecoder::out_of_memory; - return false; - } - break; - } + } +#if defined(PPC64) && !defined(ABI_ELFv2) + // Now read the .opd section wich contains the PPC64 function descriptor table. + // The .opd section is only available on PPC64 (see for example: + // http://refspecs.linuxfoundation.org/LSB_3.1.1/LSB-Core-PPC64/LSB-Core-PPC64/specialsections.html) + // so this code should do no harm on other platforms but because of performance reasons we only + // execute it on PPC64 platforms. + // Notice that we can only find the .opd section after we have successfully read in the string + // tables in the previous loop, because we need to query the name of each section which is + // contained in one of the string tables (i.e. the one with the index m_elfHdr.e_shstrndx). 
+ + // Reset the file pointer + int sect_index = section_by_name(".opd", shdr); + + if (sect_index == -1) { + return NullDecoder::file_invalid; + } + + _funcDesc_table = new (std::nothrow) ElfFuncDescTable(_file, shdr, sect_index); + if (_funcDesc_table == NULL) { + return NullDecoder::out_of_memory; + } +#endif + return NullDecoder::no_error; +} + +int ElfFile::section_by_name(const char* name, Elf_Shdr& hdr) { + assert(name != NULL, "No section name"); + size_t len = strlen(name) + 1; + ResourceMark rm; + char* buf = NEW_RESOURCE_ARRAY(char, len); + if (buf == NULL) { + return -1; + } + + assert(_shdr_string_table != NULL, "Section header string table should be loaded"); + ElfStringTable* const table = _shdr_string_table; + MarkedFileReader mfd(fd()); + if (!mfd.has_mark() || !mfd.set_position(_elfHdr.e_shoff)) return -1; + + int sect_index = -1; + for (int index = 0; index < _elfHdr.e_shnum; index ++) { + if (!mfd.read((void*)&hdr, sizeof(hdr))) { + break; + } + if (table->string_at(hdr.sh_name, buf, len)) { + if (strncmp(buf, name, len) == 0) { + sect_index = index; + break; } } -#endif - } - return true; + return sect_index; } bool ElfFile::decode(address addr, char* buf, int buflen, int* offset) { // something already went wrong, just give up - if (NullDecoder::is_error(m_status)) { + if (NullDecoder::is_error(_status)) { return false; } - ElfSymbolTable* symbol_table = m_symbol_tables; + int string_table_index; int pos_in_string_table; int off = INT_MAX; bool found_symbol = false; + ElfSymbolTable* symbol_table = _symbol_tables; + while (symbol_table != NULL) { - if (symbol_table->lookup(addr, &string_table_index, &pos_in_string_table, &off, m_funcDesc_table)) { + if (symbol_table->lookup(addr, &string_table_index, &pos_in_string_table, &off, _funcDesc_table)) { found_symbol = true; break; } - symbol_table = symbol_table->m_next; + symbol_table = symbol_table->next(); } - if (!found_symbol) return false; + if (!found_symbol) { + return false; + } ElfStringTable* string_table = get_string_table(string_table_index); if (string_table == NULL) { - m_status = NullDecoder::file_invalid; + _status = NullDecoder::file_invalid; return false; } if (offset) *offset = off; @@ -213,74 +321,31 @@ return string_table->string_at(pos_in_string_table, buf, buflen); } - void ElfFile::add_symbol_table(ElfSymbolTable* table) { - if (m_symbol_tables == NULL) { - m_symbol_tables = table; + if (_symbol_tables == NULL) { + _symbol_tables = table; } else { - table->m_next = m_symbol_tables; - m_symbol_tables = table; + table->set_next(_symbol_tables); + _symbol_tables = table; } } void ElfFile::add_string_table(ElfStringTable* table) { - if (m_string_tables == NULL) { - m_string_tables = table; + if (_string_tables == NULL) { + _string_tables = table; } else { - table->m_next = m_string_tables; - m_string_tables = table; + table->set_next(_string_tables); + _string_tables = table; } } ElfStringTable* ElfFile::get_string_table(int index) { - ElfStringTable* p = m_string_tables; + ElfStringTable* p = _string_tables; while (p != NULL) { if (p->index() == index) return p; - p = p->m_next; + p = p->next(); } return NULL; } -#ifdef LINUX -bool ElfFile::specifies_noexecstack(const char* filepath) { - // Returns true if the elf file is marked NOT to require an executable stack, - // or if the file could not be opened. - // Returns false if the elf file requires an executable stack, the stack flag - // is not set at all, or if the file can not be read. 
- if (filepath == NULL) return true; - - FILE* file = fopen(filepath, "r"); - if (file == NULL) return true; - - // AARCH64 defaults to noexecstack. All others default to execstack. -#ifdef AARCH64 - bool result = true; -#else - bool result = false; -#endif - - // Read file header - Elf_Ehdr head; - if (fread(&head, sizeof(Elf_Ehdr), 1, file) == 1 && - is_elf_file(head) && - fseek(file, head.e_phoff, SEEK_SET) == 0) { - - // Read program header table - Elf_Phdr phdr; - for (int index = 0; index < head.e_phnum; index ++) { - if (fread((void*)&phdr, sizeof(Elf_Phdr), 1, file) != 1) { - result = false; - break; - } - if (phdr.p_type == PT_GNU_STACK) { - result = (phdr.p_flags == (PF_R | PF_W)); - break; - } - } - } - fclose(file); - return result; -} -#endif // LINUX - #endif // !_WINDOWS && !__APPLE__ diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/utilities/elfFile.hpp --- a/src/hotspot/share/utilities/elfFile.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/utilities/elfFile.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,7 +25,7 @@ #ifndef SHARE_VM_UTILITIES_ELF_FILE_HPP #define SHARE_VM_UTILITIES_ELF_FILE_HPP -#if !defined(_WINDOWS) && !defined(__APPLE__) +#if !defined(_WINDOWS) && !defined(__APPLE__) && !defined(_AIX) #if defined(__OpenBSD__) #include @@ -57,7 +57,6 @@ typedef Elf32_Off Elf_Off; typedef Elf32_Addr Elf_Addr; - typedef Elf32_Ehdr Elf_Ehdr; typedef Elf32_Shdr Elf_Shdr; typedef Elf32_Phdr Elf_Phdr; @@ -72,46 +71,126 @@ #include "memory/allocation.hpp" #include "utilities/decoder.hpp" - class ElfStringTable; class ElfSymbolTable; class ElfFuncDescTable; +// ELF section, may or may not have cached data +class ElfSection VALUE_OBJ_CLASS_SPEC { +private: + Elf_Shdr _section_hdr; + void* _section_data; + NullDecoder::decoder_status _stat; +public: + ElfSection(FILE* fd, const Elf_Shdr& hdr); + ~ElfSection(); -// On Solaris/Linux platforms, libjvm.so does contain all private symbols. + NullDecoder::decoder_status status() const { return _stat; } + + const Elf_Shdr* section_header() const { return &_section_hdr; } + const void* section_data() const { return (const void*)_section_data; } +private: + // load this section. + // it return no_error, when it fails to cache the section data due to lack of memory + NullDecoder::decoder_status load_section(FILE* const file, const Elf_Shdr& hdr); +}; + +class FileReader : public StackObj { +protected: + FILE* const _fd; +public: + FileReader(FILE* const fd) : _fd(fd) {}; + bool read(void* buf, size_t size); + int read_buffer(void* buf, size_t size); + bool set_position(long offset); +}; + +// Mark current position, so we can get back to it after +// reads. +class MarkedFileReader : public FileReader { +private: + long _marked_pos; +public: + MarkedFileReader(FILE* const fd); + ~MarkedFileReader(); + + bool has_mark() const { return _marked_pos >= 0; } +}; + // ElfFile is basically an elf file parser, which can lookup the symbol // that is the nearest to the given address. // Beware, this code is called from vm error reporting code, when vm is already // in "error" state, so there are scenarios, lookup will fail. We want this // part of code to be very defensive, and bait out if anything went wrong. 
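MarkedFileReader, declared above, remembers the file position at construction and restores it in its destructor, so the ELF parsing helpers can seek around the file without disturbing the caller's position. A standalone stdio sketch of that save/restore idiom (ScopedFilePosition is a hypothetical name, not the HotSpot class):

#include <cstdio>

// Remembers the file position at construction and restores it when the object
// goes out of scope -- the same idea as the MarkedFileReader declared above.
class ScopedFilePosition {
  FILE* const _fd;
  long _marked_pos;
public:
  explicit ScopedFilePosition(FILE* fd) : _fd(fd), _marked_pos(ftell(fd)) {}
  ~ScopedFilePosition() {
    if (_marked_pos != -1) {
      fseek(_fd, _marked_pos, SEEK_SET);
    }
  }
  bool has_mark() const { return _marked_pos >= 0; }
};

int main() {
  FILE* fd = tmpfile();                 // scratch file instead of an ELF file
  if (fd == NULL) return 1;
  fputs("0123456789", fd);
  fseek(fd, 4, SEEK_SET);               // caller is positioned at offset 4
  {
    ScopedFilePosition mark(fd);
    if (mark.has_mark()) {
      fseek(fd, 0, SEEK_END);           // helper wanders off to read elsewhere
    }
  }                                     // destructor restores offset 4 here
  long pos = ftell(fd);
  fclose(fd);
  return pos == 4 ? 0 : 1;
}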
- class ElfFile: public CHeapObj { friend class ElfDecoder; - public: + +private: + // link ElfFiles + ElfFile* _next; + + // Elf file + char* _filepath; + FILE* _file; + + // Elf header + Elf_Ehdr _elfHdr; + + // symbol tables + ElfSymbolTable* _symbol_tables; + + // regular string tables + ElfStringTable* _string_tables; + + // section header string table, used for finding section name + ElfStringTable* _shdr_string_table; + + // function descriptors table + ElfFuncDescTable* _funcDesc_table; + + NullDecoder::decoder_status _status; + +public: ElfFile(const char* filepath); ~ElfFile(); bool decode(address addr, char* buf, int buflen, int* offset); - const char* filepath() { - return m_filepath; + + const char* filepath() const { + return _filepath; + } + + bool same_elf_file(const char* filepath) const { + assert(filepath != NULL, "null file path"); + return (_filepath != NULL && !strcmp(filepath, _filepath)); } - bool same_elf_file(const char* filepath) { - assert(filepath, "null file path"); - assert(m_filepath, "already out of memory"); - return (m_filepath && !strcmp(filepath, m_filepath)); + NullDecoder::decoder_status get_status() const { + return _status; } - NullDecoder::decoder_status get_status() { - return m_status; - } - - private: + // Returns true if the elf file is marked NOT to require an executable stack, + // or if the file could not be opened. + // Returns false if the elf file requires an executable stack, the stack flag + // is not set at all, or if the file can not be read. + // On systems other than linux it always returns false. + static bool specifies_noexecstack(const char* filepath) NOT_LINUX({ return false; }); +private: // sanity check, if the file is a real elf file static bool is_elf_file(Elf_Ehdr&); - // load string tables from the elf file - bool load_tables(); + // parse this elf file + NullDecoder::decoder_status parse_elf(const char* filename); + + // load string, symbol and function descriptor tables from the elf file + NullDecoder::decoder_status load_tables(); + + ElfFile* next() const { return _next; } + void set_next(ElfFile* file) { _next = file; } + + // find a section by name, return section index + // if there is no such section, return -1 + int section_by_name(const char* name, Elf_Shdr& hdr); // string tables are stored in a linked list void add_string_table(ElfStringTable* table); @@ -122,39 +201,15 @@ // return a string table at specified section index ElfStringTable* get_string_table(int index); -protected: - ElfFile* next() const { return m_next; } - void set_next(ElfFile* file) { m_next = file; } - public: - // Returns true if the elf file is marked NOT to require an executable stack, - // or if the file could not be opened. - // Returns false if the elf file requires an executable stack, the stack flag - // is not set at all, or if the file can not be read. - // On systems other than linux it always returns false. 
- static bool specifies_noexecstack(const char* filepath) NOT_LINUX({ return false; }); - - protected: - ElfFile* m_next; + FILE* const fd() const { return _file; } - private: - // file - const char* m_filepath; - FILE* m_file; - - // Elf header - Elf_Ehdr m_elfHdr; + // Cleanup string, symbol and function descriptor tables + void cleanup_tables(); - // symbol tables - ElfSymbolTable* m_symbol_tables; - - // string tables - ElfStringTable* m_string_tables; - - // function descriptors table - ElfFuncDescTable* m_funcDesc_table; - - NullDecoder::decoder_status m_status; +public: + // For whitebox test + static bool _do_not_cache_elf_section; }; #endif // !_WINDOWS && !__APPLE__ diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/utilities/elfFuncDescTable.cpp --- a/src/hotspot/share/utilities/elfFuncDescTable.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/utilities/elfFuncDescTable.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2013 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -30,7 +30,8 @@ #include "memory/allocation.inline.hpp" #include "utilities/elfFuncDescTable.hpp" -ElfFuncDescTable::ElfFuncDescTable(FILE* file, Elf_Shdr shdr, int index) { +ElfFuncDescTable::ElfFuncDescTable(FILE* file, Elf_Shdr shdr, int index) : + _file(file), _index(index), _section(file, shdr) { assert(file, "null file handle"); // The actual function address (i.e. function entry point) is always the // first value in the function descriptor (on IA64 and PPC64 they look as follows): @@ -39,62 +40,33 @@ // Unfortunately 'shdr.sh_entsize' doesn't always seem to contain this size (it's zero on PPC64) so we can't assert // assert(IA64_ONLY(2) PPC64_ONLY(3) * sizeof(address) == shdr.sh_entsize, "Size mismatch for '.opd' section entries"); - m_funcDescs = NULL; - m_file = file; - m_index = index; - m_status = NullDecoder::no_error; - - // try to load the function descriptor table - long cur_offset = ftell(file); - if (cur_offset != -1) { - // call malloc so we can back up if memory allocation fails. - m_funcDescs = (address*)os::malloc(shdr.sh_size, mtInternal); - if (m_funcDescs) { - if (fseek(file, shdr.sh_offset, SEEK_SET) || - fread((void*)m_funcDescs, shdr.sh_size, 1, file) != 1 || - fseek(file, cur_offset, SEEK_SET)) { - m_status = NullDecoder::file_invalid; - os::free(m_funcDescs); - m_funcDescs = NULL; - } - } - if (!NullDecoder::is_error(m_status)) { - memcpy(&m_shdr, &shdr, sizeof(Elf_Shdr)); - } - } else { - m_status = NullDecoder::file_invalid; - } + _status = _section.status(); } ElfFuncDescTable::~ElfFuncDescTable() { - if (m_funcDescs != NULL) { - os::free(m_funcDescs); - } } address ElfFuncDescTable::lookup(Elf_Word index) { - if (NullDecoder::is_error(m_status)) { + if (NullDecoder::is_error(_status)) { return NULL; } - if (m_funcDescs != NULL) { - if (m_shdr.sh_size > 0 && m_shdr.sh_addr <= index && index <= m_shdr.sh_addr + m_shdr.sh_size) { - // Notice that 'index' is a byte-offset into the function descriptor table. 
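On PPC64 and IA64 a symbol's value points into the '.opd' function descriptor section rather than at code, so the lookup treats it as an address inside that section and returns the first word of the descriptor, which is the actual entry point. A minimal sketch of the arithmetic, assuming 8-byte addresses and a section cached in memory (hypothetical helper, not part of the patch):

    // funcDescs: the cached '.opd' section viewed as an array of 8-byte words.
    // shAddr/shSize: sh_addr and sh_size from the '.opd' section header.
    final class OpdLookup {
        static long resolveEntryPoint(long[] funcDescs, long shAddr, long shSize, long addr) {
            if (shSize <= 0 || addr < shAddr || addr > shAddr + shSize) {
                return 0L;                                    // not inside '.opd': nothing to resolve
            }
            int slot = (int) ((addr - shAddr) / Long.BYTES);  // byte offset -> descriptor index
            return funcDescs[slot];                           // first word = function entry point
        }
    }

When the section could not be cached, the patched lookup() instead seeks to sh_offset + (addr - sh_addr) and reads a single address through a MarkedFileReader.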
- return m_funcDescs[(index - m_shdr.sh_addr) / sizeof(address)]; - } + address* func_descs = cached_func_descs(); + const Elf_Shdr* shdr = _section.section_header(); + if (!(shdr->sh_size > 0 && shdr->sh_addr <= index && index <= shdr->sh_addr + shdr->sh_size)) { + // don't put the whole decoder in error mode if we just tried a wrong index return NULL; + } + + if (func_descs != NULL) { + return func_descs[(index - shdr->sh_addr) / sizeof(address)]; } else { - long cur_pos; + MarkedFileReader mfd(_file); address addr; - if (!(m_shdr.sh_size > 0 && m_shdr.sh_addr <= index && index <= m_shdr.sh_addr + m_shdr.sh_size)) { - // don't put the whole decoder in error mode if we just tried a wrong index - return NULL; - } - if ((cur_pos = ftell(m_file)) == -1 || - fseek(m_file, m_shdr.sh_offset + index - m_shdr.sh_addr, SEEK_SET) || - fread(&addr, sizeof(addr), 1, m_file) != 1 || - fseek(m_file, cur_pos, SEEK_SET)) { - m_status = NullDecoder::file_invalid; + if (!mfd.has_mark() || + !mfd.set_position(shdr->sh_offset + index - shdr->sh_addr) || + !mfd.read((void*)&addr, sizeof(addr))) { + _status = NullDecoder::file_invalid; return NULL; } return addr; diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/utilities/elfFuncDescTable.hpp --- a/src/hotspot/share/utilities/elfFuncDescTable.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/utilities/elfFuncDescTable.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2013 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -116,32 +116,31 @@ class ElfFuncDescTable: public CHeapObj { friend class ElfFile; - public: +private: + // holds the complete function descriptor section if + // we can allocate enough memory + ElfSection _section; + + // file contains string table + FILE* const _file; + + // The section index of this function descriptor (i.e. '.opd') section in the ELF file + const int _index; + + NullDecoder::decoder_status _status; +public: ElfFuncDescTable(FILE* file, Elf_Shdr shdr, int index); ~ElfFuncDescTable(); // return the function address for the function descriptor at 'index' or NULL on error address lookup(Elf_Word index); - int get_index() { return m_index; }; - - NullDecoder::decoder_status get_status() { return m_status; }; - - protected: - // holds the complete function descriptor section if - // we can allocate enough memory - address* m_funcDescs; + int get_index() const { return _index; }; - // file contains string table - FILE* m_file; + NullDecoder::decoder_status get_status() const { return _status; }; - // section header - Elf_Shdr m_shdr; - - // The section index of this function descriptor (i.e. '.opd') section in the ELF file - int m_index; - - NullDecoder::decoder_status m_status; +private: + address* cached_func_descs() const { return (address*)_section.section_data(); } }; #endif // !_WINDOWS && !__APPLE__ diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/utilities/elfStringTable.cpp --- a/src/hotspot/share/utilities/elfStringTable.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/utilities/elfStringTable.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -33,58 +33,44 @@ // We will try to load whole string table into memory if we can. // Otherwise, fallback to more expensive file operation. -ElfStringTable::ElfStringTable(FILE* file, Elf_Shdr shdr, int index) { - assert(file, "null file handle"); - m_table = NULL; - m_index = index; - m_next = NULL; - m_file = file; - m_status = NullDecoder::no_error; - - // try to load the string table - long cur_offset = ftell(file); - m_table = (char*)os::malloc(sizeof(char) * shdr.sh_size, mtInternal); - if (m_table != NULL) { - // if there is an error, mark the error - if (fseek(file, shdr.sh_offset, SEEK_SET) || - fread((void*)m_table, shdr.sh_size, 1, file) != 1 || - fseek(file, cur_offset, SEEK_SET)) { - m_status = NullDecoder::file_invalid; - os::free((void*)m_table); - m_table = NULL; - } - } else { - memcpy(&m_shdr, &shdr, sizeof(Elf_Shdr)); - } +ElfStringTable::ElfStringTable(FILE* const file, Elf_Shdr& shdr, int index) : + _section(file, shdr), _index(index), _fd(file), _next(NULL) { + _status = _section.status(); } ElfStringTable::~ElfStringTable() { - if (m_table != NULL) { - os::free((void*)m_table); - } - - if (m_next != NULL) { - delete m_next; + if (_next != NULL) { + delete _next; } } -bool ElfStringTable::string_at(int pos, char* buf, int buflen) { - if (NullDecoder::is_error(m_status)) { +bool ElfStringTable::string_at(size_t pos, char* buf, int buflen) { + if (NullDecoder::is_error(get_status())) { + return false; + } + + assert(buflen > 0, "no buffer"); + if (pos >= _section.section_header()->sh_size) { return false; } - if (m_table != NULL) { - jio_snprintf(buf, buflen, "%s", (const char*)(m_table + pos)); + + const char* data = (const char*)_section.section_data(); + if (data != NULL) { + jio_snprintf(buf, buflen, "%s", data + pos); return true; - } else { - long cur_pos = ftell(m_file); - if (cur_pos == -1 || - fseek(m_file, m_shdr.sh_offset + pos, SEEK_SET) || - fread(buf, 1, buflen, m_file) <= 0 || - fseek(m_file, cur_pos, SEEK_SET)) { - m_status = NullDecoder::file_invalid; + } else { // no cache data, read from file instead + const Elf_Shdr* const shdr = _section.section_header(); + MarkedFileReader mfd(_fd); + if (mfd.has_mark() && + mfd.set_position(shdr->sh_offset + pos) && + mfd.read((void*)buf, size_t(buflen))) { + buf[buflen - 1] = '\0'; + return true; + } else { + // put it in error state to avoid retry + _status = NullDecoder::file_invalid; return false; } - return true; } } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/utilities/elfStringTable.hpp --- a/src/hotspot/share/utilities/elfStringTable.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/utilities/elfStringTable.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,37 +37,36 @@ // one blob. Otherwise, it will load string from file when requested. 
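When the table could not be cached, string_at() reads straight from the file through the new MarkedFileReader, which records the current position on construction and restores it when it goes out of scope, so the caller's file offset is never disturbed. The same idiom expressed in Java, purely as an illustration (the patch's class is C++ and stack-allocated; these names are hypothetical):

    import java.io.Closeable;
    import java.io.IOException;
    import java.io.RandomAccessFile;

    // Remembers the file position at construction and restores it on close(),
    // so ad-hoc reads leave the underlying stream where the caller expects it.
    final class MarkedReader implements Closeable {
        private final RandomAccessFile file;
        private final long markedPos;

        MarkedReader(RandomAccessFile file) throws IOException {
            this.file = file;
            this.markedPos = file.getFilePointer();
        }

        boolean setPosition(long offset) {
            try { file.seek(offset); return true; } catch (IOException e) { return false; }
        }

        boolean read(byte[] buf) {
            try { file.readFully(buf); return true; } catch (IOException e) { return false; }
        }

        @Override
        public void close() throws IOException {
            file.seek(markedPos);   // restore the caller's position
        }
    }

Used as try (MarkedReader r = new MarkedReader(raf)) { ... }, the position is restored on exit whether or not the read succeeded.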
class ElfStringTable: CHeapObj { friend class ElfFile; - public: - ElfStringTable(FILE* file, Elf_Shdr shdr, int index); +private: + ElfStringTable* _next; + int _index; // section index + ElfSection _section; + FILE* const _fd; + NullDecoder::decoder_status _status; + +public: + ElfStringTable(FILE* const file, Elf_Shdr& shdr, int index); ~ElfStringTable(); // section index - int index() { return m_index; }; + int index() const { return _index; }; // get string at specified offset - bool string_at(int offset, char* buf, int buflen); + bool string_at(size_t offset, char* buf, int buflen); // get status code - NullDecoder::decoder_status get_status() { return m_status; }; - - protected: - ElfStringTable* m_next; - - // section index - int m_index; + NullDecoder::decoder_status get_status() const { + return _status; + } - // holds complete string table if can - // allocate enough memory - const char* m_table; +private: + void set_next(ElfStringTable* next) { + _next = next; + } - // file contains string table - FILE* m_file; - - // section header - Elf_Shdr m_shdr; - - // error code - NullDecoder::decoder_status m_status; + ElfStringTable* next() const { + return _next; + } }; #endif // !_WINDOWS && !__APPLE__ diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/utilities/elfSymbolTable.cpp --- a/src/hotspot/share/utilities/elfSymbolTable.cpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/utilities/elfSymbolTable.cpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,48 +30,26 @@ #include "utilities/elfFuncDescTable.hpp" #include "utilities/elfSymbolTable.hpp" -ElfSymbolTable::ElfSymbolTable(FILE* file, Elf_Shdr shdr) { - assert(file, "null file handle"); - m_symbols = NULL; - m_next = NULL; - m_file = file; - m_status = NullDecoder::no_error; +ElfSymbolTable::ElfSymbolTable(FILE* const file, Elf_Shdr& shdr) : + _section(file, shdr), _fd(file), _next(NULL) { + assert(file != NULL, "null file handle"); + _status = _section.status(); - // try to load the string table - long cur_offset = ftell(file); - if (cur_offset != -1) { - // call malloc so we can back up if memory allocation fails. 
- m_symbols = (Elf_Sym*)os::malloc(shdr.sh_size, mtInternal); - if (m_symbols) { - if (fseek(file, shdr.sh_offset, SEEK_SET) || - fread((void*)m_symbols, shdr.sh_size, 1, file) != 1 || - fseek(file, cur_offset, SEEK_SET)) { - m_status = NullDecoder::file_invalid; - os::free(m_symbols); - m_symbols = NULL; - } - } - if (!NullDecoder::is_error(m_status)) { - memcpy(&m_shdr, &shdr, sizeof(Elf_Shdr)); - } - } else { - m_status = NullDecoder::file_invalid; + if (_section.section_header()->sh_size % sizeof(Elf_Sym) != 0) { + _status = NullDecoder::file_invalid; } } ElfSymbolTable::~ElfSymbolTable() { - if (m_symbols != NULL) { - os::free(m_symbols); - } - - if (m_next != NULL) { - delete m_next; + if (_next != NULL) { + delete _next; } } bool ElfSymbolTable::compare(const Elf_Sym* sym, address addr, int* stringtableIndex, int* posIndex, int* offset, ElfFuncDescTable* funcDescTable) { if (STT_FUNC == ELF_ST_TYPE(sym->st_info)) { Elf_Word st_size = sym->st_size; + const Elf_Shdr* shdr = _section.section_header(); address sym_addr; if (funcDescTable != NULL && funcDescTable->get_index() == sym->st_shndx) { // We need to go another step trough the function descriptor table (currently PPC64 only) @@ -82,7 +60,7 @@ if (sym_addr <= addr && (Elf_Word)(addr - sym_addr) < st_size) { *offset = (int)(addr - sym_addr); *posIndex = sym->st_name; - *stringtableIndex = m_shdr.sh_link; + *stringtableIndex = shdr->sh_link; return true; } } @@ -94,39 +72,39 @@ assert(posIndex, "null string table offset pointer"); assert(offset, "null offset pointer"); - if (NullDecoder::is_error(m_status)) { + if (NullDecoder::is_error(get_status())) { return false; } size_t sym_size = sizeof(Elf_Sym); - assert((m_shdr.sh_size % sym_size) == 0, "check size"); - int count = m_shdr.sh_size / sym_size; - if (m_symbols != NULL) { + int count = _section.section_header()->sh_size / sym_size; + Elf_Sym* symbols = (Elf_Sym*)_section.section_data(); + + if (symbols != NULL) { for (int index = 0; index < count; index ++) { - if (compare(&m_symbols[index], addr, stringtableIndex, posIndex, offset, funcDescTable)) { + if (compare(&symbols[index], addr, stringtableIndex, posIndex, offset, funcDescTable)) { return true; } } } else { - long cur_pos; - if ((cur_pos = ftell(m_file)) == -1 || - fseek(m_file, m_shdr.sh_offset, SEEK_SET)) { - m_status = NullDecoder::file_invalid; + MarkedFileReader mfd(_fd); + + if (!mfd.has_mark() || !mfd.set_position(_section.section_header()->sh_offset)) { + _status = NullDecoder::file_invalid; return false; } Elf_Sym sym; for (int index = 0; index < count; index ++) { - if (fread(&sym, sym_size, 1, m_file) == 1) { - if (compare(&sym, addr, stringtableIndex, posIndex, offset, funcDescTable)) { - return true; - } - } else { - m_status = NullDecoder::file_invalid; + if (!mfd.read((void*)&sym, sizeof(sym))) { + _status = NullDecoder::file_invalid; return false; } + + if (compare(&sym, addr, stringtableIndex, posIndex, offset, funcDescTable)) { + return true; + } } - fseek(m_file, cur_pos, SEEK_SET); } return false; } diff -r bec86eb4a71a -r 206a6f728ce5 src/hotspot/share/utilities/elfSymbolTable.hpp --- a/src/hotspot/share/utilities/elfSymbolTable.hpp Fri Feb 23 12:30:03 2018 +0530 +++ b/src/hotspot/share/utilities/elfSymbolTable.hpp Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -40,29 +40,27 @@ */ class ElfSymbolTable: public CHeapObj { friend class ElfFile; - public: - ElfSymbolTable(FILE* file, Elf_Shdr shdr); +private: + ElfSymbolTable* _next; + + // file contains string table + FILE* const _fd; + + // corresponding section + ElfSection _section; + + NullDecoder::decoder_status _status; +public: + ElfSymbolTable(FILE* const file, Elf_Shdr& shdr); ~ElfSymbolTable(); // search the symbol that is nearest to the specified address. bool lookup(address addr, int* stringtableIndex, int* posIndex, int* offset, ElfFuncDescTable* funcDescTable); - NullDecoder::decoder_status get_status() { return m_status; }; - - protected: - ElfSymbolTable* m_next; - - // holds a complete symbol table section if - // can allocate enough memory - Elf_Sym* m_symbols; - - // file contains string table - FILE* m_file; - - // section header - Elf_Shdr m_shdr; - - NullDecoder::decoder_status m_status; + NullDecoder::decoder_status get_status() const { return _status; }; +private: + ElfSymbolTable* next() const { return _next; } + void set_next(ElfSymbolTable* next) { _next = next; } bool compare(const Elf_Sym* sym, address addr, int* stringtableIndex, int* posIndex, int* offset, ElfFuncDescTable* funcDescTable); }; diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/com/sun/crypto/provider/BlockCipherParamsCore.java --- a/src/java.base/share/classes/com/sun/crypto/provider/BlockCipherParamsCore.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/com/sun/crypto/provider/BlockCipherParamsCore.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -115,7 +115,7 @@ * Returns a formatted string describing the parameters. */ public String toString() { - String LINE_SEP = System.getProperty("line.separator"); + String LINE_SEP = System.lineSeparator(); String ivString = LINE_SEP + " iv:" + LINE_SEP + "["; HexDumpEncoder encoder = new HexDumpEncoder(); diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/com/sun/crypto/provider/DHParameters.java --- a/src/java.base/share/classes/com/sun/crypto/provider/DHParameters.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/com/sun/crypto/provider/DHParameters.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -129,7 +129,7 @@ * Returns a formatted string describing the parameters. 
*/ protected String engineToString() { - String LINE_SEP = System.getProperty("line.separator"); + String LINE_SEP = System.lineSeparator(); StringBuilder sb = new StringBuilder("SunJCE Diffie-Hellman Parameters:" diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/com/sun/crypto/provider/DHPublicKey.java --- a/src/java.base/share/classes/com/sun/crypto/provider/DHPublicKey.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/com/sun/crypto/provider/DHPublicKey.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,7 +43,7 @@ * * * @see DHPrivateKey - * @see java.security.KeyAgreement + * @see javax.crypto.KeyAgreement */ final class DHPublicKey implements PublicKey, javax.crypto.interfaces.DHPublicKey, Serializable { @@ -258,7 +258,7 @@ } public String toString() { - String LINE_SEP = System.getProperty("line.separator"); + String LINE_SEP = System.lineSeparator(); StringBuilder sb = new StringBuilder("SunJCE Diffie-Hellman Public Key:" diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/com/sun/crypto/provider/GCMParameters.java --- a/src/java.base/share/classes/com/sun/crypto/provider/GCMParameters.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/com/sun/crypto/provider/GCMParameters.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -134,7 +134,7 @@ * Returns a formatted string describing the parameters. */ protected String engineToString() { - String LINE_SEP = System.getProperty("line.separator"); + String LINE_SEP = System.lineSeparator(); HexDumpEncoder encoder = new HexDumpEncoder(); StringBuilder sb = new StringBuilder(LINE_SEP + " iv:" + LINE_SEP + "[" diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/com/sun/crypto/provider/PBEParameters.java --- a/src/java.base/share/classes/com/sun/crypto/provider/PBEParameters.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/com/sun/crypto/provider/PBEParameters.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -135,7 +135,7 @@ * Returns a formatted string describing the parameters. 
*/ protected String engineToString() { - String LINE_SEP = System.getProperty("line.separator"); + String LINE_SEP = System.lineSeparator(); String saltString = LINE_SEP + " salt:" + LINE_SEP + "["; HexDumpEncoder encoder = new HexDumpEncoder(); saltString += encoder.encodeBuffer(salt); diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/com/sun/crypto/provider/RC2Parameters.java --- a/src/java.base/share/classes/com/sun/crypto/provider/RC2Parameters.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/com/sun/crypto/provider/RC2Parameters.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -214,7 +214,7 @@ * Returns a formatted string describing the parameters. */ protected String engineToString() { - String LINE_SEP = System.getProperty("line.separator"); + String LINE_SEP = System.lineSeparator(); HexDumpEncoder encoder = new HexDumpEncoder(); StringBuilder sb = new StringBuilder(LINE_SEP + " iv:" + LINE_SEP + "[" diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/io/FileCleanable.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/java.base/share/classes/java/io/FileCleanable.java Mon Feb 26 10:36:34 2018 -0800 @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.io; + +import jdk.internal.misc.JavaIOFileDescriptorAccess; +import jdk.internal.misc.SharedSecrets; +import jdk.internal.ref.CleanerFactory; +import jdk.internal.ref.PhantomCleanable; + +import java.lang.ref.Cleaner; + +/** + * Cleanable for a FileDescriptor when it becomes phantom reachable. + * For regular fds on Unix and regular handles on Windows + * register a cleanup if fd != -1 or handle != -1. + *

+ * Subclassed from {@code PhantomCleanable} so that {@code clear} can be + * called to disable the cleanup when the handle is closed by any means other + * than calling {@link FileDescriptor#close}. + * Otherwise, it might incorrectly close the handle after it has been reused. + */ +final class FileCleanable extends PhantomCleanable { + + // Access to FileDescriptor private fields; + // avoids making fd and handle package private + private static final JavaIOFileDescriptorAccess fdAccess = + SharedSecrets.getJavaIOFileDescriptorAccess(); + + /* + * Raw close of the file fd and/or handle. + * Used only for last chance cleanup. + */ + private static native void cleanupClose0(int fd, long handle) throws IOException; + + // The raw fd to close + private final int fd; + + // The handle to close + private final long handle; + + /** + * Register a Cleanable with the FileDescriptor + * if the FileDescriptor is non-null and valid. + * @implNote + * A exception (OutOfMemoryException) will leave the FileDescriptor + * having allocated resources and leak the fd/handle. + * + * @param fdo the FileDescriptor; may be null + */ + static void register(FileDescriptor fdo) { + if (fdo != null && fdo.valid()) { + int fd = fdAccess.get(fdo); + long handle = fdAccess.getHandle(fdo); + fdo.registerCleanup(new FileCleanable(fdo, CleanerFactory.cleaner(), fd, handle)); + } + } + + /** + * Unregister a Cleanable from the FileDescriptor. + * @param fdo the FileDescriptor; may be null + */ + static void unregister(FileDescriptor fdo) { + if (fdo != null) { + fdo.unregisterCleanup(); + } + } + + /** + * Constructor for a phantom cleanable reference. + * + * @param obj the object to monitor + * @param cleaner the cleaner + * @param fd file descriptor to close + * @param handle handle to close + */ + private FileCleanable(FileDescriptor obj, Cleaner cleaner, int fd, long handle) { + super(obj, cleaner); + this.fd = fd; + this.handle = handle; + } + + /** + * Close the native handle or fd. + */ + @Override + protected void performCleanup() { + try { + cleanupClose0(fd, handle); + } catch (IOException ioe) { + throw new UncheckedIOException("close", ioe); + } + } +} diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/io/FileDescriptor.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/java.base/share/classes/java/io/FileDescriptor.java Mon Feb 26 10:36:34 2018 -0800 @@ -0,0 +1,369 @@ +/* + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
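FileCleanable builds on the JDK-internal PhantomCleanable and the shared CleanerFactory cleaner. The publicly supported shape of the same last-chance-cleanup pattern is java.lang.ref.Cleaner; the sketch below is illustrative only (hypothetical class names, and close() here simply runs the action, whereas FileDescriptor.close() clears the registered cleanup and closes natively instead):

    import java.lang.ref.Cleaner;

    // A resource owner whose raw handle is closed by a Cleaner action if the
    // owner becomes unreachable without close() having been called.
    final class RawHandleOwner implements AutoCloseable {
        private static final Cleaner CLEANER = Cleaner.create();

        // The action must not capture the owner itself, or the owner could
        // never become phantom reachable and the cleanup would never run.
        private static final class CloseAction implements Runnable {
            private final int fd;                 // raw descriptor captured by value
            CloseAction(int fd) { this.fd = fd; }
            @Override public void run() { nativeClose(fd); }
        }

        private final Cleaner.Cleanable cleanable;

        RawHandleOwner(int fd) {
            this.cleanable = CLEANER.register(this, new CloseAction(fd));
        }

        @Override public void close() {
            cleanable.clean();                    // runs the action at most once, immediately
        }

        private static void nativeClose(int fd) { /* placeholder for a real close */ }
    }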
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.io; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +import jdk.internal.misc.JavaIOFileDescriptorAccess; +import jdk.internal.misc.SharedSecrets; +import jdk.internal.ref.PhantomCleanable; + +/** + * Instances of the file descriptor class serve as an opaque handle + * to the underlying machine-specific structure representing an open + * file, an open socket, or another source or sink of bytes. + * The main practical use for a file descriptor is to create a + * {@link FileInputStream} or {@link FileOutputStream} to contain it. + *

+ * Applications should not create their own file descriptors. + * + * @author Pavani Diwanji + * @since 1.0 + */ +public final class FileDescriptor { + + private int fd; + + private long handle; + + private Closeable parent; + private List otherParents; + private boolean closed; + + /** + * true, if file is opened for appending. + */ + private boolean append; + + static { + initIDs(); + } + + // Set up JavaIOFileDescriptorAccess in SharedSecrets + static { + SharedSecrets.setJavaIOFileDescriptorAccess( + new JavaIOFileDescriptorAccess() { + public void set(FileDescriptor fdo, int fd) { + fdo.set(fd); + } + + public int get(FileDescriptor fdo) { + return fdo.fd; + } + + public void setAppend(FileDescriptor fdo, boolean append) { + fdo.append = append; + } + + public boolean getAppend(FileDescriptor fdo) { + return fdo.append; + } + + public void close(FileDescriptor fdo) throws IOException { + fdo.close(); + } + + /* Register for a normal FileCleanable fd/handle cleanup. */ + public void registerCleanup(FileDescriptor fdo) { + FileCleanable.register(fdo); + } + + /* Register a custom PhantomCleanup. */ + public void registerCleanup(FileDescriptor fdo, + PhantomCleanable cleanup) { + fdo.registerCleanup(cleanup); + } + + public void unregisterCleanup(FileDescriptor fdo) { + fdo.unregisterCleanup(); + } + + public void setHandle(FileDescriptor fdo, long handle) { + fdo.setHandle(handle); + } + + public long getHandle(FileDescriptor fdo) { + return fdo.handle; + } + } + ); + } + + /** + * Cleanup in case FileDescriptor is not explicitly closed. + */ + private PhantomCleanable cleanup; + + /** + * Constructs an (invalid) FileDescriptor object. + * The fd or handle is set later. + */ + public FileDescriptor() { + fd = -1; + handle = -1; + } + + /** + * Used for standard input, output, and error only. + * For Windows the corresponding handle is initialized. + * For Unix the append mode is cached. + * @param fd the raw fd number (0, 1, 2) + */ + private FileDescriptor(int fd) { + this.fd = fd; + this.handle = getHandle(fd); + this.append = getAppend(fd); + } + + /** + * A handle to the standard input stream. Usually, this file + * descriptor is not used directly, but rather via the input stream + * known as {@code System.in}. + * + * @see java.lang.System#in + */ + public static final FileDescriptor in = new FileDescriptor(0); + + /** + * A handle to the standard output stream. Usually, this file + * descriptor is not used directly, but rather via the output stream + * known as {@code System.out}. + * @see java.lang.System#out + */ + public static final FileDescriptor out = new FileDescriptor(1); + + /** + * A handle to the standard error stream. Usually, this file + * descriptor is not used directly, but rather via the output stream + * known as {@code System.err}. + * + * @see java.lang.System#err + */ + public static final FileDescriptor err = new FileDescriptor(2); + + /** + * Tests if this file descriptor object is valid. + * + * @return {@code true} if the file descriptor object represents a + * valid, open file, socket, or other active I/O connection; + * {@code false} otherwise. + */ + public boolean valid() { + return (handle != -1) || (fd != -1); + } + + /** + * Force all system buffers to synchronize with the underlying + * device. This method returns after all modified data and + * attributes of this FileDescriptor have been written to the + * relevant device(s). 
In particular, if this FileDescriptor + * refers to a physical storage medium, such as a file in a file + * system, sync will not return until all in-memory modified copies + * of buffers associated with this FileDescriptor have been + * written to the physical medium. + * + * sync is meant to be used by code that requires physical + * storage (such as a file) to be in a known state For + * example, a class that provided a simple transaction facility + * might use sync to ensure that all changes to a file caused + * by a given transaction were recorded on a storage medium. + * + * sync only affects buffers downstream of this FileDescriptor. If + * any in-memory buffering is being done by the application (for + * example, by a BufferedOutputStream object), those buffers must + * be flushed into the FileDescriptor (for example, by invoking + * OutputStream.flush) before that data will be affected by sync. + * + * @exception SyncFailedException + * Thrown when the buffers cannot be flushed, + * or because the system cannot guarantee that all the + * buffers have been synchronized with physical media. + * @since 1.1 + */ + public native void sync() throws SyncFailedException; + + /* This routine initializes JNI field offsets for the class */ + private static native void initIDs(); + + /* + * On Windows return the handle for the standard streams. + */ + private static native long getHandle(int d); + + /** + * Returns true, if the file was opened for appending. + */ + private static native boolean getAppend(int fd); + + /** + * Set the fd. + * Used on Unix and for sockets on Windows and Unix. + * If setting to -1, clear the cleaner. + * The {@link #registerCleanup} method should be called for new fds. + * @param fd the raw fd or -1 to indicate closed + */ + @SuppressWarnings("unchecked") + synchronized void set(int fd) { + if (fd == -1 && cleanup != null) { + cleanup.clear(); + cleanup = null; + } + this.fd = fd; + } + + /** + * Set the handle. + * Used on Windows for regular files. + * If setting to -1, clear the cleaner. + * The {@link #registerCleanup} method should be called for new handles. + * @param handle the handle or -1 to indicate closed + */ + @SuppressWarnings("unchecked") + void setHandle(long handle) { + if (handle == -1 && cleanup != null) { + cleanup.clear(); + cleanup = null; + } + this.handle = handle; + } + + /** + * Register a cleanup for the current handle. + * Used directly in java.io and indirectly via fdAccess. + * The cleanup should be registered after the handle is set in the FileDescriptor. + * @param cleanable a PhantomCleanable to register + */ + @SuppressWarnings("unchecked") + synchronized void registerCleanup(PhantomCleanable cleanable) { + Objects.requireNonNull(cleanable, "cleanable"); + if (cleanup != null) { + cleanup.clear(); + } + cleanup = cleanable; + } + + /** + * Unregister a cleanup for the current raw fd or handle. + * Used directly in java.io and indirectly via fdAccess. + * Normally {@link #close()} should be used except in cases where + * it is certain the caller will close the raw fd and the cleanup + * must not close the raw fd. {@link #unregisterCleanup()} must be + * called before the raw fd is closed to prevent a race that makes + * it possible for the fd to be reallocated to another use and later + * the cleanup might be invoked. + */ + synchronized void unregisterCleanup() { + if (cleanup != null) { + cleanup.clear(); + } + cleanup = null; + } + + /** + * Close the raw file descriptor or handle, if it has not already been closed. 
+ * The native code sets the fd and handle to -1. + * Clear the cleaner so the close does not happen twice. + * Package private to allow it to be used in java.io. + * @throws IOException if close fails + */ + @SuppressWarnings("unchecked") + synchronized void close() throws IOException { + unregisterCleanup(); + close0(); + } + + /* + * Close the raw file descriptor or handle, if it has not already been closed + * and set the fd and handle to -1. + */ + private native void close0() throws IOException; + + /* + * Package private methods to track referents. + * If multiple streams point to the same FileDescriptor, we cycle + * through the list of all referents and call close() + */ + + /** + * Attach a Closeable to this FD for tracking. + * parent reference is added to otherParents when + * needed to make closeAll simpler. + */ + synchronized void attach(Closeable c) { + if (parent == null) { + // first caller gets to do this + parent = c; + } else if (otherParents == null) { + otherParents = new ArrayList<>(); + otherParents.add(parent); + otherParents.add(c); + } else { + otherParents.add(c); + } + } + + /** + * Cycle through all Closeables sharing this FD and call + * close() on each one. + * + * The caller closeable gets to call close0(). + */ + @SuppressWarnings("try") + synchronized void closeAll(Closeable releaser) throws IOException { + if (!closed) { + closed = true; + IOException ioe = null; + try (releaser) { + if (otherParents != null) { + for (Closeable referent : otherParents) { + try { + referent.close(); + } catch(IOException x) { + if (ioe == null) { + ioe = x; + } else { + ioe.addSuppressed(x); + } + } + } + } + } catch(IOException ex) { + /* + * If releaser close() throws IOException + * add other exceptions as suppressed. + */ + if (ioe != null) + ex.addSuppressed(ioe); + ioe = ex; + } finally { + if (ioe != null) + throw ioe; + } + } + } +} diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/io/FileInputStream.java --- a/src/java.base/share/classes/java/io/FileInputStream.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/io/FileInputStream.java Mon Feb 26 10:36:34 2018 -0800 @@ -25,7 +25,6 @@ package java.io; -import java.lang.reflect.Method; import java.nio.channels.FileChannel; import sun.nio.ch.FileChannelImpl; @@ -158,7 +157,7 @@ open(name); altFinalizer = AltFinalizer.get(this); if (altFinalizer == null) { - fd.registerCleanup(); // open set the fd, register the cleanup + FileCleanable.register(fd); // open set the fd, register the cleanup } } diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/io/FileOutputStream.java --- a/src/java.base/share/classes/java/io/FileOutputStream.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/io/FileOutputStream.java Mon Feb 26 10:36:34 2018 -0800 @@ -25,7 +25,6 @@ package java.io; -import java.lang.reflect.Method; import java.nio.channels.FileChannel; import jdk.internal.misc.SharedSecrets; import jdk.internal.misc.JavaIOFileDescriptorAccess; @@ -238,7 +237,7 @@ open(name, append); altFinalizer = AltFinalizer.get(this); if (altFinalizer == null) { - fd.registerCleanup(); // open set the fd, register the cleanup + FileCleanable.register(fd); // open sets the fd, register the cleanup } } diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/io/RandomAccessFile.java --- a/src/java.base/share/classes/java/io/RandomAccessFile.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/io/RandomAccessFile.java Mon Feb 26 
10:36:34 2018 -0800 @@ -257,7 +257,7 @@ fd.attach(this); path = name; open(name, imode); - fd.registerCleanup(); // open sets the fd, register the cleanup + FileCleanable.register(fd); // open sets the fd, register the cleanup } /** diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/lang/ClassLoader.java --- a/src/java.base/share/classes/java/lang/ClassLoader.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/lang/ClassLoader.java Mon Feb 26 10:36:34 2018 -0800 @@ -1922,7 +1922,7 @@ case 3: String msg = "getSystemClassLoader cannot be called during the system class loader instantiation"; throw new IllegalStateException(msg); - case 4: + default: // system fully initialized assert VM.isBooted() && scl != null; SecurityManager sm = System.getSecurityManager(); @@ -1930,8 +1930,6 @@ checkClassLoaderPermission(scl, Reflection.getCallerClass()); } return scl; - default: - throw new InternalError("should not reach here"); } } diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/lang/Runtime.java --- a/src/java.base/share/classes/java/lang/Runtime.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/lang/Runtime.java Mon Feb 26 10:36:34 2018 -0800 @@ -79,18 +79,14 @@ * serves as a status code; by convention, a nonzero status code indicates * abnormal termination. * - *

The virtual machine's shutdown sequence consists of two phases. In - * the first phase all registered {@link #addShutdownHook shutdown hooks}, - * if any, are started in some unspecified order and allowed to run - * concurrently until they finish. In the second phase all uninvoked - * finalizers are run if {@link #runFinalizersOnExit finalization-on-exit} - * has been enabled. Once this is done the virtual machine {@link #halt halts}. + *

All registered {@linkplain #addShutdownHook shutdown hooks}, if any, + * are started in some unspecified order and allowed to run concurrently + * until they finish. Once this is done the virtual machine + * {@linkplain #halt halts}. * - *

If this method is invoked after the virtual machine has begun its - * shutdown sequence then if shutdown hooks are being run this method will - * block indefinitely. If shutdown hooks have already been run and on-exit - * finalization has been enabled then this method halts the virtual machine - * with the given status code if the status is nonzero; otherwise, it + *

If this method is invoked after all shutdown hooks have already + * been run and the status is nonzero then this method halts the + * virtual machine with the given status code. Otherwise, this method * blocks indefinitely. * *

The {@link System#exit(int) System.exit} method is the @@ -109,7 +105,6 @@ * @see java.lang.SecurityManager#checkExit(int) * @see #addShutdownHook * @see #removeShutdownHook - * @see #runFinalizersOnExit * @see #halt(int) */ public void exit(int status) { @@ -142,10 +137,9 @@ * thread. When the virtual machine begins its shutdown sequence it will * start all registered shutdown hooks in some unspecified order and let * them run concurrently. When all the hooks have finished it will then - * run all uninvoked finalizers if finalization-on-exit has been enabled. - * Finally, the virtual machine will halt. Note that daemon threads will - * continue to run during the shutdown sequence, as will non-daemon threads - * if shutdown was initiated by invoking the {@link #exit exit} method. + * halt. Note that daemon threads will continue to run during the shutdown + * sequence, as will non-daemon threads if shutdown was initiated by + * invoking the {@link #exit exit} method. * *

Once the shutdown sequence has begun it can be stopped only by * invoking the {@link #halt halt} method, which forcibly @@ -255,10 +249,9 @@ * *

This method should be used with extreme caution. Unlike the * {@link #exit exit} method, this method does not cause shutdown - * hooks to be started and does not run uninvoked finalizers if - * finalization-on-exit has been enabled. If the shutdown sequence has - * already been initiated then this method does not wait for any running - * shutdown hooks or finalizers to finish their work. + * hooks to be started. If the shutdown sequence has already been + * initiated then this method does not wait for any running + * shutdown hooks to finish their work. * * @param status * Termination status. By convention, a nonzero status code @@ -286,46 +279,6 @@ } /** - * Enable or disable finalization on exit; doing so specifies that the - * finalizers of all objects that have finalizers that have not yet been - * automatically invoked are to be run before the Java runtime exits. - * By default, finalization on exit is disabled. - * - *

If there is a security manager, - * its {@code checkExit} method is first called - * with 0 as its argument to ensure the exit is allowed. - * This could result in a SecurityException. - * - * @param value true to enable finalization on exit, false to disable - * @deprecated This method is inherently unsafe. It may result in - * finalizers being called on live objects while other threads are - * concurrently manipulating those objects, resulting in erratic - * behavior or deadlock. - * This method is subject to removal in a future version of Java SE. - * - * @throws SecurityException - * if a security manager exists and its {@code checkExit} - * method doesn't allow the exit. - * - * @see java.lang.Runtime#exit(int) - * @see java.lang.Runtime#gc() - * @see java.lang.SecurityManager#checkExit(int) - * @since 1.1 - */ - @Deprecated(since="1.2", forRemoval=true) - public static void runFinalizersOnExit(boolean value) { - SecurityManager security = System.getSecurityManager(); - if (security != null) { - try { - security.checkExit(0); - } catch (SecurityException e) { - throw new SecurityException("runFinalizersOnExit"); - } - } - Shutdown.setRunFinalizersOnExit(value); - } - - /** * Executes the specified string command in a separate process. * *

This is a convenience method. An invocation of the form diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/lang/Shutdown.java --- a/src/java.base/share/classes/java/lang/Shutdown.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/lang/Shutdown.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2005, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,35 +26,33 @@ package java.lang; +import jdk.internal.misc.VM; + /** * Package-private utility class containing data structures and logic * governing the virtual-machine shutdown sequence. * * @author Mark Reinhold * @since 1.3 + * + * @see java.io.Console + * @see ApplicationShutdownHooks + * @see java.io.DeleteOnExitHook */ class Shutdown { - /* Shutdown state */ - private static final int RUNNING = 0; - private static final int HOOKS = 1; - private static final int FINALIZERS = 2; - private static int state = RUNNING; - - /* Should we run all finalizers upon exit? */ - private static boolean runFinalizersOnExit = false; - // The system shutdown hooks are registered with a predefined slot. // The list of shutdown hooks is as follows: // (0) Console restore hook - // (1) Application hooks + // (1) ApplicationShutdownHooks that invokes all registered application + // shutdown hooks and waits until they finish // (2) DeleteOnExit hook private static final int MAX_SYSTEM_HOOKS = 10; private static final Runnable[] hooks = new Runnable[MAX_SYSTEM_HOOKS]; // the index of the currently running shutdown hook to the hooks array - private static int currentRunningHook = 0; + private static int currentRunningHook = -1; /* The preceding static fields are protected by this lock */ private static class Lock { }; @@ -63,17 +61,9 @@ /* Lock object for the native halt method */ private static Object haltLock = new Lock(); - /* Invoked by Runtime.runFinalizersOnExit */ - static void setRunFinalizersOnExit(boolean run) { - synchronized (lock) { - runFinalizersOnExit = run; - } - } - - /** - * Add a new shutdown hook. Checks the shutdown state and the hook itself, - * but does not do any security checks. + * Add a new system shutdown hook. Checks the shutdown state and + * the hook itself, but does not do any security checks. * * The registerShutdownInProgress parameter should be false except * registering the DeleteOnExitHook since the first file may @@ -92,15 +82,18 @@ * already passes the given slot */ static void add(int slot, boolean registerShutdownInProgress, Runnable hook) { + if (slot < 0 || slot >= MAX_SYSTEM_HOOKS) { + throw new IllegalArgumentException("Invalid slot: " + slot); + } synchronized (lock) { if (hooks[slot] != null) throw new InternalError("Shutdown hook at slot " + slot + " already registered"); if (!registerShutdownInProgress) { - if (state > RUNNING) + if (currentRunningHook >= 0) throw new IllegalStateException("Shutdown in progress"); } else { - if (state > HOOKS || (state == HOOKS && slot <= currentRunningHook)) + if (VM.isShutdown() || slot <= currentRunningHook) throw new IllegalStateException("Shutdown in progress"); } @@ -108,9 +101,23 @@ } } - /* Run all registered shutdown hooks + /* Run all system shutdown hooks. + * + * The system shutdown hooks are run in the thread synchronized on + * Shutdown.class. 
Other threads calling Runtime::exit, Runtime::halt + * or JNI DestroyJavaVM will block indefinitely. + * + * ApplicationShutdownHooks is registered as one single hook that starts + * all application shutdown hooks and waits until they finish. */ private static void runHooks() { + synchronized (lock) { + /* Guard against the possibility of a daemon thread invoking exit + * after DestroyJavaVM initiates the shutdown sequence + */ + if (VM.isShutdown()) return; + } + for (int i=0; i < MAX_SYSTEM_HOOKS; i++) { try { Runnable hook; @@ -121,13 +128,16 @@ hook = hooks[i]; } if (hook != null) hook.run(); - } catch(Throwable t) { + } catch (Throwable t) { if (t instanceof ThreadDeath) { ThreadDeath td = (ThreadDeath)t; throw td; } } } + + // set shutdown state + VM.shutdown(); } /* The halt method is synchronized on the halt lock @@ -142,74 +152,22 @@ static native void halt0(int status); - /* Wormhole for invoking java.lang.ref.Finalizer.runAllFinalizers */ - private static native void runAllFinalizers(); - - - /* The actual shutdown sequence is defined here. - * - * If it weren't for runFinalizersOnExit, this would be simple -- we'd just - * run the hooks and then halt. Instead we need to keep track of whether - * we're running hooks or finalizers. In the latter case a finalizer could - * invoke exit(1) to cause immediate termination, while in the former case - * any further invocations of exit(n), for any n, simply stall. Note that - * if on-exit finalizers are enabled they're run iff the shutdown is - * initiated by an exit(0); they're never run on exit(n) for n != 0 or in - * response to SIGINT, SIGTERM, etc. - */ - private static void sequence() { - synchronized (lock) { - /* Guard against the possibility of a daemon thread invoking exit - * after DestroyJavaVM initiates the shutdown sequence - */ - if (state != HOOKS) return; - } - runHooks(); - boolean rfoe; - synchronized (lock) { - state = FINALIZERS; - rfoe = runFinalizersOnExit; - } - if (rfoe) runAllFinalizers(); - } - - /* Invoked by Runtime.exit, which does all the security checks. * Also invoked by handlers for system-provided termination events, * which should pass a nonzero status code. */ static void exit(int status) { - boolean runMoreFinalizers = false; synchronized (lock) { - if (status != 0) runFinalizersOnExit = false; - switch (state) { - case RUNNING: /* Initiate shutdown */ - state = HOOKS; - break; - case HOOKS: /* Stall and halt */ - break; - case FINALIZERS: - if (status != 0) { - /* Halt immediately on nonzero status */ - halt(status); - } else { - /* Compatibility with old behavior: - * Run more finalizers and then halt - */ - runMoreFinalizers = runFinalizersOnExit; - } - break; + if (status != 0 && VM.isShutdown()) { + /* Halt immediately on nonzero status */ + halt(status); } } - if (runMoreFinalizers) { - runAllFinalizers(); - halt(status); - } synchronized (Shutdown.class) { /* Synchronize on the class object, causing any other thread * that attempts to initiate shutdown to stall indefinitely */ - sequence(); + runHooks(); halt(status); } } @@ -220,18 +178,8 @@ * actually halt the VM. 
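For application code nothing changes: hooks are still registered through Runtime::addShutdownHook, and ApplicationShutdownHooks remains the single system hook (slot 1) that starts them and waits for them to finish. A minimal example of the behavior being preserved:

    public class ShutdownHookExample {
        public static void main(String[] args) {
            // Started by ApplicationShutdownHooks once the VM begins shutting down;
            // application hooks run concurrently and in no specified order.
            Runtime.getRuntime().addShutdownHook(new Thread(() ->
                    System.out.println("shutting down")));
            System.exit(0);   // Shutdown.exit -> runHooks() -> halt()
        }
    }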
*/ static void shutdown() { - synchronized (lock) { - switch (state) { - case RUNNING: /* Initiate shutdown */ - state = HOOKS; - break; - case HOOKS: /* Stall and then return */ - case FINALIZERS: - break; - } - } synchronized (Shutdown.class) { - sequence(); + runHooks(); } } diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/lang/System.java --- a/src/java.base/share/classes/java/lang/System.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/lang/System.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1994, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1994, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -1766,38 +1766,6 @@ } /** - * Enable or disable finalization on exit; doing so specifies that the - * finalizers of all objects that have finalizers that have not yet been - * automatically invoked are to be run before the Java runtime exits. - * By default, finalization on exit is disabled. - * - *

If there is a security manager, - * its checkExit method is first called - * with 0 as its argument to ensure the exit is allowed. - * This could result in a SecurityException. - * - * @deprecated This method is inherently unsafe. It may result in - * finalizers being called on live objects while other threads are - * concurrently manipulating those objects, resulting in erratic - * behavior or deadlock. - * This method is subject to removal in a future version of Java SE. - * @param value indicating enabling or disabling of finalization - * @throws SecurityException - * if a security manager exists and its checkExit - * method doesn't allow the exit. - * - * @see java.lang.Runtime#exit(int) - * @see java.lang.Runtime#gc() - * @see java.lang.SecurityManager#checkExit(int) - * @since 1.1 - */ - @Deprecated(since="1.2", forRemoval=true) - @SuppressWarnings("removal") - public static void runFinalizersOnExit(boolean value) { - Runtime.runFinalizersOnExit(value); - } - - /** * Loads the native library specified by the filename argument. The filename * argument must be an absolute path name. * diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/lang/doc-files/threadPrimitiveDeprecation.html --- a/src/java.base/share/classes/java/lang/doc-files/threadPrimitiveDeprecation.html Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/lang/doc-files/threadPrimitiveDeprecation.html Mon Feb 26 10:36:34 2018 -0800 @@ -1,6 +1,6 @@

diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/lang/invoke/BootstrapMethodInvoker.java --- a/src/java.base/share/classes/java/lang/invoke/BootstrapMethodInvoker.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/lang/invoke/BootstrapMethodInvoker.java Mon Feb 26 10:36:34 2018 -0800 @@ -37,6 +37,7 @@ import static java.lang.invoke.MethodHandles.Lookup.IMPL_LOOKUP; final class BootstrapMethodInvoker { + /** * Factored code for invoking a bootstrap method for invokedynamic * or a dynamic constant. @@ -76,14 +77,30 @@ bootstrapMethod = null; } try { + // As an optimization we special case various known BSMs, + // such as LambdaMetafactory::metafactory and + // StringConcatFactory::makeConcatWithConstants. + // + // By providing static type information or even invoking + // exactly, we avoid emitting code to perform runtime + // checking. info = maybeReBox(info); if (info == null) { // VM is allowed to pass up a null meaning no BSM args - result = bootstrapMethod.invoke(caller, name, type); + result = invoke(bootstrapMethod, caller, name, type); } else if (!info.getClass().isArray()) { // VM is allowed to pass up a single BSM arg directly - result = bootstrapMethod.invoke(caller, name, type, info); + + // Call to StringConcatFactory::makeConcatWithConstants + // with empty constant arguments? + if (isStringConcatFactoryBSM(bootstrapMethod.type())) { + result = (CallSite)bootstrapMethod + .invokeExact(caller, name, (MethodType)type, + (String)info, new Object[0]); + } else { + result = invoke(bootstrapMethod, caller, name, type, info); + } } else if (info.getClass() == int[].class) { // VM is allowed to pass up a pair {argc, index} @@ -103,67 +120,52 @@ // VM is allowed to pass up a full array of resolved BSM args Object[] argv = (Object[]) info; maybeReBoxElements(argv); - switch (argv.length) { - case 0: - result = bootstrapMethod.invoke(caller, name, type); - break; - case 1: - result = bootstrapMethod.invoke(caller, name, type, - argv[0]); - break; - case 2: - result = bootstrapMethod.invoke(caller, name, type, - argv[0], argv[1]); - break; - case 3: - // Special case the LambdaMetafactory::metafactory BSM - // - // By invoking exactly, we can avoid generating a number of - // classes on first (and subsequent) lambda initialization, - // most of which won't be shared with other invoke uses. 
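The rationale for these fast paths: MethodHandle.invoke() adapts arguments and the return value at run time (asType conversions, boxing, casts), while invokeExact() requires the call site's static types to match the handle's type exactly and therefore needs no adaptation code. A standalone illustration of the difference, independent of the bootstrap machinery:

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;

    public class InvokeExactDemo {
        public static void main(String[] args) throws Throwable {
            MethodHandle concat = MethodHandles.lookup().findVirtual(
                    String.class, "concat",
                    MethodType.methodType(String.class, String.class));

            // invoke(): arguments and return value may be adapted at run time.
            Object loose = concat.invoke((Object) "foo", (Object) "bar");

            // invokeExact(): the call site's (String, String)String signature must
            // match the handle's type exactly, so no adaptation is generated.
            String exact = (String) concat.invokeExact("foo", "bar");

            System.out.println(loose + " " + exact);
        }
    }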
- MethodType bsmType = bootstrapMethod.type(); - if (isLambdaMetafactoryIndyBSM(bsmType)) { - result = (CallSite)bootstrapMethod - .invokeExact(caller, name, (MethodType)type, (MethodType)argv[0], - (MethodHandle)argv[1], (MethodType)argv[2]); - } else if (isLambdaMetafactoryCondyBSM(bsmType)) { - result = bootstrapMethod - .invokeExact(caller, name, (Class)type, (MethodType)argv[0], - (MethodHandle)argv[1], (MethodType)argv[2]); - } else { - result = bootstrapMethod.invoke(caller, name, type, - argv[0], argv[1], argv[2]); - } - break; - case 4: - result = bootstrapMethod.invoke(caller, name, type, - argv[0], argv[1], argv[2], argv[3]); - break; - case 5: - result = bootstrapMethod.invoke(caller, name, type, - argv[0], argv[1], argv[2], argv[3], argv[4]); - break; - case 6: - result = bootstrapMethod.invoke(caller, name, type, - argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]); - break; - default: - final int NON_SPREAD_ARG_COUNT = 3; // (caller, name, type) - final int MAX_SAFE_SIZE = MethodType.MAX_MH_ARITY / 2 - NON_SPREAD_ARG_COUNT; - if (argv.length >= MAX_SAFE_SIZE) { - // to be on the safe side, use invokeWithArguments which handles jumbo lists - Object[] newargv = new Object[NON_SPREAD_ARG_COUNT + argv.length]; - newargv[0] = caller; - newargv[1] = name; - newargv[2] = type; - System.arraycopy(argv, 0, newargv, NON_SPREAD_ARG_COUNT, argv.length); - result = bootstrapMethod.invokeWithArguments(newargv); + + MethodType bsmType = bootstrapMethod.type(); + if (isLambdaMetafactoryIndyBSM(bsmType) && argv.length == 3) { + result = (CallSite)bootstrapMethod + .invokeExact(caller, name, (MethodType)type, (MethodType)argv[0], + (MethodHandle)argv[1], (MethodType)argv[2]); + } else if (isLambdaMetafactoryCondyBSM(bsmType) && argv.length == 3) { + result = bootstrapMethod + .invokeExact(caller, name, (Class)type, (MethodType)argv[0], + (MethodHandle)argv[1], (MethodType)argv[2]); + } else if (isStringConcatFactoryBSM(bsmType) && argv.length >= 1) { + String recipe = (String)argv[0]; + Object[] shiftedArgs = Arrays.copyOfRange(argv, 1, argv.length); + result = (CallSite)bootstrapMethod.invokeExact(caller, name, (MethodType)type, recipe, shiftedArgs); + } else { + switch (argv.length) { + case 0: + result = invoke(bootstrapMethod, caller, name, type); + break; + case 1: + result = invoke(bootstrapMethod, caller, name, type, + argv[0]); break; - } - MethodType invocationType = MethodType.genericMethodType(NON_SPREAD_ARG_COUNT + argv.length); - MethodHandle typedBSM = bootstrapMethod.asType(invocationType); - MethodHandle spreader = invocationType.invokers().spreadInvoker(NON_SPREAD_ARG_COUNT); - result = spreader.invokeExact(typedBSM, (Object) caller, (Object) name, type, argv); + case 2: + result = invoke(bootstrapMethod, caller, name, type, + argv[0], argv[1]); + break; + case 3: + result = invoke(bootstrapMethod, caller, name, type, + argv[0], argv[1], argv[2]); + break; + case 4: + result = invoke(bootstrapMethod, caller, name, type, + argv[0], argv[1], argv[2], argv[3]); + break; + case 5: + result = invoke(bootstrapMethod, caller, name, type, + argv[0], argv[1], argv[2], argv[3], argv[4]); + break; + case 6: + result = invoke(bootstrapMethod, caller, name, type, + argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]); + break; + default: + result = invokeWithManyArguments(bootstrapMethod, caller, name, type, argv); + } } } if (resultType.isPrimitive()) { @@ -191,12 +193,114 @@ } } + // If we don't provide static type information for type, we'll generate runtime + // checks. 
Let's try not to... + + private static Object invoke(MethodHandle bootstrapMethod, Lookup caller, + String name, Object type) throws Throwable { + if (type instanceof Class) { + return bootstrapMethod.invoke(caller, name, (Class)type); + } else { + return bootstrapMethod.invoke(caller, name, (MethodType)type); + } + } + + private static Object invoke(MethodHandle bootstrapMethod, Lookup caller, + String name, Object type, Object arg0) throws Throwable { + if (type instanceof Class) { + return bootstrapMethod.invoke(caller, name, (Class)type, arg0); + } else { + return bootstrapMethod.invoke(caller, name, (MethodType)type, arg0); + } + } + + private static Object invoke(MethodHandle bootstrapMethod, Lookup caller, String name, + Object type, Object arg0, Object arg1) throws Throwable { + if (type instanceof Class) { + return bootstrapMethod.invoke(caller, name, (Class)type, arg0, arg1); + } else { + return bootstrapMethod.invoke(caller, name, (MethodType)type, arg0, arg1); + } + } + + private static Object invoke(MethodHandle bootstrapMethod, Lookup caller, String name, + Object type, Object arg0, Object arg1, + Object arg2) throws Throwable { + if (type instanceof Class) { + return bootstrapMethod.invoke(caller, name, (Class)type, arg0, arg1, arg2); + } else { + return bootstrapMethod.invoke(caller, name, (MethodType)type, arg0, arg1, arg2); + } + } + + private static Object invoke(MethodHandle bootstrapMethod, Lookup caller, String name, + Object type, Object arg0, Object arg1, + Object arg2, Object arg3) throws Throwable { + if (type instanceof Class) { + return bootstrapMethod.invoke(caller, name, (Class)type, arg0, arg1, arg2, arg3); + } else { + return bootstrapMethod.invoke(caller, name, (MethodType)type, arg0, arg1, arg2, arg3); + } + } + + private static Object invoke(MethodHandle bootstrapMethod, Lookup caller, + String name, Object type, Object arg0, Object arg1, + Object arg2, Object arg3, Object arg4) throws Throwable { + if (type instanceof Class) { + return bootstrapMethod.invoke(caller, name, (Class)type, arg0, arg1, arg2, arg3, arg4); + } else { + return bootstrapMethod.invoke(caller, name, (MethodType)type, arg0, arg1, arg2, arg3, arg4); + } + } + + private static Object invoke(MethodHandle bootstrapMethod, Lookup caller, + String name, Object type, Object arg0, Object arg1, + Object arg2, Object arg3, Object arg4, Object arg5) throws Throwable { + if (type instanceof Class) { + return bootstrapMethod.invoke(caller, name, (Class)type, arg0, arg1, arg2, arg3, arg4, arg5); + } else { + return bootstrapMethod.invoke(caller, name, (MethodType)type, arg0, arg1, arg2, arg3, arg4, arg5); + } + } + + private static Object invokeWithManyArguments(MethodHandle bootstrapMethod, Lookup caller, + String name, Object type, Object[] argv) throws Throwable { + final int NON_SPREAD_ARG_COUNT = 3; // (caller, name, type) + final int MAX_SAFE_SIZE = MethodType.MAX_MH_ARITY / 2 - NON_SPREAD_ARG_COUNT; + if (argv.length >= MAX_SAFE_SIZE) { + // to be on the safe side, use invokeWithArguments which handles jumbo lists + Object[] newargv = new Object[NON_SPREAD_ARG_COUNT + argv.length]; + newargv[0] = caller; + newargv[1] = name; + newargv[2] = type; + System.arraycopy(argv, 0, newargv, NON_SPREAD_ARG_COUNT, argv.length); + return bootstrapMethod.invokeWithArguments(newargv); + } else { + MethodType invocationType = MethodType.genericMethodType(NON_SPREAD_ARG_COUNT + argv.length); + MethodHandle typedBSM = bootstrapMethod.asType(invocationType); + MethodHandle spreader = 
invocationType.invokers().spreadInvoker(NON_SPREAD_ARG_COUNT); + return spreader.invokeExact(typedBSM, (Object) caller, (Object) name, type, argv); + } + } + private static final MethodType LMF_INDY_MT = MethodType.methodType(CallSite.class, Lookup.class, String.class, MethodType.class, MethodType.class, MethodHandle.class, MethodType.class); private static final MethodType LMF_CONDY_MT = MethodType.methodType(Object.class, Lookup.class, String.class, Class.class, MethodType.class, MethodHandle.class, MethodType.class); + private static final MethodType SCF_MT = MethodType.methodType(CallSite.class, + Lookup.class, String.class, MethodType.class, String.class, Object[].class); + + /** + * @return true iff the BSM method type exactly matches + * {@see java.lang.invoke.StringConcatFactory#makeConcatWithConstants(MethodHandles.Lookup, + * String,MethodType,String,Object...))} + */ + private static boolean isStringConcatFactoryBSM(MethodType bsmType) { + return bsmType == SCF_MT; + } + /** * @return true iff the BSM method type exactly matches * {@see java.lang.invoke.LambdaMetafactory#metafactory( diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/lang/invoke/LambdaForm.java --- a/src/java.base/share/classes/java/lang/invoke/LambdaForm.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/lang/invoke/LambdaForm.java Mon Feb 26 10:36:34 2018 -0800 @@ -1734,70 +1734,75 @@ private static final @Stable NamedFunction[] NF_identity = new NamedFunction[TYPE_LIMIT]; private static final @Stable NamedFunction[] NF_zero = new NamedFunction[TYPE_LIMIT]; - private static synchronized void createFormsFor(BasicType type) { - final int ord = type.ordinal(); - LambdaForm idForm = LF_identity[ord]; - if (idForm != null) { - return; - } - char btChar = type.basicTypeChar(); - boolean isVoid = (type == V_TYPE); - Class btClass = type.btClass; - MethodType zeType = MethodType.methodType(btClass); - MethodType idType = (isVoid) ? zeType : MethodType.methodType(btClass, btClass); + private static final Object createFormsLock = new Object(); + private static void createFormsFor(BasicType type) { + // Avoid racy initialization during bootstrap + UNSAFE.ensureClassInitialized(BoundMethodHandle.class); + synchronized (createFormsLock) { + final int ord = type.ordinal(); + LambdaForm idForm = LF_identity[ord]; + if (idForm != null) { + return; + } + char btChar = type.basicTypeChar(); + boolean isVoid = (type == V_TYPE); + Class btClass = type.btClass; + MethodType zeType = MethodType.methodType(btClass); + MethodType idType = (isVoid) ? zeType : MethodType.methodType(btClass, btClass); - // Look up symbolic names. It might not be necessary to have these, - // but if we need to emit direct references to bytecodes, it helps. - // Zero is built from a call to an identity function with a constant zero input. - MemberName idMem = new MemberName(LambdaForm.class, "identity_"+btChar, idType, REF_invokeStatic); - MemberName zeMem = null; - try { - idMem = IMPL_NAMES.resolveOrFail(REF_invokeStatic, idMem, null, NoSuchMethodException.class); - if (!isVoid) { - zeMem = new MemberName(LambdaForm.class, "zero_"+btChar, zeType, REF_invokeStatic); - zeMem = IMPL_NAMES.resolveOrFail(REF_invokeStatic, zeMem, null, NoSuchMethodException.class); + // Look up symbolic names. It might not be necessary to have these, + // but if we need to emit direct references to bytecodes, it helps. + // Zero is built from a call to an identity function with a constant zero input. 
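[Editor's note] The isStringConcatFactoryBSM check added above compares MethodType instances with ==, as the existing LambdaMetafactory checks already do. This works because the MethodType factory methods intern their results; a brief sketch of that property (an implementation detail the check relies on):

    import java.lang.invoke.CallSite;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;

    public class MethodTypeInterning {
        public static void main(String[] args) {
            MethodType a = MethodType.methodType(CallSite.class,
                    MethodHandles.Lookup.class, String.class, MethodType.class,
                    String.class, Object[].class);
            MethodType b = MethodType.methodType(CallSite.class,
                    MethodHandles.Lookup.class, String.class, MethodType.class,
                    String.class, Object[].class);

            // Structurally equal MethodTypes are also reference-equal,
            // so a cheap == test can recognize a known BSM signature.
            System.out.println(a == b);        // true
            System.out.println(a.equals(b));   // true
        }
    }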
+ MemberName idMem = new MemberName(LambdaForm.class, "identity_"+btChar, idType, REF_invokeStatic); + MemberName zeMem = null; + try { + idMem = IMPL_NAMES.resolveOrFail(REF_invokeStatic, idMem, null, NoSuchMethodException.class); + if (!isVoid) { + zeMem = new MemberName(LambdaForm.class, "zero_"+btChar, zeType, REF_invokeStatic); + zeMem = IMPL_NAMES.resolveOrFail(REF_invokeStatic, zeMem, null, NoSuchMethodException.class); + } + } catch (IllegalAccessException|NoSuchMethodException ex) { + throw newInternalError(ex); } - } catch (IllegalAccessException|NoSuchMethodException ex) { - throw newInternalError(ex); - } - NamedFunction idFun; - LambdaForm zeForm; - NamedFunction zeFun; + NamedFunction idFun; + LambdaForm zeForm; + NamedFunction zeFun; - // Create the LFs and NamedFunctions. Precompiling LFs to byte code is needed to break circular - // bootstrap dependency on this method in case we're interpreting LFs - if (isVoid) { - Name[] idNames = new Name[] { argument(0, L_TYPE) }; - idForm = new LambdaForm(1, idNames, VOID_RESULT, Kind.IDENTITY); - idForm.compileToBytecode(); - idFun = new NamedFunction(idMem, SimpleMethodHandle.make(idMem.getInvocationType(), idForm)); + // Create the LFs and NamedFunctions. Precompiling LFs to byte code is needed to break circular + // bootstrap dependency on this method in case we're interpreting LFs + if (isVoid) { + Name[] idNames = new Name[] { argument(0, L_TYPE) }; + idForm = new LambdaForm(1, idNames, VOID_RESULT, Kind.IDENTITY); + idForm.compileToBytecode(); + idFun = new NamedFunction(idMem, SimpleMethodHandle.make(idMem.getInvocationType(), idForm)); - zeForm = idForm; - zeFun = idFun; - } else { - Name[] idNames = new Name[] { argument(0, L_TYPE), argument(1, type) }; - idForm = new LambdaForm(2, idNames, 1, Kind.IDENTITY); - idForm.compileToBytecode(); - idFun = new NamedFunction(idMem, SimpleMethodHandle.make(idMem.getInvocationType(), idForm), - MethodHandleImpl.Intrinsic.IDENTITY); + zeForm = idForm; + zeFun = idFun; + } else { + Name[] idNames = new Name[] { argument(0, L_TYPE), argument(1, type) }; + idForm = new LambdaForm(2, idNames, 1, Kind.IDENTITY); + idForm.compileToBytecode(); + idFun = new NamedFunction(idMem, SimpleMethodHandle.make(idMem.getInvocationType(), idForm), + MethodHandleImpl.Intrinsic.IDENTITY); - Object zeValue = Wrapper.forBasicType(btChar).zero(); - Name[] zeNames = new Name[] { argument(0, L_TYPE), new Name(idFun, zeValue) }; - zeForm = new LambdaForm(1, zeNames, 1, Kind.ZERO); - zeForm.compileToBytecode(); - zeFun = new NamedFunction(zeMem, SimpleMethodHandle.make(zeMem.getInvocationType(), zeForm), - MethodHandleImpl.Intrinsic.ZERO); - } + Object zeValue = Wrapper.forBasicType(btChar).zero(); + Name[] zeNames = new Name[] { argument(0, L_TYPE), new Name(idFun, zeValue) }; + zeForm = new LambdaForm(1, zeNames, 1, Kind.ZERO); + zeForm.compileToBytecode(); + zeFun = new NamedFunction(zeMem, SimpleMethodHandle.make(zeMem.getInvocationType(), zeForm), + MethodHandleImpl.Intrinsic.ZERO); + } - LF_zero[ord] = zeForm; - NF_zero[ord] = zeFun; - LF_identity[ord] = idForm; - NF_identity[ord] = idFun; + LF_zero[ord] = zeForm; + NF_zero[ord] = zeFun; + LF_identity[ord] = idForm; + NF_identity[ord] = idFun; - assert(idFun.isIdentity()); - assert(zeFun.isConstantZero()); - assert(new Name(zeFun).isConstantZero()); + assert(idFun.isIdentity()); + assert(zeFun.isConstantZero()); + assert(new Name(zeFun).isConstantZero()); + } } // Avoid appealing to ValueConversions at bootstrap time: diff -r bec86eb4a71a -r 206a6f728ce5 
src/java.base/share/classes/java/lang/invoke/MethodHandleNatives.java --- a/src/java.base/share/classes/java/lang/invoke/MethodHandleNatives.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/lang/invoke/MethodHandleNatives.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -643,10 +643,7 @@ static boolean canBeCalledVirtual(MemberName mem) { assert(mem.isInvocable()); - Class defc = mem.getDeclaringClass(); switch (mem.getName()) { - case "checkMemberAccess": - return canBeCalledVirtual(mem, java.lang.SecurityManager.class); case "getContextClassLoader": return canBeCalledVirtual(mem, java.lang.Thread.class); } diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/lang/invoke/StringConcatFactory.java --- a/src/java.base/share/classes/java/lang/invoke/StringConcatFactory.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/lang/invoke/StringConcatFactory.java Mon Feb 26 10:36:34 2018 -0800 @@ -373,6 +373,12 @@ } } + // StringConcatFactory bootstrap methods are startup sensitive, and may be + // special cased in java.lang.invokeBootstrapMethodInvoker to ensure + // methods are invoked with exact type information to avoid generating + // code for runtime checks. Take care any changes or additions here are + // reflected there as appropriate. + /** * Facilitates the creation of optimized String concatenation methods, that * can be used to efficiently concatenate a known number of arguments of diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/lang/ref/Finalizer.java --- a/src/java.base/share/classes/java/lang/ref/Finalizer.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/lang/ref/Finalizer.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -66,7 +66,7 @@ new Finalizer(finalizee); } - private void deregisterAndRunFinalizer(JavaLangAccess jla) { + private void runFinalizer(JavaLangAccess jla) { synchronized (lock) { if (this.next == this) // already finalized return; @@ -80,17 +80,14 @@ this.prev = null; this.next = this; // mark as finalized } - runFinalizer(jla); - } - private void runFinalizer(JavaLangAccess jla) { try { Object finalizee = this.get(); if (finalizee != null && !(finalizee instanceof java.lang.Enum)) { jla.invokeFinalize(finalizee); - /* Clear stack slot containing this variable, to decrease - the chances of false retention with a conservative GC */ + // Clear stack slot containing this variable, to decrease + // the chances of false retention with a conservative GC finalizee = null; } } catch (Throwable x) { } @@ -98,17 +95,14 @@ } /* Create a privileged secondary finalizer thread in the system thread - group for the given Runnable, and wait for it to complete. - - This method is used by both runFinalization and runFinalizersOnExit. 
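[Editor's note] The reworded Finalizer comment above keeps the original rationale: running runFinalization's work on a freshly created thread insulates the caller from a stalled or deadlocked finalizer thread. A toy sketch of that "fork a helper and wait only for it" idea, with invented names:

    public class ForkAndWait {
        // Run the given work on a short-lived helper thread and wait for it,
        // so the caller never blocks behind a shared, possibly stuck worker.
        static void runIsolated(Runnable work) throws InterruptedException {
            Thread helper = new Thread(work, "secondary-worker");
            helper.start();
            helper.join();
        }

        public static void main(String[] args) throws InterruptedException {
            runIsolated(() -> System.out.println("pending work done"));
        }
    }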
- The former method invokes all pending finalizers, while the latter - invokes all uninvoked finalizers if on-exit finalization has been - enabled. - - These two methods could have been implemented by offloading their work - to the regular finalizer thread and waiting for that thread to finish. - The advantage of creating a fresh thread, however, is that it insulates - invokers of these methods from a stalled or deadlocked finalizer thread. + * group for the given Runnable, and wait for it to complete. + * + * This method is used by runFinalization. + * + * It could have been implemented by offloading the work to the + * regular finalizer thread and waiting for that thread to finish. + * The advantage of creating a fresh thread, however, is that it insulates + * invokers of that method from a stalled or deadlocked finalizer thread. */ private static void forkSecondaryFinalizer(final Runnable proc) { AccessController.doPrivileged( @@ -144,40 +138,11 @@ final JavaLangAccess jla = SharedSecrets.getJavaLangAccess(); running = true; for (Finalizer f; (f = (Finalizer)queue.poll()) != null; ) - f.deregisterAndRunFinalizer(jla); + f.runFinalizer(jla); } }); } - /* Invoked by java.lang.Shutdown */ - static void runAllFinalizers() { - if (VM.initLevel() == 0) { - return; - } - - forkSecondaryFinalizer(new Runnable() { - private volatile boolean running; - public void run() { - // in case of recursive call to run() - if (running) - return; - final JavaLangAccess jla = SharedSecrets.getJavaLangAccess(); - running = true; - for (;;) { - // "pollFirst" from unfinalized - Finalizer f; - synchronized (lock) { - f = unfinalized; - if (f == null) break; - unfinalized = f.next; - if (unfinalized != null) - unfinalized.prev = null; - f.next = f; // mark as finalized - } - f.runFinalizer(jla); - }}}); - } - private static class FinalizerThread extends Thread { private volatile boolean running; FinalizerThread(ThreadGroup g) { @@ -203,7 +168,7 @@ for (;;) { try { Finalizer f = (Finalizer)queue.remove(); - f.deregisterAndRunFinalizer(jla); + f.runFinalizer(jla); } catch (InterruptedException x) { // ignore and continue } diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/lang/reflect/AccessibleObject.java --- a/src/java.base/share/classes/java/lang/reflect/AccessibleObject.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/lang/reflect/AccessibleObject.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,6 +26,7 @@ package java.lang.reflect; import java.lang.annotation.Annotation; +import java.lang.invoke.MethodHandle; import java.security.AccessController; import jdk.internal.misc.VM; @@ -180,6 +181,7 @@ * @revised 9 * @spec JPMS */ + @CallerSensitive // overrides in Method/Field/Constructor are @CS public void setAccessible(boolean flag) { AccessibleObject.checkPermission(); setAccessible0(flag); @@ -276,14 +278,17 @@ // do nothing, needs to be overridden by Constructor, Method, Field } - - void checkCanSetAccessible(Class caller, Class declaringClass) { + final void checkCanSetAccessible(Class caller, Class declaringClass) { checkCanSetAccessible(caller, declaringClass, true); } private boolean checkCanSetAccessible(Class caller, Class declaringClass, boolean throwExceptionIfDenied) { + if (caller == MethodHandle.class) { + throw new IllegalCallerException(); // should not happen + } + Module callerModule = caller.getModule(); Module declaringModule = declaringClass.getModule(); diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/net/SocketCleanable.java --- a/src/java.base/share/classes/java/net/SocketCleanable.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/net/SocketCleanable.java Mon Feb 26 10:36:34 2018 -0800 @@ -36,20 +36,21 @@ /** - * Cleanup for a socket/datagramsocket FileDescriptor when it becomes phantom reachable. + * Cleanable for a socket/datagramsocket FileDescriptor when it becomes phantom reachable. * Create a cleanup if the raw fd != -1. Windows closes sockets using the fd. * Subclassed from {@code PhantomCleanable} so that {@code clear} can be * called to disable the cleanup when the socket fd is closed by any means * other than calling {@link FileDescriptor#close}. - * Otherwise, it would incorrectly close the handle or fd after it has been reused. + * Otherwise, it might incorrectly close the handle or fd after it has been reused. */ -final class SocketCleanable extends PhantomCleanable { +final class SocketCleanable extends PhantomCleanable { - // Access to FileDescriptor internals + // Access to FileDescriptor private fields private static final JavaIOFileDescriptorAccess fdAccess = SharedSecrets.getJavaIOFileDescriptorAccess(); // Native function to call NET_SocketClose(fd) + // Used only for last chance cleanup. private static native void cleanupClose0(int fd) throws IOException; // The raw fd to close @@ -62,12 +63,10 @@ * @param fdo the FileDescriptor; may be null */ static void register(FileDescriptor fdo) { - if (fdo != null) { + if (fdo != null && fdo.valid()) { int fd = fdAccess.get(fdo); - if (fd != -1) { - fdAccess.registerCleanup(fdo, - new SocketCleanable(fdo, CleanerFactory.cleaner(), fd)); - } + fdAccess.registerCleanup(fdo, + new SocketCleanable(fdo, CleanerFactory.cleaner(), fd)); } } @@ -88,7 +87,7 @@ * @param cleaner the cleaner * @param fd file descriptor to close */ - private SocketCleanable(Object obj, Cleaner cleaner, int fd) { + private SocketCleanable(FileDescriptor obj, Cleaner cleaner, int fd) { super(obj, cleaner); this.fd = fd; } diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/util/Currency.java --- a/src/java.base/share/classes/java/util/Currency.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/util/Currency.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. 
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -41,6 +41,7 @@ import java.util.regex.Pattern; import java.util.regex.Matcher; import java.util.spi.CurrencyNameProvider; +import java.util.stream.Collectors; import sun.util.locale.provider.CalendarDataUtility; import sun.util.locale.provider.LocaleServiceProviderPool; import sun.util.logging.PlatformLogger; @@ -77,7 +78,10 @@ * JP=JPZ,999,0 * *

- * will supersede the currency data for Japan. + * will supersede the currency data for Japan. If JPZ is one of the existing + * ISO 4217 currency codes referred to by other countries, the existing + * JPZ currency data is updated with the given numeric code and minor + * unit value. * *

* @@ -93,6 +97,11 @@ * country code entries exist, the behavior of the Currency information for that * {@code Currency} is undefined and the remainder of entries in file are processed. *

+ * If multiple property entries with the same currency code but different numeric code + * and/or minor unit are encountered, those entries are ignored and the remainder + * of entries in the file are processed. + + *

* It is recommended to use {@link java.math.BigDecimal} class while dealing * with {@code Currency} or monetary values as it provides better handling of floating * point numbers and their operations. @@ -237,19 +246,17 @@ try (FileReader fr = new FileReader(propFile)) { props.load(fr); } - Set keys = props.stringPropertyNames(); Pattern propertiesPattern = - Pattern.compile("([A-Z]{3})\\s*,\\s*(\\d{3})\\s*,\\s*" + - "(\\d+)\\s*,?\\s*(\\d{4}-\\d{2}-\\d{2}T\\d{2}:" + - "\\d{2}:\\d{2})?"); - for (String key : keys) { - replaceCurrencyData(propertiesPattern, - key.toUpperCase(Locale.ROOT), - props.getProperty(key).toUpperCase(Locale.ROOT)); - } + Pattern.compile("([A-Z]{3})\\s*,\\s*(\\d{3})\\s*,\\s*" + + "(\\d+)\\s*,?\\s*(\\d{4}-\\d{2}-\\d{2}T\\d{2}:" + + "\\d{2}:\\d{2})?"); + List currencyEntries + = getValidCurrencyData(props, propertiesPattern); + currencyEntries.forEach(Currency::replaceCurrencyData); } } catch (IOException e) { - info("currency.properties is ignored because of an IOException", e); + CurrencyProperty.info("currency.properties is ignored" + + " because of an IOException", e); } return null; } @@ -769,71 +776,111 @@ } /** - * Replaces currency data found in the currencydata.properties file + * Parse currency data found in the properties file (that + * java.util.currency.data designates) to a List of CurrencyProperty + * instances. Also, remove invalid entries and the multiple currency + * code inconsistencies. * - * @param pattern regex pattern for the properties - * @param ctry country code - * @param curdata currency data. This is a comma separated string that - * consists of "three-letter alphabet code", "three-digit numeric code", - * and "one-digit (0-9) default fraction digit". - * For example, "JPZ,392,0". - * An optional UTC date can be appended to the string (comma separated) - * to allow a currency change take effect after date specified. - * For example, "JP=JPZ,999,0,2014-01-01T00:00:00" has no effect unless - * UTC time is past 1st January 2014 00:00:00 GMT. + * @param props properties containing currency data + * @param pattern regex pattern for the properties entry + * @return list of parsed property entries */ - private static void replaceCurrencyData(Pattern pattern, String ctry, String curdata) { + private static List getValidCurrencyData(Properties props, + Pattern pattern) { + + Set keys = props.stringPropertyNames(); + List propertyEntries = new ArrayList<>(); - if (ctry.length() != 2) { - // ignore invalid country code - info("currency.properties entry for " + ctry + - " is ignored because of the invalid country code.", null); - return; - } + // remove all invalid entries and parse all valid currency properties + // entries to a group of CurrencyProperty, classified by currency code + Map> currencyCodeGroup = keys.stream() + .map(k -> CurrencyProperty + .getValidEntry(k.toUpperCase(Locale.ROOT), + props.getProperty(k).toUpperCase(Locale.ROOT), + pattern)).flatMap(o -> o.stream()) + .collect(Collectors.groupingBy(entry -> entry.currencyCode)); - Matcher m = pattern.matcher(curdata); - if (!m.find() || (m.group(4) == null && countOccurrences(curdata, ',') >= 3)) { - // format is not recognized. 
ignore the data - // if group(4) date string is null and we've 4 values, bad date value - info("currency.properties entry for " + ctry + - " ignored because the value format is not recognized.", null); - return; - } - - try { - if (m.group(4) != null && !isPastCutoverDate(m.group(4))) { - info("currency.properties entry for " + ctry + - " ignored since cutover date has not passed :" + curdata, null); - return; + // check each group for inconsistencies + currencyCodeGroup.forEach((curCode, list) -> { + boolean inconsistent = CurrencyProperty + .containsInconsistentInstances(list); + if (inconsistent) { + list.forEach(prop -> CurrencyProperty.info("The property" + + " entry for " + prop.country + " is inconsistent." + + " Ignored.", null)); + } else { + propertyEntries.addAll(list); } - } catch (ParseException ex) { - info("currency.properties entry for " + ctry + - " ignored since exception encountered :" + ex.getMessage(), null); - return; - } + }); + + return propertyEntries; + } - String code = m.group(1); - int numeric = Integer.parseInt(m.group(2)); + /** + * Replaces currency data found in the properties file that + * java.util.currency.data designates. This method is invoked for + * each valid currency entry. + * + * @param prop CurrencyProperty instance of the valid property entry + */ + private static void replaceCurrencyData(CurrencyProperty prop) { + + + String ctry = prop.country; + String code = prop.currencyCode; + int numeric = prop.numericCode; + int fraction = prop.fraction; int entry = numeric << NUMERIC_CODE_SHIFT; - int fraction = Integer.parseInt(m.group(3)); - if (fraction > SIMPLE_CASE_COUNTRY_MAX_DEFAULT_DIGITS) { - info("currency.properties entry for " + ctry + - " ignored since the fraction is more than " + - SIMPLE_CASE_COUNTRY_MAX_DEFAULT_DIGITS + ":" + curdata, null); - return; - } int index = SpecialCaseEntry.indexOf(code, fraction, numeric); - /* if a country switches from simple case to special case or + + // If a new entry changes the numeric code/dfd of an existing + // currency code, update it in the sc list at the respective + // index and also change it in the other currencies list and + // main table (if that currency code is also used as a + // simple case). + + // If all three components do not match with the new entry, + // but the currency code exists in the special case list + // update the sc entry with the new entry + int scCurrencyCodeIndex = -1; + if (index == -1) { + scCurrencyCodeIndex = SpecialCaseEntry.currencyCodeIndex(code); + if (scCurrencyCodeIndex != -1) { + //currency code exists in sc list, then update the old entry + specialCasesList.set(scCurrencyCodeIndex, + new SpecialCaseEntry(code, fraction, numeric)); + + // also update the entry in other currencies list + OtherCurrencyEntry oe = OtherCurrencyEntry.findEntry(code); + if (oe != null) { + int oIndex = otherCurrenciesList.indexOf(oe); + otherCurrenciesList.set(oIndex, new OtherCurrencyEntry( + code, fraction, numeric)); + } + } + } + + /* If a country switches from simple case to special case or * one special case to other special case which is not present - * in the sc arrays then insert the new entry in special case arrays + * in the sc arrays then insert the new entry in special case arrays. + * If an entry with given currency code exists, update with the new + * entry. 
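[Editor's note] getValidCurrencyData above groups the parsed property entries by currency code with Collectors.groupingBy and drops any group whose numeric code or fraction digits disagree. A stand-alone sketch of that grouping/consistency check, using a hypothetical Entry class and made-up values:

    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    public class CurrencyGroupingSketch {
        static final class Entry {
            final String country, code;
            final int numeric, fraction;
            Entry(String country, String code, int numeric, int fraction) {
                this.country = country; this.code = code;
                this.numeric = numeric; this.fraction = fraction;
            }
        }

        public static void main(String[] args) {
            List<Entry> entries = List.of(
                    new Entry("JP", "JPZ", 999, 0),
                    new Entry("DE", "JPZ", 999, 0),
                    new Entry("FR", "JPZ", 998, 2));   // disagrees with the others

            Map<String, List<Entry>> byCode = entries.stream()
                    .collect(Collectors.groupingBy(e -> e.code));

            byCode.forEach((code, group) -> {
                Entry first = group.get(0);
                boolean inconsistent = group.stream().anyMatch(e ->
                        e.numeric != first.numeric || e.fraction != first.fraction);
                // Inconsistent groups are ignored, mirroring containsInconsistentInstances.
                System.out.println(code + (inconsistent ? " -> ignored" : " -> accepted"));
            });
        }
    }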
*/ if (index == -1 && (ctry.charAt(0) != code.charAt(0) || ctry.charAt(1) != code.charAt(1))) { - specialCasesList.add(new SpecialCaseEntry(code, fraction, numeric)); - index = specialCasesList.size() - 1; + if(scCurrencyCodeIndex == -1) { + specialCasesList.add(new SpecialCaseEntry(code, fraction, + numeric)); + index = specialCasesList.size() - 1; + } else { + index = scCurrencyCodeIndex; + } + + // update the entry in main table if it exists as a simple case + updateMainTableEntry(code, fraction, numeric); } if (index == -1) { @@ -848,32 +895,29 @@ setMainTableEntry(ctry.charAt(0), ctry.charAt(1), entry); } - private static boolean isPastCutoverDate(String s) throws ParseException { - SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss", Locale.ROOT); - format.setTimeZone(TimeZone.getTimeZone("UTC")); - format.setLenient(false); - long time = format.parse(s.trim()).getTime(); - return System.currentTimeMillis() > time; - - } + // update the entry in maintable for any simple case found, if a new + // entry as a special case updates the entry in sc list with + // existing currency code + private static void updateMainTableEntry(String code, int fraction, + int numeric) { + // checking the existence of currency code in mainTable + int tableEntry = getMainTableEntry(code.charAt(0), code.charAt(1)); + int entry = numeric << NUMERIC_CODE_SHIFT; + if ((tableEntry & COUNTRY_TYPE_MASK) == SIMPLE_CASE_COUNTRY_MASK + && tableEntry != INVALID_COUNTRY_ENTRY + && code.charAt(2) - 'A' == (tableEntry + & SIMPLE_CASE_COUNTRY_FINAL_CHAR_MASK)) { - private static int countOccurrences(String value, char match) { - int count = 0; - for (char c : value.toCharArray()) { - if (c == match) { - ++count; - } - } - return count; - } - - private static void info(String message, Throwable t) { - PlatformLogger logger = PlatformLogger.getLogger("java.util.Currency"); - if (logger.isLoggable(PlatformLogger.Level.INFO)) { - if (t != null) { - logger.info(message, t); - } else { - logger.info(message); + int numericCode = (tableEntry & NUMERIC_CODE_MASK) + >> NUMERIC_CODE_SHIFT; + int defaultFractionDigits = (tableEntry + & SIMPLE_CASE_COUNTRY_DEFAULT_DIGITS_MASK) + >> SIMPLE_CASE_COUNTRY_DEFAULT_DIGITS_SHIFT; + if (numeric != numericCode || fraction != defaultFractionDigits) { + // update the entry in main table + entry |= (fraction << SIMPLE_CASE_COUNTRY_DEFAULT_DIGITS_SHIFT) + | (code.charAt(2) - 'A'); + setMainTableEntry(code.charAt(0), code.charAt(1), entry); } } } @@ -959,6 +1003,25 @@ return fractionAndNumericCode; } + // get the index based on currency code + private static int currencyCodeIndex(String code) { + int size = specialCasesList.size(); + for (int index = 0; index < size; index++) { + SpecialCaseEntry scEntry = specialCasesList.get(index); + if (scEntry.oldCurrency.equals(code) && (scEntry.cutOverTime == Long.MAX_VALUE + || System.currentTimeMillis() < scEntry.cutOverTime)) { + //consider only when there is no new currency or cutover time is not passed + return index; + } else if (scEntry.newCurrency.equals(code) + && System.currentTimeMillis() >= scEntry.cutOverTime) { + //consider only if the cutover time is passed + return index; + } + } + return -1; + } + + // convert the special case entry to sc arrays index private static int toIndex(int tableEntry) { return (tableEntry & SPECIAL_CASE_COUNTRY_INDEX_MASK) - SPECIAL_CASE_COUNTRY_INDEX_DELTA; @@ -999,6 +1062,136 @@ } + + /* + * Used to represent an entry of the properties file that + * java.util.currency.data designates + * + * - 
country: country representing the currency entry + * - currencyCode: currency code + * - fraction: default fraction digit + * - numericCode: numeric code + * - date: cutover date + */ + private static class CurrencyProperty { + final private String country; + final private String currencyCode; + final private int fraction; + final private int numericCode; + final private String date; + + private CurrencyProperty(String country, String currencyCode, + int fraction, int numericCode, String date) { + this.country = country; + this.currencyCode = currencyCode; + this.fraction = fraction; + this.numericCode = numericCode; + this.date = date; + } + + /** + * Check the valid currency data and create/return an Optional instance + * of CurrencyProperty + * + * @param ctry country representing the currency data + * @param curData currency data of the given {@code ctry} + * @param pattern regex pattern for the properties entry + * @return Optional containing CurrencyProperty instance, If valid; + * empty otherwise + */ + private static Optional getValidEntry(String ctry, + String curData, + Pattern pattern) { + + CurrencyProperty prop = null; + + if (ctry.length() != 2) { + // Invalid country code. Ignore the entry. + } else { + + prop = parseProperty(ctry, curData, pattern); + // if the property entry failed any of the below checked + // criteria it is ignored + if (prop == null + || (prop.date == null && curData.chars() + .map(c -> c == ',' ? 1 : 0).sum() >= 3)) { + // format is not recognized. ignore the data if date + // string is null and we've 4 values, bad date value + prop = null; + } else if (prop.fraction + > SIMPLE_CASE_COUNTRY_MAX_DEFAULT_DIGITS) { + prop = null; + } else { + try { + if (prop.date != null + && !isPastCutoverDate(prop.date)) { + prop = null; + } + } catch (ParseException ex) { + prop = null; + } + } + } + + if (prop == null) { + info("The property entry for " + ctry + " is invalid." 
+ + " Ignored.", null); + } + + return Optional.ofNullable(prop); + } + + /* + * Parse properties entry and return CurrencyProperty instance + */ + private static CurrencyProperty parseProperty(String ctry, + String curData, Pattern pattern) { + Matcher m = pattern.matcher(curData); + if (!m.find()) { + return null; + } else { + return new CurrencyProperty(ctry, m.group(1), + Integer.parseInt(m.group(3)), + Integer.parseInt(m.group(2)), m.group(4)); + } + } + + /** + * Checks if the given list contains multiple inconsistent currency instances + */ + private static boolean containsInconsistentInstances( + List list) { + int numCode = list.get(0).numericCode; + int fractionDigit = list.get(0).fraction; + return list.stream().anyMatch(prop -> prop.numericCode != numCode + || prop.fraction != fractionDigit); + } + + private static boolean isPastCutoverDate(String s) + throws ParseException { + SimpleDateFormat format = new SimpleDateFormat( + "yyyy-MM-dd'T'HH:mm:ss", Locale.ROOT); + format.setTimeZone(TimeZone.getTimeZone("UTC")); + format.setLenient(false); + long time = format.parse(s.trim()).getTime(); + return System.currentTimeMillis() > time; + + } + + private static void info(String message, Throwable t) { + PlatformLogger logger = PlatformLogger + .getLogger("java.util.Currency"); + if (logger.isLoggable(PlatformLogger.Level.INFO)) { + if (t != null) { + logger.info(message, t); + } else { + logger.info(message); + } + } + } + + } + } diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/util/Formatter.java --- a/src/java.base/share/classes/java/util/Formatter.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/util/Formatter.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -284,11 +284,11 @@ * {@code 'A'}, and {@code 'T'}) are the same as those for the corresponding * lower-case conversion characters except that the result is converted to * upper case according to the rules of the prevailing {@link java.util.Locale - * Locale}. The result is equivalent to the following invocation of {@link - * String#toUpperCase(Locale)} - * - *

- *    out.toUpperCase(Locale.getDefault(Locale.Category.FORMAT)) 
+ * Locale}. If there is no explicit locale specified, either at the + * construction of the instance or as a parameter to its method + * invocation, then the {@link java.util.Locale.Category#FORMAT default locale} + * is used. + * * * * @@ -709,11 +709,10 @@ * {@code 'G'}, {@code 'A'}, and {@code 'T'}) are the same as those for the * corresponding lower-case conversion characters except that the result is * converted to upper case according to the rules of the prevailing {@link - * java.util.Locale Locale}. The result is equivalent to the following - * invocation of {@link String#toUpperCase(Locale)} - * - *
- *    out.toUpperCase(Locale.getDefault(Locale.Category.FORMAT)) 
+ * java.util.Locale Locale}. If there is no explicit locale specified, + * either at the construction of the instance or as a parameter to its method + * invocation, then the {@link java.util.Locale.Category#FORMAT default locale} + * is used. * *
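[Editor's note] This javadoc change, together with the toUpperCaseWithLocale helper introduced further down, means the upper-case conversions honour the locale passed to the format call rather than always using the default FORMAT locale. A small illustration; the Turkish locale is chosen only because its dotted/dotless 'i' mapping makes the difference visible, and the printed results assume a JDK containing this change:

    import java.util.Locale;

    public class UpperCaseConversionDemo {
        public static void main(String[] args) {
            // %S upper-cases its argument according to the locale of the call.
            System.out.println(String.format(Locale.ROOT, "%S", "title"));            // TITLE
            System.out.println(String.format(new Locale("tr", "TR"), "%S", "title")); // TİTLE
        }
    }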

General

* @@ -2897,16 +2896,16 @@ break; case Conversion.CHARACTER: case Conversion.CHARACTER_UPPER: - printCharacter(arg); + printCharacter(arg, l); break; case Conversion.BOOLEAN: - printBoolean(arg); + printBoolean(arg, l); break; case Conversion.STRING: printString(arg, l); break; case Conversion.HASHCODE: - printHashCode(arg); + printHashCode(arg, l); break; case Conversion.LINE_SEPARATOR: a.append(System.lineSeparator()); @@ -2921,7 +2920,7 @@ private void printInteger(Object arg, Locale l) throws IOException { if (arg == null) - print("null"); + print("null", l); else if (arg instanceof Byte) print(((Byte)arg).byteValue(), l); else if (arg instanceof Short) @@ -2938,7 +2937,7 @@ private void printFloat(Object arg, Locale l) throws IOException { if (arg == null) - print("null"); + print("null", l); else if (arg instanceof Float) print(((Float)arg).floatValue(), l); else if (arg instanceof Double) @@ -2951,7 +2950,7 @@ private void printDateTime(Object arg, Locale l) throws IOException { if (arg == null) { - print("null"); + print("null", l); return; } Calendar cal = null; @@ -2982,9 +2981,9 @@ print(cal, c, l); } - private void printCharacter(Object arg) throws IOException { + private void printCharacter(Object arg, Locale l) throws IOException { if (arg == null) { - print("null"); + print("null", l); return; } String s = null; @@ -3011,7 +3010,7 @@ } else { failConversion(c, arg); } - print(s); + print(s, l); } private void printString(Object arg, Locale l) throws IOException { @@ -3024,13 +3023,13 @@ if (f.contains(Flags.ALTERNATE)) failMismatch(Flags.ALTERNATE, 's'); if (arg == null) - print("null"); + print("null", l); else - print(arg.toString()); + print(arg.toString(), l); } } - private void printBoolean(Object arg) throws IOException { + private void printBoolean(Object arg, Locale l) throws IOException { String s; if (arg != null) s = ((arg instanceof Boolean) @@ -3038,24 +3037,29 @@ : Boolean.toString(true)); else s = Boolean.toString(false); - print(s); + print(s, l); } - private void printHashCode(Object arg) throws IOException { + private void printHashCode(Object arg, Locale l) throws IOException { String s = (arg == null ? 
"null" : Integer.toHexString(arg.hashCode())); - print(s); + print(s, l); } - private void print(String s) throws IOException { + private void print(String s, Locale l) throws IOException { if (precision != -1 && precision < s.length()) s = s.substring(0, precision); if (f.contains(Flags.UPPERCASE)) - s = s.toUpperCase(Locale.getDefault(Locale.Category.FORMAT)); + s = toUpperCaseWithLocale(s, l); appendJustified(a, s); } + private String toUpperCaseWithLocale(String s, Locale l) { + return s.toUpperCase(Objects.requireNonNullElse(l, + Locale.getDefault(Locale.Category.FORMAT))); + } + private Appendable appendJustified(Appendable a, CharSequence cs) throws IOException { if (width == -1) { return a.append(cs); @@ -3276,7 +3280,7 @@ trailingZeros(sb, width - len); } if (f.contains(Flags.UPPERCASE)) - s = s.toUpperCase(Locale.getDefault(Locale.Category.FORMAT)); + s = toUpperCaseWithLocale(s, l); sb.append(s); } @@ -3351,7 +3355,7 @@ trailingZeros(sb, width - len); } if (f.contains(Flags.UPPERCASE)) - s = s.toUpperCase(Locale.getDefault(Locale.Category.FORMAT)); + s = toUpperCaseWithLocale(s, l); sb.append(s); } @@ -3950,7 +3954,7 @@ // justify based on width if (f.contains(Flags.UPPERCASE)) { - appendJustified(a, sb.toString().toUpperCase(Locale.getDefault(Locale.Category.FORMAT))); + appendJustified(a, toUpperCaseWithLocale(sb.toString(), l)); } else { appendJustified(a, sb); } @@ -4132,8 +4136,7 @@ StringBuilder tsb = new StringBuilder(); print(tsb, t, DateTime.AM_PM, l); - sb.append(tsb.toString().toUpperCase(Objects.requireNonNullElse(l, - Locale.getDefault(Locale.Category.FORMAT)))); + sb.append(toUpperCaseWithLocale(tsb.toString(), l)); break; } case DateTime.DATE_TIME: { // 'c' (Sat Nov 04 12:02:33 EST 1999) @@ -4171,7 +4174,7 @@ print(sb, t, c, l); // justify based on width if (f.contains(Flags.UPPERCASE)) { - appendJustified(a, sb.toString().toUpperCase(Locale.getDefault(Locale.Category.FORMAT))); + appendJustified(a, toUpperCaseWithLocale(sb.toString(), l)); } else { appendJustified(a, sb); } @@ -4373,8 +4376,7 @@ // this may be in wrong place for some locales StringBuilder tsb = new StringBuilder(); print(tsb, t, DateTime.AM_PM, l); - sb.append(tsb.toString().toUpperCase(Objects.requireNonNullElse(l, - Locale.getDefault(Locale.Category.FORMAT)))); + sb.append(toUpperCaseWithLocale(tsb.toString(), l)); break; } case DateTime.DATE_TIME: { // 'c' (Sat Nov 04 12:02:33 EST 1999) diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/util/jar/Attributes.java --- a/src/java.base/share/classes/java/util/jar/Attributes.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/util/jar/Attributes.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,18 +25,16 @@ package java.util.jar; -import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.IOException; +import java.util.Collection; +import java.util.Comparator; import java.util.LinkedHashMap; +import java.util.Locale; import java.util.Map; import java.util.Set; -import java.util.Collection; -import java.util.AbstractSet; -import java.util.Iterator; -import java.util.Locale; + import sun.util.logging.PlatformLogger; -import java.util.Comparator; /** * The Attributes class maps Manifest attribute names to associated string diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/java/util/regex/PatternSyntaxException.java --- a/src/java.base/share/classes/java/util/regex/PatternSyntaxException.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/java/util/regex/PatternSyntaxException.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,9 +25,6 @@ package java.util.regex; -import sun.security.action.GetPropertyAction; - - /** * Unchecked exception thrown to indicate a syntax error in a * regular-expression pattern. @@ -93,9 +90,6 @@ return pattern; } - private static final String nl = - GetPropertyAction.privilegedGetProperty("line.separator"); - /** * Returns a multi-line string containing the description of the syntax * error and its index, the erroneous regular-expression pattern, and a @@ -110,10 +104,10 @@ sb.append(" near index "); sb.append(index); } - sb.append(nl); + sb.append(System.lineSeparator()); sb.append(pattern); if (index >= 0) { - sb.append(nl); + sb.append(System.lineSeparator()); for (int i = 0; i < index; i++) sb.append(' '); sb.append('^'); } diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/jdk/internal/misc/JavaIOFileDescriptorAccess.java --- a/src/java.base/share/classes/jdk/internal/misc/JavaIOFileDescriptorAccess.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/jdk/internal/misc/JavaIOFileDescriptorAccess.java Mon Feb 26 10:36:34 2018 -0800 @@ -40,7 +40,7 @@ public boolean getAppend(FileDescriptor fdo); public void close(FileDescriptor fdo) throws IOException; public void registerCleanup(FileDescriptor fdo); - public void registerCleanup(FileDescriptor fdo, PhantomCleanable cleanable); + public void registerCleanup(FileDescriptor fdo, PhantomCleanable cleanable); public void unregisterCleanup(FileDescriptor fdo); // Only valid on Windows diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/jdk/internal/misc/VM.java --- a/src/java.base/share/classes/jdk/internal/misc/VM.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/jdk/internal/misc/VM.java Mon Feb 26 10:36:34 2018 -0800 @@ -27,9 +27,7 @@ import static java.lang.Thread.State.*; import java.util.Map; -import java.util.HashMap; import java.util.Properties; -import java.util.Collections; public class VM { @@ -38,6 +36,8 @@ private static final int MODULE_SYSTEM_INITED = 2; private static final int SYSTEM_LOADER_INITIALIZING = 3; private static final int SYSTEM_BOOTED = 4; + private static final int SYSTEM_SHUTDOWN = 5; + // 0, 1, 2, ... 
private static volatile int initLevel; @@ -52,7 +52,7 @@ */ public static void initLevel(int value) { synchronized (lock) { - if (value <= initLevel || value > SYSTEM_BOOTED) + if (value <= initLevel || value > SYSTEM_SHUTDOWN) throw new InternalError("Bad level: " + value); initLevel = value; lock.notifyAll(); @@ -94,6 +94,23 @@ return initLevel >= SYSTEM_BOOTED; } + /** + * Set shutdown state. Shutdown completes when all registered shutdown + * hooks have been run. + * + * @see java.lang.Shutdown + */ + public static void shutdown() { + initLevel(SYSTEM_SHUTDOWN); + } + + /** + * Returns {@code true} if the VM has been shutdown + */ + public static boolean isShutdown() { + return initLevel == SYSTEM_SHUTDOWN; + } + // A user-settable upper limit on the maximum amount of allocatable direct // buffer memory. This value may be changed during VM initialization if // "java" is launched with "-XX:MaxDirectMemorySize=". diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/jdk/internal/util/xml/impl/XMLStreamWriterImpl.java --- a/src/java.base/share/classes/jdk/internal/util/xml/impl/XMLStreamWriterImpl.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/jdk/internal/util/xml/impl/XMLStreamWriterImpl.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -75,8 +75,7 @@ //pretty print by default private boolean _doIndent = true; //The system line separator for writing out line breaks. - private char[] _lineSep = - System.getProperty("line.separator").toCharArray(); + private char[] _lineSep = System.lineSeparator().toCharArray(); public XMLStreamWriterImpl(OutputStream os) throws XMLStreamException { this(os, XMLStreamWriter.DEFAULT_CHARSET); diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/classes/jdk/internal/util/xml/impl/XMLWriter.java --- a/src/java.base/share/classes/jdk/internal/util/xml/impl/XMLWriter.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/classes/jdk/internal/util/xml/impl/XMLWriter.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -123,7 +123,7 @@ } private void nl() throws XMLStreamException { - String lineEnd = System.getProperty("line.separator"); + String lineEnd = System.lineSeparator(); try { _writer.write(lineEnd); } catch (IOException e) { diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/native/libjava/Shutdown.c --- a/src/java.base/share/native/libjava/Shutdown.c Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/native/libjava/Shutdown.c Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2003, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
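[Editor's note] The VM.java hunk above adds SYSTEM_SHUTDOWN as a new top value of the existing monotonic init-level state machine: the level only moves forward under the lock, and waiters are notified on each advance. A compact sketch of that pattern outside the JDK, with invented names:

    class InitLevelSketch {
        private static final Object lock = new Object();
        private static volatile int level = 0;

        // Advance the level; it may never move backwards or past the maximum.
        static void advanceTo(int value, int max) {
            synchronized (lock) {
                if (value <= level || value > max)
                    throw new IllegalStateException("Bad level: " + value);
                level = value;
                lock.notifyAll();           // wake threads waiting for this level
            }
        }

        // Block until the level reaches at least the requested value.
        static void awaitLevel(int value) throws InterruptedException {
            synchronized (lock) {
                while (level < value) lock.wait();
            }
        }
    }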
* * This code is free software; you can redistribute it and/or modify it @@ -35,17 +35,3 @@ { JVM_Halt(code); } - - -JNIEXPORT void JNICALL -Java_java_lang_Shutdown_runAllFinalizers(JNIEnv *env, jclass ignored) -{ - jclass cl; - jmethodID mid; - - if ((cl = (*env)->FindClass(env, "java/lang/ref/Finalizer")) - && (mid = (*env)->GetStaticMethodID(env, cl, - "runAllFinalizers", "()V"))) { - (*env)->CallStaticVoidMethod(env, cl, mid); - } -} diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/share/native/libjava/System.c --- a/src/java.base/share/native/libjava/System.c Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/share/native/libjava/System.c Mon Feb 26 10:36:34 2018 -0800 @@ -125,7 +125,6 @@ #define JAVA_SPECIFICATION_VENDOR "Oracle Corporation" #endif -static int fmtdefault; // boolean value jobject fillI18nProps(JNIEnv *env, jobject props, char *baseKey, char *platformDispVal, char *platformFmtVal, jmethodID putID, jmethodID getPropID) { @@ -141,16 +140,9 @@ const char *baseVal = ""; /* user.xxx base property */ - if (fmtdefault) { - if (platformFmtVal) { - PUTPROP(props, baseKey, platformFmtVal); - baseVal = platformFmtVal; - } - } else { - if (platformDispVal) { - PUTPROP(props, baseKey, platformDispVal); - baseVal = platformDispVal; - } + if (platformDispVal) { + PUTPROP(props, baseKey, platformDispVal); + baseVal = platformDispVal; } /* user.xxx.display property */ @@ -402,16 +394,6 @@ ret = JVM_InitProperties(env, props); - /* Check the compatibility flag */ - GETPROP(props, "sun.locale.formatasdefault", jVMVal); - if (jVMVal) { - const char * val = (*env)->GetStringUTFChars(env, jVMVal, 0); - CHECK_NULL_RETURN(val, NULL); - fmtdefault = !strcmp(val, "true"); - (*env)->ReleaseStringUTFChars(env, jVMVal, val); - (*env)->DeleteLocalRef(env, jVMVal); - } - /* reconstruct i18n related properties */ fillI18nProps(env, props, "user.language", sprops->display_language, sprops->format_language, putID, getPropID); @@ -430,11 +412,7 @@ */ PUTPROP(props, "file.encoding", sprops->encoding); #else - if (fmtdefault) { - PUTPROP(props, "file.encoding", sprops->encoding); - } else { - PUTPROP(props, "file.encoding", sprops->sun_jnu_encoding); - } + PUTPROP(props, "file.encoding", sprops->sun_jnu_encoding); #endif } else { (*env)->DeleteLocalRef(env, jVMVal); diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/unix/classes/java/io/FileDescriptor.java --- a/src/java.base/unix/classes/java/io/FileDescriptor.java Fri Feb 23 12:30:03 2018 +0530 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,394 +0,0 @@ -/* - * Copyright (c) 1995, 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). 
- * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -package java.io; - -import java.lang.ref.Cleaner; -import java.util.ArrayList; -import java.util.List; - -import jdk.internal.misc.JavaIOFileDescriptorAccess; -import jdk.internal.misc.SharedSecrets; -import jdk.internal.ref.CleanerFactory; -import jdk.internal.ref.PhantomCleanable; - -/** - * Instances of the file descriptor class serve as an opaque handle - * to the underlying machine-specific structure representing an open - * file, an open socket, or another source or sink of bytes. - * The main practical use for a file descriptor is to create a - * {@link FileInputStream} or {@link FileOutputStream} to contain it. - *

- * Applications should not create their own file descriptors. - * - * @author Pavani Diwanji - * @since 1.0 - */ -public final class FileDescriptor { - - private int fd; - - private Closeable parent; - private List otherParents; - private boolean closed; - - /** - * true, if file is opened for appending. - */ - private boolean append; - - static { - initIDs(); - } - - // Set up JavaIOFileDescriptorAccess in SharedSecrets - static { - SharedSecrets.setJavaIOFileDescriptorAccess( - new JavaIOFileDescriptorAccess() { - public void set(FileDescriptor fdo, int fd) { - fdo.set(fd); - } - - public int get(FileDescriptor fdo) { - return fdo.fd; - } - - public void setAppend(FileDescriptor fdo, boolean append) { - fdo.append = append; - } - - public boolean getAppend(FileDescriptor fdo) { - return fdo.append; - } - - public void close(FileDescriptor fdo) throws IOException { - fdo.close(); - } - - public void registerCleanup(FileDescriptor fdo) { - fdo.registerCleanup(); - } - - public void registerCleanup(FileDescriptor fdo, PhantomCleanable cleanup) { - fdo.registerCleanup(cleanup); - } - - public void unregisterCleanup(FileDescriptor fdo) { - fdo.unregisterCleanup(); - } - - public void setHandle(FileDescriptor fdo, long handle) { - throw new UnsupportedOperationException(); - } - - public long getHandle(FileDescriptor fdo) { - throw new UnsupportedOperationException(); - } - } - ); - } - - /** - * Cleanup in case FileDescriptor is not explicitly closed. - */ - private PhantomCleanable cleanup; - - /** - * Constructs an (invalid) FileDescriptor - * object. - */ - public FileDescriptor() { - fd = -1; - } - - private FileDescriptor(int fd) { - this.fd = fd; - this.append = getAppend(fd); - } - - /** - * A handle to the standard input stream. Usually, this file - * descriptor is not used directly, but rather via the input stream - * known as {@code System.in}. - * - * @see java.lang.System#in - */ - public static final FileDescriptor in = new FileDescriptor(0); - - /** - * A handle to the standard output stream. Usually, this file - * descriptor is not used directly, but rather via the output stream - * known as {@code System.out}. - * @see java.lang.System#out - */ - public static final FileDescriptor out = new FileDescriptor(1); - - /** - * A handle to the standard error stream. Usually, this file - * descriptor is not used directly, but rather via the output stream - * known as {@code System.err}. - * - * @see java.lang.System#err - */ - public static final FileDescriptor err = new FileDescriptor(2); - - /** - * Tests if this file descriptor object is valid. - * - * @return {@code true} if the file descriptor object represents a - * valid, open file, socket, or other active I/O connection; - * {@code false} otherwise. - */ - public boolean valid() { - return fd != -1; - } - - /** - * Force all system buffers to synchronize with the underlying - * device. This method returns after all modified data and - * attributes of this FileDescriptor have been written to the - * relevant device(s). In particular, if this FileDescriptor - * refers to a physical storage medium, such as a file in a file - * system, sync will not return until all in-memory modified copies - * of buffers associated with this FileDescriptor have been - * written to the physical medium. 
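[Editor's note] The sync() javadoc in the removed file makes the point that sync only affects buffers downstream of the FileDescriptor, so any application-level buffering must be flushed first. A short usage sketch of that ordering (the file name is illustrative):

    import java.io.BufferedOutputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;

    public class SyncDemo {
        public static void main(String[] args) throws IOException {
            try (FileOutputStream fos = new FileOutputStream("journal.log");
                 BufferedOutputStream out = new BufferedOutputStream(fos)) {
                out.write("committed record".getBytes());
                out.flush();          // push data out of the application buffer
                fos.getFD().sync();   // then force it down to the storage device
            }
        }
    }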
- * - * sync is meant to be used by code that requires physical - * storage (such as a file) to be in a known state For - * example, a class that provided a simple transaction facility - * might use sync to ensure that all changes to a file caused - * by a given transaction were recorded on a storage medium. - * - * sync only affects buffers downstream of this FileDescriptor. If - * any in-memory buffering is being done by the application (for - * example, by a BufferedOutputStream object), those buffers must - * be flushed into the FileDescriptor (for example, by invoking - * OutputStream.flush) before that data will be affected by sync. - * - * @exception SyncFailedException - * Thrown when the buffers cannot be flushed, - * or because the system cannot guarantee that all the - * buffers have been synchronized with physical media. - * @since 1.1 - */ - public native void sync() throws SyncFailedException; - - /* This routine initializes JNI field offsets for the class */ - private static native void initIDs(); - - /** - * Set the fd. - * If setting to -1, clear the cleaner. - * The {@link #registerCleanup()} method should be called for new fds. - * @param fd the fd or -1 to indicate closed - */ - @SuppressWarnings("unchecked") - synchronized void set(int fd) { - if (fd == -1 && cleanup != null) { - cleanup.clear(); - cleanup = null; - } - this.fd = fd; - } - - /** - * Register a cleanup for the current handle. - * Used directly in java.io and indirectly via fdAccess. - * The cleanup should be registered after the handle is set in the FileDescriptor. - */ - @SuppressWarnings("unchecked") - void registerCleanup() { - registerCleanup(null); - } - - /** - * Register a cleanup for the current handle. - * Used directly in java.io and indirectly via fdAccess. - * The cleanup should be registered after the handle is set in the FileDescriptor. - * @param newCleanable a PhantomCleanable to register - */ - @SuppressWarnings("unchecked") - synchronized void registerCleanup(PhantomCleanable newCleanable) { - if (cleanup != null) { - cleanup.clear(); - } - cleanup = (newCleanable == null) ? FDCleanup.create(this) : newCleanable; - } - - /** - * Unregister a cleanup for the current raw fd. - * Used directly in java.io and indirectly via fdAccess. - * Normally {@link #close()} should be used except in cases where - * it is certain the caller will close the raw fd and the cleanup - * must not close the raw fd. {@link #unregisterCleanup()} must be - * called before the raw fd is closed to prevent a race that makes - * it possible for the fd to be reallocated to another use and later - * the cleanup might be invoked. - */ - synchronized void unregisterCleanup() { - if (cleanup != null) { - cleanup.clear(); - } - cleanup = null; - } - - /** - * Returns true, if the file was opened for appending. - */ - private static native boolean getAppend(int fd); - - /** - * Close the raw file descriptor or handle, if it has not already been closed - * and set the fd and handle to -1. - * Clear the cleaner so the close does not happen twice. - * Package private to allow it to be used in java.io. - * @throws IOException if close fails - */ - @SuppressWarnings("unchecked") - synchronized void close() throws IOException { - if (cleanup != null) { - cleanup.clear(); - cleanup = null; - } - close0(); - } - - /* - * Close the raw file descriptor or handle, if it has not already been closed - * and set the fd and handle to -1. - */ - private native void close0() throws IOException; - - /* - * Raw close of the file descriptor. 
- * Used only for last chance cleanup. - */ - private static native void cleanupClose0(int fd) throws IOException; - - /* - * Package private methods to track referents. - * If multiple streams point to the same FileDescriptor, we cycle - * through the list of all referents and call close() - */ - - /** - * Attach a Closeable to this FD for tracking. - * parent reference is added to otherParents when - * needed to make closeAll simpler. - */ - synchronized void attach(Closeable c) { - if (parent == null) { - // first caller gets to do this - parent = c; - } else if (otherParents == null) { - otherParents = new ArrayList<>(); - otherParents.add(parent); - otherParents.add(c); - } else { - otherParents.add(c); - } - } - - /** - * Cycle through all Closeables sharing this FD and call - * close() on each one. - * - * The caller closeable gets to call close0(). - */ - @SuppressWarnings("try") - synchronized void closeAll(Closeable releaser) throws IOException { - if (!closed) { - closed = true; - IOException ioe = null; - try (releaser) { - if (otherParents != null) { - for (Closeable referent : otherParents) { - try { - referent.close(); - } catch(IOException x) { - if (ioe == null) { - ioe = x; - } else { - ioe.addSuppressed(x); - } - } - } - } - } catch(IOException ex) { - /* - * If releaser close() throws IOException - * add other exceptions as suppressed. - */ - if (ioe != null) - ex.addSuppressed(ioe); - ioe = ex; - } finally { - if (ioe != null) - throw ioe; - } - } - } - - /** - * Cleanup for a FileDescriptor when it becomes phantom reachable. - * Create a cleanup if fd != -1. - * Subclassed from {@code PhantomCleanable} so that {@code clear} can be - * called to disable the cleanup when the fd is closed by any means other - * than calling {@link FileDescriptor#close}. - * Otherwise, it may close the native fd after it has been reused. - */ - static final class FDCleanup extends PhantomCleanable { - private final int fd; - - static FDCleanup create(FileDescriptor fdo) { - return fdo.fd == -1 - ? null - : new FDCleanup(fdo, CleanerFactory.cleaner(), fdo.fd); - } - - /** - * Constructor for a phantom cleanable reference. - * @param obj the object to monitor - * @param cleaner the cleaner - * @param fd file descriptor to close - */ - private FDCleanup(Object obj, Cleaner cleaner, int fd) { - super(obj, cleaner); - this.fd = fd; - } - - /** - * Close the native fd. - */ - @Override - protected void performCleanup() { - try { - cleanupClose0(fd); - } catch (IOException ioe) { - throw new UncheckedIOException("close", ioe); - } - } - } -} diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/unix/classes/java/io/UnixFileSystem.java --- a/src/java.base/unix/classes/java/io/UnixFileSystem.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/unix/classes/java/io/UnixFileSystem.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
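The FDCleanup/PhantomCleanable arrangement shown above (a cleanup that captures only the raw descriptor value and is cleared on explicit close) can be approximated with the public java.lang.ref.Cleaner API. Below is a minimal sketch, assuming a hypothetical ScratchResource class and a stand-in nativeClose method rather than the JDK's internal code:

    import java.lang.ref.Cleaner;

    // Hypothetical stand-in for the internal FileDescriptor/FDCleanup pair; illustrative only.
    public class ScratchResource implements AutoCloseable {
        private static final Cleaner CLEANER = Cleaner.create();

        // The cleaning state holds only the raw descriptor value, never the owner,
        // otherwise the owner could never become phantom reachable.
        private static final class State implements Runnable {
            private int fd;
            State(int fd) { this.fd = fd; }
            @Override public void run() {
                if (fd != -1) {
                    nativeClose(fd);   // last-chance close, like cleanupClose0 above
                    fd = -1;
                }
            }
        }

        private final Cleaner.Cleanable cleanable;

        ScratchResource(int fd) {
            // Register only after the descriptor is set, mirroring registerCleanup().
            this.cleanable = CLEANER.register(this, new State(fd));
        }

        @Override public void close() {
            // Explicit close runs the action at most once and unregisters it,
            // so the cleaner cannot later close a descriptor that was reused.
            cleanable.clean();
        }

        private static void nativeClose(int fd) {
            System.out.println("closing fd " + fd);   // stand-in for the native close
        }

        public static void main(String[] args) {
            try (ScratchResource r = new ScratchResource(42)) {
                // use the resource
            }
        }
    }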
* * This code is free software; you can redistribute it and/or modify it @@ -34,12 +34,14 @@ private final char slash; private final char colon; private final String javaHome; + private final String userDir; public UnixFileSystem() { Properties props = GetPropertyAction.privilegedGetProperties(); slash = props.getProperty("file.separator").charAt(0); colon = props.getProperty("path.separator").charAt(0); javaHome = props.getProperty("java.home"); + userDir = props.getProperty("user.dir"); } @@ -128,7 +130,11 @@ public String resolve(File f) { if (isAbsolute(f)) return f.getPath(); - return resolve(System.getProperty("user.dir"), f.getPath()); + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPropertyAccess("user.dir"); + } + return resolve(userDir, f.getPath()); } // Caches for canonicalization results to improve startup performance. diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/unix/native/libjava/FileDescriptor_md.c --- a/src/java.base/unix/native/libjava/FileDescriptor_md.c Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/unix/native/libjava/FileDescriptor_md.c Mon Feb 26 10:36:34 2018 -0800 @@ -64,6 +64,10 @@ JNU_ThrowByName(env, "java/io/SyncFailedException", "sync failed"); } } +JNIEXPORT jlong JNICALL +Java_java_io_FileDescriptor_getHandle(JNIEnv *env, jclass fdClass, jint fd) { + return -1; +} JNIEXPORT jboolean JNICALL Java_java_io_FileDescriptor_getAppend(JNIEnv *env, jclass fdClass, jint fd) { @@ -71,17 +75,17 @@ return ((flags & O_APPEND) == 0) ? JNI_FALSE : JNI_TRUE; } +// instance method close0 for FileDescriptor JNIEXPORT void JNICALL -Java_java_io_FileDescriptor_cleanupClose0(JNIEnv *env, jclass fdClass, jint fd) { +Java_java_io_FileDescriptor_close0(JNIEnv *env, jobject this) { + fileDescriptorClose(env, this); +} + +JNIEXPORT void JNICALL +Java_java_io_FileCleanable_cleanupClose0(JNIEnv *env, jclass fdClass, jint fd, jlong unused) { if (fd != -1) { if (close(fd) == -1) { JNU_ThrowIOExceptionWithLastError(env, "close failed"); } } } - -// instance method close0 for FileDescriptor -JNIEXPORT void JNICALL -Java_java_io_FileDescriptor_close0(JNIEnv *env, jobject this) { - fileDescriptorClose(env, this); -} diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/windows/classes/java/io/FileDescriptor.java --- a/src/java.base/windows/classes/java/io/FileDescriptor.java Fri Feb 23 12:30:03 2018 +0530 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,400 +0,0 @@ -/* - * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. Oracle designates this - * particular file as subject to the "Classpath" exception as provided - * by Oracle in the LICENSE file that accompanied this code. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
- * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -package java.io; - -import java.lang.ref.Cleaner; -import java.util.ArrayList; -import java.util.List; - -import jdk.internal.misc.JavaIOFileDescriptorAccess; -import jdk.internal.misc.SharedSecrets; -import jdk.internal.ref.CleanerFactory; -import jdk.internal.ref.PhantomCleanable; - -/** - * Instances of the file descriptor class serve as an opaque handle - * to the underlying machine-specific structure representing an open - * file, an open socket, or another source or sink of bytes. - * The main practical use for a file descriptor is to create a - * {@link FileInputStream} or {@link FileOutputStream} to contain it. - *

- * Applications should not create their own file descriptors. - * - * @author Pavani Diwanji - * @since 1.0 - */ -public final class FileDescriptor { - - private int fd; - - private long handle; - - private Closeable parent; - private List otherParents; - private boolean closed; - - /** - * true, if file is opened for appending. - */ - private boolean append; - - static { - initIDs(); - } - - // Set up JavaIOFileDescriptorAccess in SharedSecrets - static { - SharedSecrets.setJavaIOFileDescriptorAccess( - new JavaIOFileDescriptorAccess() { - public void set(FileDescriptor fdo, int fd) { - fdo.fd = fd; - } - - public int get(FileDescriptor fdo) { - return fdo.fd; - } - - public void setAppend(FileDescriptor fdo, boolean append) { - fdo.append = append; - } - - public boolean getAppend(FileDescriptor fdo) { - return fdo.append; - } - - public void close(FileDescriptor fdo) throws IOException { - fdo.close(); - } - - public void registerCleanup(FileDescriptor fdo) { - fdo.registerCleanup(null); - } - - public void registerCleanup(FileDescriptor fdo, PhantomCleanable cleanup) { - fdo.registerCleanup(cleanup); - } - - public void unregisterCleanup(FileDescriptor fdo) { - fdo.unregisterCleanup(); - } - - public void setHandle(FileDescriptor fdo, long handle) { - fdo.setHandle(handle); - } - - public long getHandle(FileDescriptor fdo) { - return fdo.handle; - } - } - ); - } - - /** - * Cleanup in case FileDescriptor is not explicitly closed. - */ - private PhantomCleanable cleanup; - - /** - * Constructs an (invalid) FileDescriptor - * object. - */ - public FileDescriptor() { - fd = -1; - handle = -1; - } - - /** - * A handle to the standard input stream. Usually, this file - * descriptor is not used directly, but rather via the input stream - * known as {@code System.in}. - * - * @see java.lang.System#in - */ - public static final FileDescriptor in = standardStream(0); - - /** - * A handle to the standard output stream. Usually, this file - * descriptor is not used directly, but rather via the output stream - * known as {@code System.out}. - * @see java.lang.System#out - */ - public static final FileDescriptor out = standardStream(1); - - /** - * A handle to the standard error stream. Usually, this file - * descriptor is not used directly, but rather via the output stream - * known as {@code System.err}. - * - * @see java.lang.System#err - */ - public static final FileDescriptor err = standardStream(2); - - /** - * Tests if this file descriptor object is valid. - * - * @return {@code true} if the file descriptor object represents a - * valid, open file, socket, or other active I/O connection; - * {@code false} otherwise. - */ - public boolean valid() { - return (handle != -1) || (fd != -1); - } - - /** - * Force all system buffers to synchronize with the underlying - * device. This method returns after all modified data and - * attributes of this FileDescriptor have been written to the - * relevant device(s). In particular, if this FileDescriptor - * refers to a physical storage medium, such as a file in a file - * system, sync will not return until all in-memory modified copies - * of buffers associated with this FileDescriptor have been - * written to the physical medium. - * - * sync is meant to be used by code that requires physical - * storage (such as a file) to be in a known state For - * example, a class that provided a simple transaction facility - * might use sync to ensure that all changes to a file caused - * by a given transaction were recorded on a storage medium. 
- * - * sync only affects buffers downstream of this FileDescriptor. If - * any in-memory buffering is being done by the application (for - * example, by a BufferedOutputStream object), those buffers must - * be flushed into the FileDescriptor (for example, by invoking - * OutputStream.flush) before that data will be affected by sync. - * - * @exception SyncFailedException - * Thrown when the buffers cannot be flushed, - * or because the system cannot guarantee that all the - * buffers have been synchronized with physical media. - * @since 1.1 - */ - public native void sync() throws SyncFailedException; - - /* This routine initializes JNI field offsets for the class */ - private static native void initIDs(); - - private static FileDescriptor standardStream(int fd) { - FileDescriptor desc = new FileDescriptor(); - desc.handle = getHandle(fd); - return desc; - } - - private static native long getHandle(int d); - - /** - * Set the handle. - * If setting to -1, clear the cleaner. - * The {@link #registerCleanup()} method should be called for new handles. - * @param handle the handle or -1 to indicate closed - */ - @SuppressWarnings("unchecked") - void setHandle(long handle) { - if (handle == -1 && cleanup != null) { - cleanup.clear(); - cleanup = null; - } - this.handle = handle; - } - - /** - * Register a cleanup for the current handle. - * Used directly in java.io and indirectly via fdAccess. - * The cleanup should be registered after the handle is set in the FileDescriptor. - */ - @SuppressWarnings("unchecked") - void registerCleanup() { - registerCleanup(null); - } - - /** - * Register a cleanup for the current handle. - * Used directly in java.io and indirectly via fdAccess. - * The cleanup should be registered after the handle is set in the FileDescriptor. - * @param newCleanable a PhantomCleanable to register - */ - @SuppressWarnings("unchecked") - synchronized void registerCleanup(PhantomCleanable newCleanable) { - if (cleanup != null) { - cleanup.clear(); - } - cleanup = (newCleanable == null) ? FDCleanup.create(this) : newCleanable; - } - - /** - * Unregister a cleanup for the current raw fd. - * Used directly in java.io and indirectly via fdAccess. - * Normally {@link #close()} should be used except in cases where - * it is certain the caller will close the raw fd and the cleanup - * must not close the raw fd. {@link #unregisterCleanup()} must be - * called before the raw fd is closed to prevent a race that makes - * it possible for the fd to be reallocated to another use and later - * the cleanup might be invoked. - */ - synchronized void unregisterCleanup() { - if (cleanup != null) { - cleanup.clear(); - } - cleanup = null; - } - - /** - * Close the raw file descriptor or handle, if it has not already been closed - * and set the fd and handle to -1. - * Clear the cleaner so the close does not happen twice. - * Package private to allow it to be used in java.io. - * @throws IOException if close fails - */ - @SuppressWarnings("unchecked") - synchronized void close() throws IOException { - if (cleanup != null) { - cleanup.clear(); - cleanup = null; - } - close0(); - } - - /* - * Close the raw file descriptor or handle, if it has not already been closed - * and set the fd and handle to -1. - */ - private native void close0() throws IOException; - - /* - * Raw close of the file handle. - * Used only for last chance cleanup. - */ - private static native void cleanupClose0(long handle) throws IOException; - - /* - * Package private methods to track referents. 
- * If multiple streams point to the same FileDescriptor, we cycle - * through the list of all referents and call close() - */ - - /** - * Attach a Closeable to this FD for tracking. - * parent reference is added to otherParents when - * needed to make closeAll simpler. - */ - synchronized void attach(Closeable c) { - if (parent == null) { - // first caller gets to do this - parent = c; - } else if (otherParents == null) { - otherParents = new ArrayList<>(); - otherParents.add(parent); - otherParents.add(c); - } else { - otherParents.add(c); - } - } - - /** - * Cycle through all Closeables sharing this FD and call - * close() on each one. - * - * The caller closeable gets to call close0(). - */ - @SuppressWarnings("try") - synchronized void closeAll(Closeable releaser) throws IOException { - if (!closed) { - closed = true; - IOException ioe = null; - try (releaser) { - if (otherParents != null) { - for (Closeable referent : otherParents) { - try { - referent.close(); - } catch(IOException x) { - if (ioe == null) { - ioe = x; - } else { - ioe.addSuppressed(x); - } - } - } - } - } catch(IOException ex) { - /* - * If releaser close() throws IOException - * add other exceptions as suppressed. - */ - if (ioe != null) - ex.addSuppressed(ioe); - ioe = ex; - } finally { - if (ioe != null) - throw ioe; - } - } - } - - /** - * Cleanup for a FileDescriptor when it becomes phantom reachable. - * Create a cleanup if handle != -1. - * Windows closes files using handles and sockets via the fd. - * Network FileDescriptors using socket fd must provide their - * own PhantomCleanable to {@link #registerCleanup}. - * This implementation only clears thehandles. - *

- * Subclassed from {@code PhantomCleanable} so that {@code clear} can be - * called to disable the cleanup when the handle is closed by any means other - * than calling {@link FileDescriptor#close}. - * Otherwise, it may incorrectly close the handle after it has been reused. - */ - static final class FDCleanup extends PhantomCleanable { - private final long handle; - - static FDCleanup create(FileDescriptor fdo) { - return fdo.handle == -1L - ? null - : new FDCleanup(fdo, CleanerFactory.cleaner(), fdo.handle); - } - - /** - * Constructor for a phantom cleanable reference. - * @param obj the object to monitor - * @param cleaner the cleaner - * @param handle file handle to close - */ - private FDCleanup(Object obj, Cleaner cleaner, long handle) { - super(obj, cleaner); - this.handle = handle; - } - - /** - * Close the native handle. - */ - @Override - protected void performCleanup() { - try { - cleanupClose0(handle); - } catch (IOException ioe) { - throw new UncheckedIOException("close", ioe); - } - } - } -} diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/windows/classes/java/io/WinNTFileSystem.java --- a/src/java.base/windows/classes/java/io/WinNTFileSystem.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/windows/classes/java/io/WinNTFileSystem.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -43,12 +43,14 @@ private final char slash; private final char altSlash; private final char semicolon; + private final String userDir; public WinNTFileSystem() { Properties props = GetPropertyAction.privilegedGetProperties(); slash = props.getProperty("file.separator").charAt(0); semicolon = props.getProperty("path.separator").charAt(0); altSlash = (this.slash == '\\') ? 
'/' : '\\'; + userDir = props.getProperty("user.dir"); } private boolean isSlash(char c) { @@ -347,7 +349,11 @@ private String getUserPath() { /* For both compatibility and security, we must look this up every time */ - return normalize(System.getProperty("user.dir")); + SecurityManager sm = System.getSecurityManager(); + if (sm != null) { + sm.checkPropertyAccess("user.dir"); + } + return normalize(userDir); } private String getDrive(String path) { diff -r bec86eb4a71a -r 206a6f728ce5 src/java.base/windows/native/libjava/FileDescriptor_md.c --- a/src/java.base/windows/native/libjava/FileDescriptor_md.c Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.base/windows/native/libjava/FileDescriptor_md.c Mon Feb 26 10:36:34 2018 -0800 @@ -56,11 +56,6 @@ CHECK_NULL(IO_append_fdID = (*env)->GetFieldID(env, fdClass, "append", "Z")); } -JNIEXPORT jlong JNICALL -Java_java_io_FileDescriptor_getHandle(JNIEnv *env, jclass fdClass, jint fd) { - SET_HANDLE(fd); -} - /************************************************************** * File Descriptor */ @@ -73,13 +68,14 @@ } } -JNIEXPORT void JNICALL -Java_java_io_FileDescriptor_cleanupClose0(JNIEnv *env, jclass fdClass, jlong handle) { - if (handle != -1) { - if (CloseHandle((HANDLE)handle) == -1) { - JNU_ThrowIOExceptionWithLastError(env, "close failed"); - } - } +JNIEXPORT jlong JNICALL +Java_java_io_FileDescriptor_getHandle(JNIEnv *env, jclass fdClass, jint fd) { + SET_HANDLE(fd); +} + +JNIEXPORT jboolean JNICALL +Java_java_io_FileDescriptor_getAppend(JNIEnv *env, jclass fdClass, jint fd) { + return JNI_FALSE; } // instance method close0 for FileDescriptor @@ -87,3 +83,12 @@ Java_java_io_FileDescriptor_close0(JNIEnv *env, jobject this) { fileDescriptorClose(env, this); } + +JNIEXPORT void JNICALL +Java_java_io_FileCleanable_cleanupClose0(JNIEnv *env, jclass fdClass, jint unused, jlong handle) { + if (handle != -1) { + if (CloseHandle((HANDLE)handle) == -1) { + JNU_ThrowIOExceptionWithLastError(env, "close failed"); + } + } +} diff -r bec86eb4a71a -r 206a6f728ce5 src/java.security.jgss/share/classes/sun/security/krb5/internal/rcache/AuthList.java --- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/rcache/AuthList.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/rcache/AuthList.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,6 +55,9 @@ private final LinkedList entries; private final int lifespan; + // entries.getLast().ctime, updated after each cleanup. + private volatile int oldestTime = Integer.MIN_VALUE; + /** * Constructs a AuthList. */ @@ -67,11 +70,13 @@ * Puts the authenticator timestamp into the cache in descending order, * and throw an exception if it's already there. 
*/ - public void put(AuthTimeWithHash t, KerberosTime currentTime) + public synchronized void put(AuthTimeWithHash t, KerberosTime currentTime) throws KrbApErrException { if (entries.isEmpty()) { entries.addFirst(t); + oldestTime = t.ctime; + return; } else { AuthTimeWithHash temp = entries.getFirst(); int cmp = temp.compareTo(t); @@ -106,24 +111,26 @@ // let us cleanup while we are here long timeLimit = currentTime.getSeconds() - lifespan; - ListIterator it = entries.listIterator(0); - AuthTimeWithHash temp = null; - int index = -1; - while (it.hasNext()) { - // search expired timestamps. - temp = it.next(); - if (temp.ctime < timeLimit) { - index = entries.indexOf(temp); - break; + + // Only trigger a cleanup when the earliest entry is + // lifespan + 5 sec ago. This ensures a cleanup is done + // at most every 5 seconds so that we don't always + // addLast(removeLast). + if (oldestTime > timeLimit - 5) { + return; + } + + // and we remove the *enough* old ones (1 lifetime ago) + while (!entries.isEmpty()) { + AuthTimeWithHash removed = entries.removeLast(); + if (removed.ctime >= timeLimit) { + entries.addLast(removed); + oldestTime = removed.ctime; + return; } } - // It would be nice if LinkedList has a method called truncate(index). - if (index > -1) { - do { - // remove expired timestamps from the list. - entries.removeLast(); - } while(entries.size() > index); - } + + oldestTime = Integer.MIN_VALUE; } public boolean isEmpty() { diff -r bec86eb4a71a -r 206a6f728ce5 src/java.security.jgss/share/classes/sun/security/krb5/internal/rcache/MemoryCache.java --- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/rcache/MemoryCache.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/rcache/MemoryCache.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,9 @@ package sun.security.krb5.internal.rcache; -import java.util.*; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + import sun.security.krb5.internal.KerberosTime; import sun.security.krb5.internal.KrbApErrException; import sun.security.krb5.internal.ReplayCache; @@ -48,31 +50,18 @@ private static final int lifespan = KerberosTime.getDefaultSkew(); private static final boolean DEBUG = sun.security.krb5.internal.Krb5.DEBUG; - private final Map content = new HashMap<>(); + private final Map content = new ConcurrentHashMap<>(); @Override public synchronized void checkAndStore(KerberosTime currTime, AuthTimeWithHash time) throws KrbApErrException { String key = time.client + "|" + time.server; - AuthList rc = content.get(key); + content.computeIfAbsent(key, k -> new AuthList(lifespan)) + .put(time, currTime); if (DEBUG) { System.out.println("MemoryCache: add " + time + " to " + key); } - if (rc == null) { - rc = new AuthList(lifespan); - rc.put(time, currTime); - if (!rc.isEmpty()) { - content.put(key, rc); - } - } else { - if (DEBUG) { - System.out.println("MemoryCache: Existing AuthList:\n" + rc); - } - rc.put(time, currTime); - if (rc.isEmpty()) { - content.remove(key); - } - } + // TODO: clean up AuthList entries with only expired AuthTimeWithHash objects. 
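The MemoryCache hunk above swaps a HashMap plus a lookup-then-insert sequence for ConcurrentHashMap.computeIfAbsent, while AuthList.put becomes synchronized and throttles its own expiry sweep. The sketch below shows the same shape with simplified, hypothetical types (TinyReplayCache, Bucket, integer timestamps) instead of the krb5 classes:

    import java.util.LinkedList;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Simplified, hypothetical stand-ins for MemoryCache/AuthList; illustrative only.
    public class TinyReplayCache {
        private static final int LIFESPAN = 300;   // seconds, a stand-in for the default skew

        static final class Bucket {
            private final LinkedList<Integer> times = new LinkedList<>();   // newest first
            private volatile int oldestTime = Integer.MIN_VALUE;

            synchronized void put(int ctime, int now) {
                if (times.contains(ctime)) {
                    throw new IllegalStateException("replay detected");
                }
                times.addFirst(ctime);
                if (oldestTime == Integer.MIN_VALUE) {
                    oldestTime = ctime;
                }
                int timeLimit = now - LIFESPAN;
                // Throttle the sweep, as in AuthList.put above: do nothing until the
                // oldest entry is at least lifespan + 5 seconds old.
                if (oldestTime > timeLimit - 5) {
                    return;
                }
                while (!times.isEmpty()) {
                    int last = times.removeLast();
                    if (last >= timeLimit) {         // still live: put it back and stop
                        times.addLast(last);
                        oldestTime = last;
                        return;
                    }
                }
                oldestTime = Integer.MIN_VALUE;      // everything was expired
            }
        }

        private final Map<String, Bucket> content = new ConcurrentHashMap<>();

        void checkAndStore(String client, String server, int ctime, int now) {
            // Atomic get-or-create replaces the HashMap null-check-then-put sequence.
            content.computeIfAbsent(client + "|" + server, k -> new Bucket())
                   .put(ctime, now);
        }

        public static void main(String[] args) {
            TinyReplayCache cache = new TinyReplayCache();
            cache.checkAndStore("alice", "host", 1000, 1000);
            cache.checkAndStore("alice", "host", 1001, 1002);
        }
    }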
} public String toString() { diff -r bec86eb4a71a -r 206a6f728ce5 src/java.xml/share/classes/com/sun/org/apache/xml/internal/serializer/ToStream.java --- a/src/java.xml/share/classes/com/sun/org/apache/xml/internal/serializer/ToStream.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.xml/share/classes/com/sun/org/apache/xml/internal/serializer/ToStream.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2006, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved. */ /* * Licensed to the Apache Software Foundation (ASF) under one or more @@ -41,7 +41,6 @@ import javax.xml.transform.OutputKeys; import javax.xml.transform.Transformer; import javax.xml.transform.TransformerException; -import jdk.xml.internal.SecuritySupport; import org.w3c.dom.Node; import org.xml.sax.Attributes; import org.xml.sax.ContentHandler; @@ -52,7 +51,7 @@ * serializers (xml, html, text ...) that write output to a stream. * * @xsl.usage internal - * @LastModified: Nov 2017 + * @LastModified: Feb 2018 */ abstract public class ToStream extends SerializerBase { @@ -138,8 +137,7 @@ * but this value can be set through the xsl:output * extension attribute xalan:line-separator. */ - protected char[] m_lineSep = - SecuritySupport.getSystemProperty("line.separator").toCharArray(); + protected char[] m_lineSep = System.lineSeparator().toCharArray(); /** * True if the system line separator is to be used. diff -r bec86eb4a71a -r 206a6f728ce5 src/java.xml/share/classes/jdk/xml/internal/SecuritySupport.java --- a/src/java.xml/share/classes/jdk/xml/internal/SecuritySupport.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/java.xml/share/classes/jdk/xml/internal/SecuritySupport.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -45,7 +45,7 @@ * This class contains utility methods for reading resources in the JAXP packages */ public class SecuritySupport { - public final static String NEWLINE = getSystemProperty("line.separator", "\n"); + public final static String NEWLINE = System.lineSeparator(); /** * Cache for properties in java.home/conf/jaxp.properties diff -r bec86eb4a71a -r 206a6f728ce5 src/jdk.compiler/share/classes/com/sun/tools/javac/code/Symbol.java --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Symbol.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Symbol.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -568,14 +568,14 @@ return hiddenSym; } - /** Is this symbol inherited into a given class? + /** Is this symbol accessible in a given class? * PRE: If symbol's owner is a interface, * it is already assumed that the interface is a superinterface - of given class. + of the given class. * @param clazz The class for which we want to establish membership. * This must be a subclass of the member's owner.
*/ - public boolean isInheritedIn(Symbol clazz, Types types) { + public final boolean isAccessibleIn(Symbol clazz, Types types) { switch ((int)(flags_field & Flags.AccessFlags)) { default: // error recovery case PUBLIC: @@ -603,6 +603,17 @@ } } + /** Is this symbol inherited into a given class? + * PRE: If symbol's owner is a interface, + * it is already assumed that the interface is a superinterface + * of the given class. + * @param clazz The class for which we want to establish membership. + * This must be a subclass of the member's owner. + */ + public boolean isInheritedIn(Symbol clazz, Types types) { + return isAccessibleIn(clazz, types); + } + /** The (variable or method) symbol seen as a member of given * class type`site' (this might change the symbol's type). * This is used exclusively for producing diagnostics. diff -r bec86eb4a71a -r 206a6f728ce5 src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Analyzer.java --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Analyzer.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Analyzer.java Mon Feb 26 10:36:34 2018 -0800 @@ -304,7 +304,7 @@ @Override List rewrite(JCNewClass oldTree){ - JCMethodDecl md = (JCMethodDecl)decls(oldTree.def).head; + JCMethodDecl md = (JCMethodDecl)copier.copy(decls(oldTree.def).head); List params = md.params; JCBlock body = md.body; JCLambda newTree = make.at(oldTree).Lambda(params, body); diff -r bec86eb4a71a -r 206a6f728ce5 src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java Mon Feb 26 10:36:34 2018 -0800 @@ -2193,8 +2193,7 @@ List argtypes = argtypesBuf.toList(); List typeargtypes = attribTypes(tree.typeargs, localEnv); - // If we have made no mistakes in the class type... - if (clazztype.hasTag(CLASS)) { + if (clazztype.hasTag(CLASS) || clazztype.hasTag(ERROR)) { // Enums may not be instantiated except implicitly if ((clazztype.tsym.flags_field & Flags.ENUM) != 0 && (!env.tree.hasTag(VARDEF) || @@ -2381,7 +2380,8 @@ // If we already errored, be careful to avoid a further avalanche. ErrorType answers // false for isInterface call even when the original type is an interface. boolean implementing = clazztype.tsym.isInterface() || - clazztype.isErroneous() && clazztype.getOriginalType().tsym.isInterface(); + clazztype.isErroneous() && !clazztype.getOriginalType().hasTag(NONE) && + clazztype.getOriginalType().tsym.isInterface(); if (implementing) { cdef.implementing = List.of(clazz); @@ -2413,7 +2413,8 @@ finalargtypes = finalargtypes.map(deferredAttr.deferredCopier); } - clazztype = cdef.sym.type; + clazztype = clazztype.hasTag(ERROR) ? 
types.createErrorType(cdef.sym.type) + : cdef.sym.type; Symbol sym = tree.constructor = rs.resolveConstructor( tree.pos(), localEnv, clazztype, finalargtypes, typeargtypes); Assert.check(!sym.kind.isResolutionError()); @@ -5080,7 +5081,7 @@ */ private Type dummyMethodType(JCMethodDecl md) { Type restype = syms.unknownType; - if (md != null && md.restype.hasTag(TYPEIDENT)) { + if (md != null && md.restype != null && md.restype.hasTag(TYPEIDENT)) { JCPrimitiveTypeTree prim = (JCPrimitiveTypeTree)md.restype; if (prim.typetag == VOID) restype = syms.voidType; diff -r bec86eb4a71a -r 206a6f728ce5 src/jdk.compiler/share/classes/com/sun/tools/javac/model/JavacElements.java --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/model/JavacElements.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/model/JavacElements.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -632,8 +632,7 @@ } // Hidee must be accessible in hider's class. - // The method isInheritedIn is poorly named: it checks only access. - return hidee.isInheritedIn(hiderClass, types); + return hidee.isAccessibleIn(hiderClass, types); } @DefinedBy(Api.LANGUAGE_MODEL) diff -r bec86eb4a71a -r 206a6f728ce5 src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java Mon Feb 26 10:36:34 2018 -0800 @@ -1673,6 +1673,8 @@ CAST, EXPLICIT_LAMBDA, IMPLICIT_LAMBDA, + IMPLICIT_LAMBDA_ALL_VAR, + BAD_LAMBDA, PARENS } @@ -1681,9 +1683,92 @@ formalParameters(true) : implicitParameters(hasParens); + if (explicitParams) { + LambdaClassfier lambdaClassfier = new LambdaClassfier(); + for (JCVariableDecl param: params) { + if (param.vartype != null && + isRestrictedLocalVarTypeName(param.vartype) && + param.vartype.hasTag(TYPEARRAY)) { + log.error(DiagnosticFlag.SYNTAX, param.pos, Errors.VarNotAllowedArray); + } + if (param.vartype != null && param.name != names.empty) { + if (isRestrictedLocalVarTypeName(param.vartype)) { + lambdaClassfier.addImplicitVarParameter(); + } else { + lambdaClassfier.addExplicitParameter(); + } + } + if (param.vartype == null && param.name != names.empty || + param.vartype != null && param.name == names.empty) { + lambdaClassfier.addImplicitParameter(); + } + if (lambdaClassfier.result() == ParensResult.BAD_LAMBDA) { + break; + } + } + if (lambdaClassfier.diagFragment != null) { + log.error(DiagnosticFlag.SYNTAX, pos, Errors.InvalidLambdaParameterDeclaration(lambdaClassfier.diagFragment)); + } + } return lambdaExpressionOrStatementRest(params, pos); } + class LambdaClassfier { + ParensResult kind; //ParensResult.EXPLICIT_LAMBDA; + Fragment diagFragment; + List params; + + void addExplicitParameter() { + reduce(ParensResult.EXPLICIT_LAMBDA); + } + + void addImplicitVarParameter() { + reduce(ParensResult.IMPLICIT_LAMBDA_ALL_VAR); + } + + void addImplicitParameter() { + reduce(ParensResult.IMPLICIT_LAMBDA); + } + + private void reduce(ParensResult newKind) { + if (kind == null) { + kind = newKind; + } else if (kind != newKind && kind != ParensResult.BAD_LAMBDA) { + ParensResult 
currentKind = kind; + kind = ParensResult.BAD_LAMBDA; + switch (currentKind) { + case EXPLICIT_LAMBDA: + if (newKind == ParensResult.IMPLICIT_LAMBDA) { + diagFragment = Fragments.ImplicitAndExplicitNotAllowed; + } else if (newKind == ParensResult.IMPLICIT_LAMBDA_ALL_VAR) { + diagFragment = Fragments.VarAndExplicitNotAllowed; + } + break; + case IMPLICIT_LAMBDA: + if (newKind == ParensResult.EXPLICIT_LAMBDA) { + diagFragment = Fragments.ImplicitAndExplicitNotAllowed; + } else if (newKind == ParensResult.IMPLICIT_LAMBDA_ALL_VAR) { + diagFragment = Fragments.VarAndImplicitNotAllowed; + } + break; + case IMPLICIT_LAMBDA_ALL_VAR: + if (newKind == ParensResult.EXPLICIT_LAMBDA) { + diagFragment = Fragments.VarAndExplicitNotAllowed; + } else if (newKind == ParensResult.IMPLICIT_LAMBDA) { + diagFragment = Fragments.VarAndImplicitNotAllowed; + } + break; + default: + throw new AssertionError("unexpected option for field kind"); + } + } + } + + ParensResult result() { + return kind; + } + } + JCExpression lambdaExpressionOrStatementRest(List args, int pos) { checkSourceLevel(Feature.LAMBDA); accept(ARROW); @@ -3044,7 +3129,21 @@ return toP(F.at(pos).ReceiverVarDef(mods, pn, type)); } } else { - name = ident(); + if (!lambdaParameter || + LAX_IDENTIFIER.accepts(token.kind) || + mods.flags != Flags.PARAMETER || + mods.annotations.nonEmpty()) { + name = ident(); + } else { + /** if it is a lambda parameter and the token kind is not an identifier, + * and there are no modifiers or annotations, then this means that the compiler + * supposed the lambda to be explicit but it can contain a mix of implicit, + * var or explicit parameters. So we assign the error name to the parameter name + * instead of issuing an error and analyze the lambda parameters as a whole at + * a higher level. 
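The LambdaClassfier above reduces the per-parameter styles to a single result: the first style seen wins, and any later, different style collapses the classification to BAD_LAMBDA along with a diagnostic fragment. A standalone sketch of that reduction, using a plain enum and a string message instead of javac's ParensResult and Fragments (all names here are hypothetical):

    import java.util.List;

    // Standalone illustration of the style-mixing check; not javac's actual classes.
    public class LambdaStyleChecker {
        enum Style { EXPLICIT, IMPLICIT, IMPLICIT_VAR, BAD }

        private Style kind;          // null until the first parameter is classified
        private String diagnostic;   // set once two incompatible styles are seen

        void add(Style newKind) {
            if (kind == null) {
                kind = newKind;
            } else if (kind != newKind && kind != Style.BAD) {
                diagnostic = "cannot mix " + kind + " and " + newKind + " parameters";
                kind = Style.BAD;
            }
        }

        public static void main(String[] args) {
            // e.g. (var x, int y) -> ... mixes 'var' and an explicitly typed parameter
            LambdaStyleChecker checker = new LambdaStyleChecker();
            for (Style s : List.of(Style.IMPLICIT_VAR, Style.EXPLICIT)) {
                checker.add(s);
                if (checker.kind == Style.BAD) {
                    break;   // same early exit as the parser loop above
                }
            }
            System.out.println(checker.kind +
                    (checker.diagnostic == null ? "" : ": " + checker.diagnostic));
        }
    }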
+ */ + name = names.empty; + } } } if ((mods.flags & Flags.VARARGS) != 0 && @@ -3947,7 +4046,7 @@ // need to distinguish between vararg annos and array annos // look at typeAnnotationsPushedBack comment this.permitTypeAnnotationsPushBack = true; - JCExpression type = parseType(); + JCExpression type = parseType(lambdaParameter); this.permitTypeAnnotationsPushBack = false; if (token.kind == ELLIPSIS) { diff -r bec86eb4a71a -r 206a6f728ce5 src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties --- a/src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties Fri Feb 23 12:30:03 2018 +0530 +++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties Mon Feb 26 10:36:34 2018 -0800 @@ -1229,6 +1229,20 @@ compiler.err.var.not.allowed.compound=\ ''var'' is not allowed in a compound declaration +# 0: fragment +compiler.err.invalid.lambda.parameter.declaration=\ + invalid lambda parameter declaration\n\ + ({0}) + +compiler.misc.implicit.and.explicit.not.allowed=\ + cannot mix implicitly-typed and explicitly-typed parameters + +compiler.misc.var.and.explicit.not.allowed=\ + cannot mix ''var'' and explicitly-typed parameters + +compiler.misc.var.and.implicit.not.allowed=\ + cannot mix ''var'' and implicitly-typed parameters + compiler.misc.local.cant.infer.null=\ variable initializer is ''null'' diff -r bec86eb4a71a -r 206a6f728ce5 src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ArrayKlass.java --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ArrayKlass.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ArrayKlass.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -57,6 +57,7 @@ super(addr); } + public boolean isArrayKlass() { return true; } private static CIntField dimension; private static MetadataField higherDimension; private static MetadataField lowerDimension; diff -r bec86eb4a71a -r 206a6f728ce5 src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ConstantPool.java --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ConstantPool.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ConstantPool.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -339,24 +339,28 @@ } // returns null, if not resolved. - public InstanceKlass getFieldOrMethodKlassRefAt(int which) { + public Klass getFieldOrMethodKlassRefAt(int which) { int refIndex = getFieldOrMethodAt(which); int klassIndex = extractLowShortFromInt(refIndex); - return (InstanceKlass) getKlassAt(klassIndex); + return getKlassAt(klassIndex); } // returns null, if not resolved. 
public Method getMethodRefAt(int which) { - InstanceKlass klass = getFieldOrMethodKlassRefAt(which); + Klass klass = getFieldOrMethodKlassRefAt(which); if (klass == null) return null; Symbol name = getNameRefAt(which); Symbol sig = getSignatureRefAt(which); - return klass.findMethod(name, sig); + // Consider the super class for arrays. (java.lang.Object) + if (klass.isArrayKlass()) { + klass = klass.getJavaSuper(); + } + return ((InstanceKlass)klass).findMethod(name, sig); } // returns null, if not resolved. public Field getFieldRefAt(int which) { - InstanceKlass klass = getFieldOrMethodKlassRefAt(which); + InstanceKlass klass = (InstanceKlass)getFieldOrMethodKlassRefAt(which); if (klass == null) return null; Symbol name = getNameRefAt(which); Symbol sig = getSignatureRefAt(which); diff -r bec86eb4a71a -r 206a6f728ce5 src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Klass.java --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Klass.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Klass.java Mon Feb 26 10:36:34 2018 -0800 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -86,6 +86,7 @@ } public boolean isKlass() { return true; } + public boolean isArrayKlass() { return false; } // Fields private static AddressField javaMirror; diff -r bec86eb4a71a -r 206a6f728ce5 src/jdk.internal.le/share/classes/jdk/internal/jline/extra/EditingHistory.java --- a/src/jdk.internal.le/share/classes/jdk/internal/jline/extra/EditingHistory.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/jdk.internal.le/share/classes/jdk/internal/jline/extra/EditingHistory.java Mon Feb 26 10:36:34 2018 -0800 @@ -367,11 +367,11 @@ return count; } - public List currentSessionEntries() { + public List entries(boolean currentSession) { List result = new ArrayList<>(); for (Entry e : fullHistory) { - if (!(e.value() instanceof PersistentEntryMarker)) { + if (!currentSession || !(e.value() instanceof PersistentEntryMarker)) { result.add(e.value().toString()); } } diff -r bec86eb4a71a -r 206a6f728ce5 src/jdk.jdeps/share/classes/com/sun/tools/jdeprscan/Main.java --- a/src/jdk.jdeps/share/classes/com/sun/tools/jdeprscan/Main.java Fri Feb 23 12:30:03 2018 +0530 +++ b/src/jdk.jdeps/share/classes/com/sun/tools/jdeprscan/Main.java Mon Feb 26 10:36:34 2018 -0800 @@ -80,8 +80,8 @@ * - handling of covariant overrides * - handling of override of method found in multiple superinterfaces * - convert type/method/field output to Java source like syntax, e.g. - * instead of java/lang/Runtime.runFinalizersOnExit(Z)V - * print void java.lang.Runtime.runFinalizersOnExit(boolean) + * instead of java/lang/Character.isJavaLetter(C)Z + * print boolean java.lang.Character.isJavaLetter(char) * - more example output in man page * - more rigorous GNU style option parsing; use joptsimple?
* diff -r bec86eb4a71a -r 206a6f728ce5 src/jdk.jdeps/share/classes/com/sun/tools/jdeprscan/internals.md --- a/src/jdk.jdeps/share/classes/com/sun/tools/jdeprscan/internals.md Fri Feb 23 12:30:03 2018 +0530 +++ b/src/jdk.jdeps/share/classes/com/sun/tools/jdeprscan/internals.md Mon Feb 26 10:36:34 2018 -0800 @@ -195,15 +195,14 @@ **EXAMPLE OUTPUT** Given the following method declaration and annotation from the -`java.lang.Runtime` class, +`java.lang.Character` class, - @Deprecated(since="1.2", - forRemoval=true) - public static void runFinalizersOnExit(boolean value) + @Deprecated(since="1.1") + public static boolean isJavaLetter(char ch) the following line will be emitted from **jdeprscan -Xprint-csv**: - METHOD,java/lang/Runtime,runFinalizersOnExit(Z)V,1.2,true + METHOD,java/lang/Character,isJavaLetter(C)Z,1.1,false [RFC]: https://www.ietf.org/rfc/rfc4180.txt diff -r bec86eb4a71a -r 206a6f728ce5 src/jdk.jdeps/share/classes/com/sun/tools/jdeprscan/readme.md --- a/src/jdk.jdeps/share/classes/com/sun/tools/jdeprscan/readme.md Fri Feb 23 12:30:03 2018 +0530 +++ b/src/jdk.jdeps/share/classes/com/sun/tools/jdeprscan/readme.md Mon Feb 26 10:36:34 2018 -0800 @@ -1,6 +1,6 @@

genConv