# HG changeset patch # User ihse # Date 1528376473 -7200 # Node ID 28b415bc6f4db6bb35ef55f6a8ea7ae8f62fcc41 # Parent f0d5c39dfbc1d076d9e41c25bf0e4c556a597e23# Parent bd6b78feb6a330dbc29ced62d11feb981cb15648 Merge diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp --- a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -214,11 +214,9 @@ assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code"); // Does store cross heap regions? - if (G1RSBarrierRegionFilter) { - __ xorr(tmp1, store_addr, new_val); - __ srdi_(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes); - __ beq(CCR0, filtered); - } + __ xorr(tmp1, store_addr, new_val); + __ srdi_(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes); + __ beq(CCR0, filtered); // Crosses regions, storing NULL? if (not_null) { diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp --- a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -273,16 +273,14 @@ // Does store cross heap regions? // It does if the two addresses specify different grain addresses. - if (G1RSBarrierRegionFilter) { - if (VM_Version::has_DistinctOpnds()) { - __ z_xgrk(Rtmp1, Rstore_addr, Rnew_val); - } else { - __ z_lgr(Rtmp1, Rstore_addr); - __ z_xgr(Rtmp1, Rnew_val); - } - __ z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes); - __ z_bre(filtered); + if (VM_Version::has_DistinctOpnds()) { + __ z_xgrk(Rtmp1, Rstore_addr, Rnew_val); + } else { + __ z_lgr(Rtmp1, Rstore_addr); + __ z_xgr(Rtmp1, Rnew_val); } + __ z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes); + __ z_bre(filtered); // Crosses regions, storing NULL? 
if (not_null) { diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.cpp --- a/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -369,12 +369,10 @@ G1BarrierSet* bs = barrier_set_cast(BarrierSet::barrier_set()); - if (G1RSBarrierRegionFilter) { - __ xor3(store_addr, new_val, tmp); - __ srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp); + __ xor3(store_addr, new_val, tmp); + __ srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp); - __ cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered); - } + __ cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered); // If the "store_addr" register is an "in" or "local" register, move it to // a scratch reg so we can pass it as an argument. diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/classfile/classLoaderHierarchyDCmd.cpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/classfile/classLoaderHierarchyDCmd.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -0,0 +1,456 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018 SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" + +#include "classfile/classLoaderData.inline.hpp" +#include "classfile/classLoaderHierarchyDCmd.hpp" +#include "memory/allocation.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/safepoint.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/ostream.hpp" + + +ClassLoaderHierarchyDCmd::ClassLoaderHierarchyDCmd(outputStream* output, bool heap) + : DCmdWithParser(output, heap) + , _show_classes("show-classes", "Print loaded classes.", "BOOLEAN", false, "false") + , _verbose("verbose", "Print detailed information.", "BOOLEAN", false, "false") { + _dcmdparser.add_dcmd_option(&_show_classes); + _dcmdparser.add_dcmd_option(&_verbose); +} + + +int ClassLoaderHierarchyDCmd::num_arguments() { + ResourceMark rm; + ClassLoaderHierarchyDCmd* dcmd = new ClassLoaderHierarchyDCmd(NULL, false); + if (dcmd != NULL) { + DCmdMark mark(dcmd); + return dcmd->_dcmdparser.num_arguments(); + } else { + return 0; + } +} + +// Helper class for drawing the branches to the left of a node. +class BranchTracker : public StackObj { + // "" + // " |---" + // " | | + // " | " + // " | |--- + // " | |--- + // ^^^^^^^ ^^^ + // A B + + // Some terms for the graphics: + // - branch: vertical connection between a node's ancestor to a later sibling. + // - branchwork: (A) the string to print as a prefix at the start of each line, contains all branches. + // - twig (B): Length of the dashed line connecting a node to its branch. + // - branch spacing: how many spaces between branches are printed. 
+ +public: + + enum { max_depth = 64, twig_len = 2, branch_spacing = 5 }; + +private: + + char _branches[max_depth]; + int _pos; + +public: + BranchTracker() + : _pos(0) {} + + void push(bool has_branch) { + if (_pos < max_depth) { + _branches[_pos] = has_branch ? '|' : ' '; + } + _pos ++; // beyond max depth, omit branch drawing but do count on. + } + + void pop() { + assert(_pos > 0, "must be"); + _pos --; + } + + void print(outputStream* st) { + for (int i = 0; i < _pos; i ++) { + st->print("%c%.*s", _branches[i], branch_spacing, " "); + } + } + + class Mark { + BranchTracker& _tr; + public: + Mark(BranchTracker& tr, bool has_branch_here) + : _tr(tr) { _tr.push(has_branch_here); } + ~Mark() { _tr.pop(); } + }; + +}; // end: BranchTracker + +struct LoadedClassInfo : public ResourceObj { +public: + LoadedClassInfo* _next; + Klass* const _klass; + const ClassLoaderData* const _cld; + + LoadedClassInfo(Klass* klass, const ClassLoaderData* cld) + : _klass(klass), _cld(cld) {} + +}; + +class LoaderTreeNode : public ResourceObj { + + // We walk the CLDG and, for each CLD which is non-anonymous, add + // a tree node. To add a node we need its parent node; if it itself + // does not exist yet, we add a preliminary node for it. This preliminary + // node just contains its loader oop; later, when encountering its CLD in + // our CLDG walk, we complete the missing information in this node. + + const oop _loader_oop; + const ClassLoaderData* _cld; + + LoaderTreeNode* _child; + LoaderTreeNode* _next; + + LoadedClassInfo* _classes; + int _num_classes; + + LoadedClassInfo* _anon_classes; + int _num_anon_classes; + + void print_with_childs(outputStream* st, BranchTracker& branchtracker, + bool print_classes, bool verbose) const { + + ResourceMark rm; + + if (_cld == NULL) { + // Not sure how this could happen: we added a preliminary node for a parent but then never encountered + // its CLD? + return; + } + + // Retrieve information. 
+ const Klass* const loader_klass = _cld->class_loader_klass(); + const Symbol* const loader_name = _cld->class_loader_name(); + + branchtracker.print(st); + + // e.g. "+--- jdk.internal.reflect.DelegatingClassLoader" + st->print("+%.*s", BranchTracker::twig_len, "----------"); + if (_cld->is_the_null_class_loader_data()) { + st->print(" "); + } else { + if (loader_name != NULL) { + st->print(" \"%s\",", loader_name->as_C_string()); + } + st->print(" %s", loader_klass != NULL ? loader_klass->external_name() : "??"); + st->print(" {" PTR_FORMAT "}", p2i(_loader_oop)); + } + st->cr(); + + // Output following this node (node details and child nodes) - up to the next sibling node + // needs to be prefixed with "|" if there is a follow up sibling. + const bool have_sibling = _next != NULL; + BranchTracker::Mark trm(branchtracker, have_sibling); + + { + // optional node details following this node needs to be prefixed with "|" + // if there are follow up child nodes. + const bool have_child = _child != NULL; + BranchTracker::Mark trm(branchtracker, have_child); + + // Empty line + branchtracker.print(st); + st->cr(); + + const int indentation = 18; + + if (verbose) { + branchtracker.print(st); + st->print_cr("%*s " PTR_FORMAT, indentation, "Loader Data:", p2i(_cld)); + branchtracker.print(st); + st->print_cr("%*s " PTR_FORMAT, indentation, "Loader Klass:", p2i(loader_klass)); + + // Empty line + branchtracker.print(st); + st->cr(); + } + + if (print_classes) { + + if (_classes != NULL) { + for (LoadedClassInfo* lci = _classes; lci; lci = lci->_next) { + branchtracker.print(st); + if (lci == _classes) { // first iteration + st->print("%*s ", indentation, "Classes:"); + } else { + st->print("%*s ", indentation, ""); + } + st->print("%s", lci->_klass->external_name()); + st->cr(); + // Non-anonymous classes should live in the primary CLD of its loader + assert(lci->_cld == _cld, "must be"); + } + branchtracker.print(st); + st->print("%*s ", indentation, ""); + 
st->print_cr("(%u class%s)", _num_classes, (_num_classes == 1) ? "" : "es"); + + // Empty line + branchtracker.print(st); + st->cr(); + } + + if (_anon_classes != NULL) { + for (LoadedClassInfo* lci = _anon_classes; lci; lci = lci->_next) { + branchtracker.print(st); + if (lci == _anon_classes) { // first iteration + st->print("%*s ", indentation, "Anonymous Classes:"); + } else { + st->print("%*s ", indentation, ""); + } + st->print("%s", lci->_klass->external_name()); + // For anonymous classes, also print CLD if verbose. Should be a different one than the primary CLD. + assert(lci->_cld != _cld, "must be"); + if (verbose) { + st->print(" (CLD: " PTR_FORMAT ")", p2i(lci->_cld)); + } + st->cr(); + } + branchtracker.print(st); + st->print("%*s ", indentation, ""); + st->print_cr("(%u anonymous class%s)", _num_anon_classes, (_num_anon_classes == 1) ? "" : "es"); + + // Empty line + branchtracker.print(st); + st->cr(); + } + + } // end: print_classes + + } // Pop branchtracker mark + + // Print children, recursively + LoaderTreeNode* c = _child; + while (c != NULL) { + c->print_with_childs(st, branchtracker, print_classes, verbose); + c = c->_next; + } + + } + +public: + + LoaderTreeNode(const oop loader_oop) + : _loader_oop(loader_oop), _cld(NULL) + , _child(NULL), _next(NULL) + , _classes(NULL), _anon_classes(NULL) + , _num_classes(0), _num_anon_classes(0) {} + + void set_cld(const ClassLoaderData* cld) { + _cld = cld; + } + + void add_child(LoaderTreeNode* info) { + info->_next = _child; + _child = info; + } + + void add_sibling(LoaderTreeNode* info) { + assert(info->_next == NULL, "must be"); + info->_next = _next; + _next = info; + } + + void add_classes(LoadedClassInfo* first_class, int num_classes, bool anonymous) { + LoadedClassInfo** p_list_to_add_to = anonymous ? &_anon_classes : &_classes; + // Search tail. 
+ while ((*p_list_to_add_to) != NULL) { + p_list_to_add_to = &(*p_list_to_add_to)->_next; + } + *p_list_to_add_to = first_class; + if (anonymous) { + _num_anon_classes += num_classes; + } else { + _num_classes += num_classes; + } + } + + const ClassLoaderData* cld() const { + return _cld; + } + + const oop loader_oop() const { + return _loader_oop; + } + + LoaderTreeNode* find(const oop loader_oop) { + LoaderTreeNode* result = NULL; + if (_loader_oop == loader_oop) { + result = this; + } else { + LoaderTreeNode* c = _child; + while (c != NULL && result == NULL) { + result = c->find(loader_oop); + c = c->_next; + } + } + return result; + } + + void print_with_childs(outputStream* st, bool print_classes, bool print_add_info) const { + BranchTracker bwt; + print_with_childs(st, bwt, print_classes, print_add_info); + } + +}; + +class LoadedClassCollectClosure : public KlassClosure { +public: + LoadedClassInfo* _list; + const ClassLoaderData* _cld; + int _num_classes; + LoadedClassCollectClosure(const ClassLoaderData* cld) + : _list(NULL), _cld(cld), _num_classes(0) {} + void do_klass(Klass* k) { + LoadedClassInfo* lki = new LoadedClassInfo(k, _cld); + lki->_next = _list; + _list = lki; + _num_classes ++; + } +}; + +class LoaderInfoScanClosure : public CLDClosure { + + const bool _print_classes; + const bool _verbose; + LoaderTreeNode* _root; + + static void fill_in_classes(LoaderTreeNode* info, const ClassLoaderData* cld) { + assert(info != NULL && cld != NULL, "must be"); + LoadedClassCollectClosure lccc(cld); + const_cast(cld)->classes_do(&lccc); + if (lccc._num_classes > 0) { + info->add_classes(lccc._list, lccc._num_classes, cld->is_anonymous()); + } + } + + LoaderTreeNode* find_node_or_add_empty_node(oop loader_oop) { + + assert(_root != NULL, "root node must exist"); + + if (loader_oop == NULL) { + return _root; + } + + // Check if a node for this oop already exists. + LoaderTreeNode* info = _root->find(loader_oop); + + if (info == NULL) { + // It does not. 
Create a node. + info = new LoaderTreeNode(loader_oop); + + // Add it to tree. + LoaderTreeNode* parent_info = NULL; + + // Recursively add parent nodes if needed. + const oop parent_oop = java_lang_ClassLoader::parent(loader_oop); + if (parent_oop == NULL) { + parent_info = _root; + } else { + parent_info = find_node_or_add_empty_node(parent_oop); + } + assert(parent_info != NULL, "must be"); + + parent_info->add_child(info); + } + return info; + } + + +public: + LoaderInfoScanClosure(bool print_classes, bool verbose) + : _print_classes(print_classes), _verbose(verbose), _root(NULL) { + _root = new LoaderTreeNode(NULL); + } + + void print_results(outputStream* st) const { + _root->print_with_childs(st, _print_classes, _verbose); + } + + void do_cld (ClassLoaderData* cld) { + + // We do not display unloading loaders, for now. + if (cld->is_unloading()) { + return; + } + + const oop loader_oop = cld->class_loader(); + + LoaderTreeNode* info = find_node_or_add_empty_node(loader_oop); + assert(info != NULL, "must be"); + + // Update CLD in node, but only if this is the primary CLD for this loader. + if (cld->is_anonymous() == false) { + assert(info->cld() == NULL, "there should be only one primary CLD per loader"); + info->set_cld(cld); + } + + // Add classes. + fill_in_classes(info, cld); + } + +}; + + +class ClassLoaderHierarchyVMOperation : public VM_Operation { + outputStream* const _out; + const bool _show_classes; + const bool _verbose; +public: + ClassLoaderHierarchyVMOperation(outputStream* out, bool show_classes, bool verbose) : + _out(out), _show_classes(show_classes), _verbose(verbose) + {} + + VMOp_Type type() const { + return VMOp_ClassLoaderHierarchyOperation; + } + + void doit() { + assert(SafepointSynchronize::is_at_safepoint(), "must be a safepoint"); + ResourceMark rm; + LoaderInfoScanClosure cl (_show_classes, _verbose); + ClassLoaderDataGraph::cld_do(&cl); + cl.print_results(_out); + } +}; + +// This command needs to be executed at a safepoint. 
+void ClassLoaderHierarchyDCmd::execute(DCmdSource source, TRAPS) { + ClassLoaderHierarchyVMOperation op(output(), _show_classes.value(), _verbose.value()); + VMThread::execute(&op); +} diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/classfile/classLoaderHierarchyDCmd.hpp --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/classfile/classLoaderHierarchyDCmd.hpp Thu Jun 07 15:01:13 2018 +0200 @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018 SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef HOTSPOT_SHARE_CLASSFILE_CLASSLOADERHIERARCHYDCMD_HPP_ +#define HOTSPOT_SHARE_CLASSFILE_CLASSLOADERHIERARCHYDCMD_HPP_ + +#include "services/diagnosticCommand.hpp" + +class ClassLoaderHierarchyDCmd: public DCmdWithParser { + DCmdArgument _show_classes; + DCmdArgument _verbose; +public: + + ClassLoaderHierarchyDCmd(outputStream* output, bool heap); + + static const char* name() { + return "VM.classloaders"; + } + + static const char* description() { + return "Prints classloader hierarchy."; + } + static const char* impact() { + return "Medium: Depends on number of class loaders and classes loaded."; + } + static const JavaPermission permission() { + JavaPermission p = {"java.lang.management.ManagementPermission", + "monitor", NULL}; + return p; + } + static int num_arguments(); + virtual void execute(DCmdSource source, TRAPS); + +}; + +#endif /* HOTSPOT_SHARE_CLASSFILE_CLASSLOADERHIERARCHYDCMD_HPP_ */ diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/classfile/stringTable.cpp --- a/src/hotspot/share/classfile/stringTable.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/classfile/stringTable.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -29,7 +29,10 @@ #include "classfile/stringTable.hpp" #include "classfile/systemDictionary.hpp" #include "gc/shared/collectedHeap.hpp" +#include "gc/shared/oopStorage.inline.hpp" +#include "gc/shared/oopStorageParState.inline.hpp" #include "logging/log.hpp" +#include "logging/logStream.hpp" #include "memory/allocation.inline.hpp" #include "memory/filemap.hpp" #include "memory/metaspaceShared.hpp" @@ -38,171 +41,196 @@ #include "oops/access.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/typeArrayOop.inline.hpp" +#include "oops/weakHandle.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/handles.inline.hpp" #include "runtime/mutexLocker.hpp" #include "runtime/safepointVerifiers.hpp" +#include "runtime/timerTrace.hpp" +#include "runtime/interfaceSupport.inline.hpp" #include 
"services/diagnosticCommand.hpp" -#include "utilities/hashtable.inline.hpp" +#include "utilities/concurrentHashTable.inline.hpp" +#include "utilities/concurrentHashTableTasks.inline.hpp" #include "utilities/macros.hpp" -// the number of buckets a thread claims -const int ClaimChunkSize = 32; - -#ifdef ASSERT -class StableMemoryChecker : public StackObj { - enum { _bufsize = wordSize*4 }; - - address _region; - jint _size; - u1 _save_buf[_bufsize]; - - int sample(u1* save_buf) { - if (_size <= _bufsize) { - memcpy(save_buf, _region, _size); - return _size; - } else { - // copy head and tail - memcpy(&save_buf[0], _region, _bufsize/2); - memcpy(&save_buf[_bufsize/2], _region + _size - _bufsize/2, _bufsize/2); - return (_bufsize/2)*2; - } - } - - public: - StableMemoryChecker(const void* region, jint size) { - _region = (address) region; - _size = size; - sample(_save_buf); - } - - bool verify() { - u1 check_buf[sizeof(_save_buf)]; - int check_size = sample(check_buf); - return (0 == memcmp(_save_buf, check_buf, check_size)); - } - - void set_region(const void* region) { _region = (address) region; } -}; -#endif - +// We prefer short chains of avg 2 +#define PREF_AVG_LIST_LEN 2 +// 2^24 is max size +#define END_SIZE 24 +// If a chain gets to 32 something might be wrong +#define REHASH_LEN 32 +// If we have as many dead items as 50% of the number of bucket +#define CLEAN_DEAD_HIGH_WATER_MARK 0.5 // -------------------------------------------------------------------------- StringTable* StringTable::_the_table = NULL; bool StringTable::_shared_string_mapped = false; -bool StringTable::_needs_rehashing = false; - -volatile int StringTable::_parallel_claimed_idx = 0; - CompactHashtable StringTable::_shared_table; +bool StringTable::_alt_hash = false; -// Pick hashing algorithm -unsigned int StringTable::hash_string(const jchar* s, int len) { - return use_alternate_hashcode() ? 
alt_hash_string(s, len) : - java_lang_String::hash_code(s, len); -} - -unsigned int StringTable::alt_hash_string(const jchar* s, int len) { - return AltHashing::murmur3_32(seed(), s, len); -} +static juint murmur_seed = 0; -unsigned int StringTable::hash_string(oop string) { - EXCEPTION_MARK; - if (string == NULL) { - return hash_string((jchar*)NULL, 0); - } - ResourceMark rm(THREAD); - // All String oops are hashed as unicode - int length; - jchar* chars = java_lang_String::as_unicode_string(string, length, THREAD); - if (chars != NULL) { - return hash_string(chars, length); - } else { - vm_exit_out_of_memory(length, OOM_MALLOC_ERROR, "unable to create Unicode string for verification"); - return 0; - } -} - -oop StringTable::string_object(HashtableEntry* entry) { - return RootAccess::oop_load(entry->literal_addr()); -} - -oop StringTable::string_object_no_keepalive(HashtableEntry* entry) { - // The AS_NO_KEEPALIVE peeks at the oop without keeping it alive. - // This is *very dangerous* in general but is okay in this specific - // case. The subsequent oop_load keeps the oop alive if it it matched - // the jchar* string. - return RootAccess::oop_load(entry->literal_addr()); -} - -void StringTable::set_string_object(HashtableEntry* entry, oop string) { - RootAccess::oop_store(entry->literal_addr(), string); -} - -oop StringTable::lookup_shared(jchar* name, int len, unsigned int hash) { - assert(hash == java_lang_String::hash_code(name, len), - "hash must be computed using java_lang_String::hash_code"); - return _shared_table.lookup((const char*)name, hash, len); +uintx hash_string(const jchar* s, int len, bool useAlt) { + return useAlt ? 
+ AltHashing::murmur3_32(murmur_seed, s, len) : + java_lang_String::hash_code(s, len); } -oop StringTable::lookup_in_main_table(int index, jchar* name, - int len, unsigned int hash) { - int count = 0; - for (HashtableEntry* l = bucket(index); l != NULL; l = l->next()) { - count++; - if (l->hash() == hash) { - if (java_lang_String::equals(string_object_no_keepalive(l), name, len)) { - // We must perform a new load with string_object() that keeps the string - // alive as we must expose the oop as strongly reachable when exiting - // this context, in case the oop gets published. - return string_object(l); - } +class StringTableConfig : public StringTableHash::BaseConfig { + private: + public: + static uintx get_hash(WeakHandle const& value, + bool* is_dead) { + EXCEPTION_MARK; + oop val_oop = value.peek(); + if (val_oop == NULL) { + *is_dead = true; + return 0; + } + *is_dead = false; + ResourceMark rm(THREAD); + // All String oops are hashed as unicode + int length; + jchar* chars = java_lang_String::as_unicode_string(val_oop, length, THREAD); + if (chars != NULL) { + return hash_string(chars, length, StringTable::_alt_hash); } + vm_exit_out_of_memory(length, OOM_MALLOC_ERROR, "get hash from oop"); + return 0; } - // If the bucket size is too deep check if this hash code is insufficient. 
- if (count >= rehash_count && !needs_rehashing()) { - _needs_rehashing = check_rehash_table(count); + // We use default allocation/deallocation but counted + static void* allocate_node(size_t size, + WeakHandle const& value) { + StringTable::item_added(); + return StringTableHash::BaseConfig::allocate_node(size, value); + } + static void free_node(void* memory, + WeakHandle const& value) { + value.release(); + StringTableHash::BaseConfig::free_node(memory, value); + StringTable::item_removed(); + } +}; + +class StringTableLookupJchar : StackObj { + private: + Thread* _thread; + uintx _hash; + int _len; + const jchar* _str; + Handle _found; + + public: + StringTableLookupJchar(Thread* thread, uintx hash, const jchar* key, int len) + : _thread(thread), _hash(hash), _str(key), _len(len) { + } + uintx get_hash() const { + return _hash; } - return NULL; + bool equals(WeakHandle* value, bool* is_dead) { + oop val_oop = value->peek(); + if (val_oop == NULL) { + // dead oop, mark this hash dead for cleaning + *is_dead = true; + return false; + } + bool equals = java_lang_String::equals(val_oop, (jchar*)_str, _len); + if (!equals) { + return false; + } + // Need to resolve weak handle and Handleize through possible safepoint. + _found = Handle(_thread, value->resolve()); + return true; + } +}; + +class StringTableLookupOop : public StackObj { + private: + Thread* _thread; + uintx _hash; + Handle _find; + Handle _found; // Might be a different oop with the same value that's already + // in the table, which is the point. 
+ public: + StringTableLookupOop(Thread* thread, uintx hash, Handle handle) + : _thread(thread), _hash(hash), _find(handle) { } + + uintx get_hash() const { + return _hash; + } + + bool equals(WeakHandle* value, bool* is_dead) { + oop val_oop = value->peek(); + if (val_oop == NULL) { + // dead oop, mark this hash dead for cleaning + *is_dead = true; + return false; + } + bool equals = java_lang_String::equals(_find(), val_oop); + if (!equals) { + return false; + } + // Need to resolve weak handle and Handleize through possible safepoint. + _found = Handle(_thread, value->resolve()); + return true; + } +}; + +static size_t ceil_pow_2(uintx val) { + size_t ret; + for (ret = 1; ((size_t)1 << ret) < val; ++ret); + return ret; } - -oop StringTable::basic_add(int index_arg, Handle string, jchar* name, - int len, unsigned int hashValue_arg, TRAPS) { - - assert(java_lang_String::equals(string(), name, len), - "string must be properly initialized"); - // Cannot hit a safepoint in this function because the "this" pointer can move. - NoSafepointVerifier nsv; +StringTable::StringTable() : _local_table(NULL), _current_size(0), _has_work(0), + _needs_rehashing(false), _weak_handles(NULL), _items(0), _uncleaned_items(0) { + _weak_handles = new OopStorage("StringTable weak", + StringTableWeakAlloc_lock, + StringTableWeakActive_lock); + size_t start_size_log_2 = ceil_pow_2(StringTableSize); + _current_size = ((size_t)1) << start_size_log_2; + log_trace(stringtable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")", + _current_size, start_size_log_2); + _local_table = new StringTableHash(start_size_log_2, END_SIZE, REHASH_LEN); +} - // Check if the symbol table has been rehashed, if so, need to recalculate - // the hash value and index before second lookup. 
- unsigned int hashValue; - int index; - if (use_alternate_hashcode()) { - hashValue = alt_hash_string(name, len); - index = hash_to_index(hashValue); - } else { - hashValue = hashValue_arg; - index = index_arg; - } +size_t StringTable::item_added() { + return Atomic::add((size_t)1, &(the_table()->_items)); +} - // Since look-up was done lock-free, we need to check if another - // thread beat us in the race to insert the symbol. - - // No need to lookup the shared table from here since the caller (intern()) already did - oop test = lookup_in_main_table(index, name, len, hashValue); // calls lookup(u1*, int) - if (test != NULL) { - // Entry already added - return test; - } - - HashtableEntry* entry = new_entry(hashValue, string()); - add_entry(index, entry); - return string(); +size_t StringTable::items_to_clean(size_t ncl) { + size_t total = Atomic::add((size_t)ncl, &(the_table()->_uncleaned_items)); + log_trace(stringtable)( + "Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT, + the_table()->_uncleaned_items, ncl, total); + return total; } +void StringTable::item_removed() { + Atomic::add((size_t)-1, &(the_table()->_items)); + Atomic::add((size_t)-1, &(the_table()->_uncleaned_items)); +} +double StringTable::get_load_factor() { + return (_items*1.0)/_current_size; +} + +double StringTable::get_dead_factor() { + return (_uncleaned_items*1.0)/_current_size; +} + +size_t StringTable::table_size(Thread* thread) { + return ((size_t)(1)) << _local_table->get_size_log2(thread != NULL ? 
thread + : Thread::current()); +} + +void StringTable::trigger_concurrent_work() { + MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); + the_table()->_has_work = true; + Service_lock->notify_all(); +} + +// Probing oop StringTable::lookup(Symbol* symbol) { ResourceMark rm; int length; @@ -211,71 +239,45 @@ } oop StringTable::lookup(jchar* name, int len) { - // shared table always uses java_lang_String::hash_code unsigned int hash = java_lang_String::hash_code(name, len); - oop string = lookup_shared(name, len, hash); + oop string = StringTable::the_table()->lookup_shared(name, len, hash); if (string != NULL) { return string; } - if (use_alternate_hashcode()) { - hash = alt_hash_string(name, len); + if (StringTable::_alt_hash) { + hash = hash_string(name, len, true); } - int index = the_table()->hash_to_index(hash); - string = the_table()->lookup_in_main_table(index, name, len, hash); - - return string; + return StringTable::the_table()->do_lookup(name, len, hash); } -oop StringTable::intern(Handle string_or_null, jchar* name, - int len, TRAPS) { - // shared table always uses java_lang_String::hash_code - unsigned int hashValue = java_lang_String::hash_code(name, len); - oop found_string = lookup_shared(name, len, hashValue); - if (found_string != NULL) { - return found_string; - } - if (use_alternate_hashcode()) { - hashValue = alt_hash_string(name, len); +class StringTableGet : public StackObj { + Thread* _thread; + Handle _return; + public: + StringTableGet(Thread* thread) : _thread(thread) {} + void operator()(WeakHandle* val) { + oop result = val->resolve(); + assert(result != NULL, "Result should be reachable"); + _return = Handle(_thread, result); } - int index = the_table()->hash_to_index(hashValue); - found_string = the_table()->lookup_in_main_table(index, name, len, hashValue); - - // Found - if (found_string != NULL) { - return found_string; + oop get_res_oop() { + return _return(); } - - debug_only(StableMemoryChecker smc(name, len * 
sizeof(name[0]))); - assert(!Universe::heap()->is_in_reserved(name), - "proposed name of symbol must be stable"); +}; - HandleMark hm(THREAD); // cleanup strings created - Handle string; - // try to reuse the string if possible - if (!string_or_null.is_null()) { - string = string_or_null; - } else { - string = java_lang_String::create_from_unicode(name, len, CHECK_NULL); +oop StringTable::do_lookup(jchar* name, int len, uintx hash) { + Thread* thread = Thread::current(); + StringTableLookupJchar lookup(thread, hash, name, len); + StringTableGet stg(thread); + bool rehash_warning; + _local_table->get(thread, lookup, stg, &rehash_warning); + if (rehash_warning) { + _needs_rehashing = true; } - - // Deduplicate the string before it is interned. Note that we should never - // deduplicate a string after it has been interned. Doing so will counteract - // compiler optimizations done on e.g. interned string literals. - Universe::heap()->deduplicate_string(string()); - - // Grab the StringTable_lock before getting the_table() because it could - // change at safepoint. 
- oop added_or_found; - { - MutexLocker ml(StringTable_lock, THREAD); - // Otherwise, add to symbol to table - added_or_found = the_table()->basic_add(index, string, name, len, - hashValue, CHECK_NULL); - } - - return added_or_found; + return stg.get_res_oop(); } +// Interning oop StringTable::intern(Symbol* symbol, TRAPS) { if (symbol == NULL) return NULL; ResourceMark rm(THREAD); @@ -286,19 +288,17 @@ return result; } - -oop StringTable::intern(oop string, TRAPS) -{ +oop StringTable::intern(oop string, TRAPS) { if (string == NULL) return NULL; ResourceMark rm(THREAD); int length; Handle h_string (THREAD, string); - jchar* chars = java_lang_String::as_unicode_string(string, length, CHECK_NULL); + jchar* chars = java_lang_String::as_unicode_string(string, length, + CHECK_NULL); oop result = intern(h_string, chars, length, CHECK_NULL); return result; } - oop StringTable::intern(const char* utf8_string, TRAPS) { if (utf8_string == NULL) return NULL; ResourceMark rm(THREAD); @@ -310,340 +310,449 @@ return result; } -void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) { - BucketUnlinkContext context; - buckets_unlink_or_oops_do(is_alive, f, 0, the_table()->table_size(), &context); - _the_table->bulk_free_entries(&context); - *processed = context._num_processed; - *removed = context._num_removed; -} - -void StringTable::possibly_parallel_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int* processed, int* removed) { - // Readers of the table are unlocked, so we should only be removing - // entries at a safepoint. 
- assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); - const int limit = the_table()->table_size(); - - BucketUnlinkContext context; - for (;;) { - // Grab next set of buckets to scan - int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize; - if (start_idx >= limit) { - // End of table - break; - } - - int end_idx = MIN2(limit, start_idx + ClaimChunkSize); - buckets_unlink_or_oops_do(is_alive, f, start_idx, end_idx, &context); +oop StringTable::intern(Handle string_or_null_h, jchar* name, int len, TRAPS) { + // shared table always uses java_lang_String::hash_code + unsigned int hash = java_lang_String::hash_code(name, len); + oop found_string = StringTable::the_table()->lookup_shared(name, len, hash); + if (found_string != NULL) { + return found_string; } - _the_table->bulk_free_entries(&context); - *processed = context._num_processed; - *removed = context._num_removed; + if (StringTable::_alt_hash) { + hash = hash_string(name, len, true); + } + return StringTable::the_table()->do_intern(string_or_null_h, name, len, + hash, CHECK_NULL); } -void StringTable::buckets_oops_do(OopClosure* f, int start_idx, int end_idx) { - const int limit = the_table()->table_size(); +class StringTableCreateEntry : public StackObj { + private: + Thread* _thread; + Handle _return; + Handle _store; + public: + StringTableCreateEntry(Thread* thread, Handle store) + : _thread(thread), _store(store) {} - assert(0 <= start_idx && start_idx <= limit, - "start_idx (%d) is out of bounds", start_idx); - assert(0 <= end_idx && end_idx <= limit, - "end_idx (%d) is out of bounds", end_idx); - assert(start_idx <= end_idx, - "Index ordering: start_idx=%d, end_idx=%d", - start_idx, end_idx); + WeakHandle operator()() { // No dups found + WeakHandle wh = + WeakHandle::create(_store); + return wh; + } + void operator()(bool inserted, WeakHandle* val) { + oop result = val->resolve(); + assert(result != NULL, "Result should be reachable"); + _return = 
Handle(_thread, result); + } + oop get_return() const { + return _return(); + } +}; - for (int i = start_idx; i < end_idx; i += 1) { - HashtableEntry* entry = the_table()->bucket(i); - while (entry != NULL) { - assert(!entry->is_shared(), "CDS not used for the StringTable"); +oop StringTable::do_intern(Handle string_or_null_h, jchar* name, + int len, uintx hash, TRAPS) { + HandleMark hm(THREAD); // cleanup strings created + Handle string_h; + + if (!string_or_null_h.is_null()) { + string_h = string_or_null_h; + } else { + string_h = java_lang_String::create_from_unicode(name, len, CHECK_NULL); + } - f->do_oop((oop*)entry->literal_addr()); + // Deduplicate the string before it is interned. Note that we should never + // deduplicate a string after it has been interned. Doing so will counteract + // compiler optimizations done on e.g. interned string literals. + Universe::heap()->deduplicate_string(string_h()); - entry = entry->next(); - } + assert(java_lang_String::equals(string_h(), name, len), + "string must be properly initialized"); + assert(len == java_lang_String::length(string_h()), "Must be same length"); + StringTableLookupOop lookup(THREAD, hash, string_h); + StringTableCreateEntry stc(THREAD, string_h); + + bool rehash_warning; + _local_table->get_insert_lazy(THREAD, lookup, stc, stc, &rehash_warning); + if (rehash_warning) { + _needs_rehashing = true; } + return stc.get_return(); } -void StringTable::buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, BucketUnlinkContext* context) { - const int limit = the_table()->table_size(); - - assert(0 <= start_idx && start_idx <= limit, - "start_idx (%d) is out of bounds", start_idx); - assert(0 <= end_idx && end_idx <= limit, - "end_idx (%d) is out of bounds", end_idx); - assert(start_idx <= end_idx, - "Index ordering: start_idx=%d, end_idx=%d", - start_idx, end_idx); +// GC support +class StringTableIsAliveCounter : public BoolObjectClosure { + BoolObjectClosure* 
_real_boc; + public: + size_t _count; + size_t _count_total; + StringTableIsAliveCounter(BoolObjectClosure* boc) : _real_boc(boc), _count(0), + _count_total(0) {} + bool do_object_b(oop obj) { + bool ret = _real_boc->do_object_b(obj); + if (!ret) { + ++_count; + } + ++_count_total; + return ret; + } +}; - for (int i = start_idx; i < end_idx; ++i) { - HashtableEntry** p = the_table()->bucket_addr(i); - HashtableEntry* entry = the_table()->bucket(i); - while (entry != NULL) { - assert(!entry->is_shared(), "CDS not used for the StringTable"); +void StringTable::unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, + int* processed, int* removed) { + DoNothingClosure dnc; + assert(is_alive != NULL, "No closure"); + StringTableIsAliveCounter stiac(is_alive); + OopClosure* tmp = f != NULL ? f : &dnc; - if (is_alive->do_object_b(string_object_no_keepalive(entry))) { - if (f != NULL) { - f->do_oop(entry->literal_addr()); - } - p = entry->next_addr(); - } else { - *p = entry->next(); - context->free_entry(entry); - } - context->_num_processed++; - entry = *p; - } + StringTable::the_table()->_weak_handles->weak_oops_do(&stiac, tmp); + + StringTable::the_table()->items_to_clean(stiac._count); + StringTable::the_table()->check_concurrent_work(); + if (processed != NULL) { + *processed = (int) stiac._count_total; + } + if (removed != NULL) { + *removed = (int) stiac._count; } } void StringTable::oops_do(OopClosure* f) { - buckets_oops_do(f, 0, the_table()->table_size()); + assert(f != NULL, "No closure"); + StringTable::the_table()->_weak_handles->oops_do(f); +} + +void StringTable::possibly_parallel_unlink( + OopStorage::ParState* _par_state_string, BoolObjectClosure* cl, + int* processed, int* removed) +{ + DoNothingClosure dnc; + assert(cl != NULL, "No closure"); + StringTableIsAliveCounter stiac(cl); + + _par_state_string->weak_oops_do(&stiac, &dnc); + + StringTable::the_table()->items_to_clean(stiac._count); + StringTable::the_table()->check_concurrent_work(); + 
*processed = (int) stiac._count_total; + *removed = (int) stiac._count; +} + +void StringTable::possibly_parallel_oops_do( + OopStorage::ParState* + _par_state_string, OopClosure* f) +{ + assert(f != NULL, "No closure"); + _par_state_string->oops_do(f); +} + +// Concurrent work +void StringTable::grow(JavaThread* jt) { + StringTableHash::GrowTask gt(_local_table); + if (!gt.prepare(jt)) { + return; + } + log_trace(stringtable)("Started to grow"); + { + TraceTime timer("Grow", TRACETIME_LOG(Debug, stringtable, perf)); + while (gt.doTask(jt)) { + gt.pause(jt); + { + ThreadBlockInVM tbivm(jt); + } + gt.cont(jt); + } + } + gt.done(jt); + _current_size = table_size(jt); + log_debug(stringtable)("Grown to size:" SIZE_FORMAT, _current_size); } -void StringTable::possibly_parallel_oops_do(OopClosure* f) { - const int limit = the_table()->table_size(); +struct StringTableDoDelete : StackObj { + long _count; + StringTableDoDelete() : _count(0) {} + void operator()(WeakHandle* val) { + ++_count; + } +}; + +struct StringTableDeleteCheck : StackObj { + long _count; + long _item; + StringTableDeleteCheck() : _count(0), _item(0) {} + bool operator()(WeakHandle* val) { + ++_item; + oop tmp = val->peek(); + if (tmp == NULL) { + ++_count; + return true; + } else { + return false; + } + } +}; - for (;;) { - // Grab next set of buckets to scan - int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize; - if (start_idx >= limit) { - // End of table - break; +void StringTable::clean_dead_entries(JavaThread* jt) { + StringTableHash::BulkDeleteTask bdt(_local_table); + if (!bdt.prepare(jt)) { + return; + } + + StringTableDeleteCheck stdc; + StringTableDoDelete stdd; + bool interrupted = false; + { + TraceTime timer("Clean", TRACETIME_LOG(Debug, stringtable, perf)); + while(bdt.doTask(jt, stdc, stdd)) { + bdt.pause(jt); + { + ThreadBlockInVM tbivm(jt); + } + if (!bdt.cont(jt)) { + interrupted = true; + break; + } } - - int end_idx = MIN2(limit, start_idx + 
ClaimChunkSize); - buckets_oops_do(f, start_idx, end_idx); } + if (interrupted) { + _has_work = true; + } else { + bdt.done(jt); + } + log_debug(stringtable)("Cleaned %ld of %ld", stdc._count, stdc._item); } -// This verification is part of Universe::verify() and needs to be quick. -// See StringTable::verify_and_compare() below for exhaustive verification. -void StringTable::verify() { - for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry* p = the_table()->bucket(i); - for ( ; p != NULL; p = p->next()) { - oop s = string_object_no_keepalive(p); - guarantee(s != NULL, "interned string is NULL"); - unsigned int h = hash_string(s); - guarantee(p->hash() == h, "broken hash in string table entry"); - guarantee(the_table()->hash_to_index(h) == i, - "wrong index in string table"); - } +void StringTable::check_concurrent_work() { + if (_has_work) { + return; + } + double load_factor = StringTable::get_load_factor(); + double dead_factor = StringTable::get_dead_factor(); + // We should clean/resize if we have more dead than alive, + // more items than preferred load factor or + // more dead items than water mark. 
+ if ((dead_factor > load_factor) || + (load_factor > PREF_AVG_LIST_LEN) || + (dead_factor > CLEAN_DEAD_HIGH_WATER_MARK)) { + log_debug(stringtable)("Concurrent work triggered, live factor:%g dead factor:%g", + load_factor, dead_factor); + trigger_concurrent_work(); } } -void StringTable::dump(outputStream* st, bool verbose) { - if (!verbose) { - the_table()->print_table_statistics(st, "StringTable", string_object_no_keepalive); +void StringTable::concurrent_work(JavaThread* jt) { + _has_work = false; + double load_factor = get_load_factor(); + log_debug(stringtable, perf)("Concurrent work, live factor: %g", load_factor); + // We prefer growing, since that also removes dead items + if (load_factor > PREF_AVG_LIST_LEN && !_local_table->is_max_size_reached()) { + grow(jt); } else { - Thread* THREAD = Thread::current(); - st->print_cr("VERSION: 1.1"); - for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry* p = the_table()->bucket(i); - for ( ; p != NULL; p = p->next()) { - oop s = string_object_no_keepalive(p); - typeArrayOop value = java_lang_String::value_no_keepalive(s); - int length = java_lang_String::length(s); - bool is_latin1 = java_lang_String::is_latin1(s); - - if (length <= 0) { - st->print("%d: ", length); - } else { - ResourceMark rm(THREAD); - int utf8_length = length; - char* utf8_string; - - if (!is_latin1) { - jchar* chars = value->char_at_addr(0); - utf8_string = UNICODE::as_utf8(chars, utf8_length); - } else { - jbyte* bytes = value->byte_at_addr(0); - utf8_string = UNICODE::as_utf8(bytes, utf8_length); - } - - st->print("%d: ", utf8_length); - HashtableTextDump::put_utf8(st, utf8_string, utf8_length); - } - st->cr(); - } - } + clean_dead_entries(jt); } } -StringTable::VerifyRetTypes StringTable::compare_entries( - int bkt1, int e_cnt1, - HashtableEntry* e_ptr1, - int bkt2, int e_cnt2, - HashtableEntry* e_ptr2) { - // These entries are sanity checked by verify_and_compare_entries() - // before this function is called. 
- oop str1 = string_object_no_keepalive(e_ptr1); - oop str2 = string_object_no_keepalive(e_ptr2); +void StringTable::do_concurrent_work(JavaThread* jt) { + StringTable::the_table()->concurrent_work(jt); +} - if (str1 == str2) { - tty->print_cr("ERROR: identical oop values (0x" PTR_FORMAT ") " - "in entry @ bucket[%d][%d] and entry @ bucket[%d][%d]", - p2i(str1), bkt1, e_cnt1, bkt2, e_cnt2); - return _verify_fail_continue; +// Rehash +bool StringTable::do_rehash() { + if (!_local_table->is_safepoint_safe()) { + return false; } - if (java_lang_String::equals(str1, str2)) { - tty->print_cr("ERROR: identical String values in entry @ " - "bucket[%d][%d] and entry @ bucket[%d][%d]", - bkt1, e_cnt1, bkt2, e_cnt2); - return _verify_fail_continue; + // We use max size + StringTableHash* new_table = new StringTableHash(END_SIZE, END_SIZE, REHASH_LEN); + // Use alt hash from now on + _alt_hash = true; + if (!_local_table->try_move_nodes_to(Thread::current(), new_table)) { + _alt_hash = false; + delete new_table; + return false; } - return _verify_pass; + // free old table + delete _local_table; + _local_table = new_table; + + return true; } -StringTable::VerifyRetTypes StringTable::verify_entry(int bkt, int e_cnt, - HashtableEntry* e_ptr, - StringTable::VerifyMesgModes mesg_mode) { - - VerifyRetTypes ret = _verify_pass; // be optimistic +void StringTable::try_rehash_table() { + static bool rehashed = false; + log_debug(stringtable)("Table imbalanced, rehashing called."); - oop str = string_object_no_keepalive(e_ptr); - if (str == NULL) { - if (mesg_mode == _verify_with_mesgs) { - tty->print_cr("ERROR: NULL oop value in entry @ bucket[%d][%d]", bkt, - e_cnt); - } - // NULL oop means no more verifications are possible - return _verify_fail_done; + // Grow instead of rehash. 
+ if (get_load_factor() > PREF_AVG_LIST_LEN && + !_local_table->is_max_size_reached()) { + log_debug(stringtable)("Choosing growing over rehashing."); + trigger_concurrent_work(); + _needs_rehashing = false; + return; + } + // Already rehashed. + if (rehashed) { + log_warning(stringtable)("Rehashing already done, still long lists."); + trigger_concurrent_work(); + _needs_rehashing = false; + return; } - if (str->klass() != SystemDictionary::String_klass()) { - if (mesg_mode == _verify_with_mesgs) { - tty->print_cr("ERROR: oop is not a String in entry @ bucket[%d][%d]", - bkt, e_cnt); + murmur_seed = AltHashing::compute_seed(); + { + if (do_rehash()) { + rehashed = true; + } else { + log_info(stringtable)("Resizes in progress rehashing skipped."); } - // not a String means no more verifications are possible - return _verify_fail_done; } + _needs_rehashing = false; +} + +void StringTable::rehash_table() { + StringTable::the_table()->try_rehash_table(); +} - unsigned int h = hash_string(str); - if (e_ptr->hash() != h) { - if (mesg_mode == _verify_with_mesgs) { - tty->print_cr("ERROR: broken hash value in entry @ bucket[%d][%d], " - "bkt_hash=%d, str_hash=%d", bkt, e_cnt, e_ptr->hash(), h); - } - ret = _verify_fail_continue; +// Statistics +static int literal_size(oop obj) { + // NOTE: this would over-count if (pre-JDK8) + // java_lang_Class::has_offset_field() is true and the String.value array is + // shared by several Strings. However, starting from JDK8, the String.value + // array is not shared anymore. 
+ if (obj == NULL) { + return 0; + } else if (obj->klass() == SystemDictionary::String_klass()) { + return (obj->size() + java_lang_String::value(obj)->size()) * HeapWordSize; + } else { + return obj->size(); } +} - if (the_table()->hash_to_index(h) != bkt) { - if (mesg_mode == _verify_with_mesgs) { - tty->print_cr("ERROR: wrong index value for entry @ bucket[%d][%d], " - "str_hash=%d, hash_to_index=%d", bkt, e_cnt, h, - the_table()->hash_to_index(h)); +struct SizeFunc : StackObj { + size_t operator()(WeakHandle* val) { + oop s = val->peek(); + if (s == NULL) { + // Dead + return 0; } - ret = _verify_fail_continue; - } + return literal_size(s); + }; +}; - return ret; +void StringTable::print_table_statistics(outputStream* st, + const char* table_name) { + SizeFunc sz; + _local_table->statistics_to(Thread::current(), sz, st, table_name); } -// See StringTable::verify() above for the quick verification that is -// part of Universe::verify(). This verification is exhaustive and -// reports on every issue that is found. StringTable::verify() only -// reports on the first issue that is found. -// -// StringTable::verify_entry() checks: -// - oop value != NULL (same as verify()) -// - oop value is a String -// - hash(String) == hash in entry (same as verify()) -// - index for hash == index of entry (same as verify()) -// -// StringTable::compare_entries() checks: -// - oops are unique across all entries -// - String values are unique across all entries -// -int StringTable::verify_and_compare_entries() { - assert(StringTable_lock->is_locked(), "sanity check"); +// Verification +class VerifyStrings : StackObj { + public: + bool operator()(WeakHandle* val) { + oop s = val->peek(); + if (s != NULL) { + assert(java_lang_String::length(s) >= 0, "Length on string must work."); + } + return true; + }; +}; + +// This verification is part of Universe::verify() and needs to be quick. 
+void StringTable::verify() { + Thread* thr = Thread::current(); + VerifyStrings vs; + if (!the_table()->_local_table->try_scan(thr, vs)) { + log_info(stringtable)("verify unavailable at this moment"); + } +} + +// Verification and comp +class VerifyCompStrings : StackObj { + GrowableArray* _oops; + public: + size_t _errors; + VerifyCompStrings(GrowableArray* oops) : _oops(oops), _errors(0) {} + bool operator()(WeakHandle* val) { + oop s = val->resolve(); + if (s == NULL) { + return true; + } + int len = _oops->length(); + for (int i = 0; i < len; i++) { + bool eq = java_lang_String::equals(s, _oops->at(i)); + assert(!eq, "Duplicate strings"); + if (eq) { + _errors++; + } + } + _oops->push(s); + return true; + }; +}; + +size_t StringTable::verify_and_compare_entries() { + Thread* thr = Thread::current(); + GrowableArray* oops = + new (ResourceObj::C_HEAP, mtInternal) + GrowableArray((int)the_table()->_current_size, true); - int fail_cnt = 0; + VerifyCompStrings vcs(oops); + if (!the_table()->_local_table->try_scan(thr, vcs)) { + log_info(stringtable)("verify unavailable at this moment"); + } + delete oops; + return vcs._errors; +} + +// Dumping +class PrintString : StackObj { + Thread* _thr; + outputStream* _st; + public: + PrintString(Thread* thr, outputStream* st) : _thr(thr), _st(st) {} + bool operator()(WeakHandle* val) { + oop s = val->peek(); + if (s == NULL) { + return true; + } + typeArrayOop value = java_lang_String::value_no_keepalive(s); + int length = java_lang_String::length(s); + bool is_latin1 = java_lang_String::is_latin1(s); - // first, verify all the entries individually: - for (int bkt = 0; bkt < the_table()->table_size(); bkt++) { - HashtableEntry* e_ptr = the_table()->bucket(bkt); - for (int e_cnt = 0; e_ptr != NULL; e_ptr = e_ptr->next(), e_cnt++) { - VerifyRetTypes ret = verify_entry(bkt, e_cnt, e_ptr, _verify_with_mesgs); - if (ret != _verify_pass) { - fail_cnt++; + if (length <= 0) { + _st->print("%d: ", length); + } else { + ResourceMark 
rm(_thr); + int utf8_length = length; + char* utf8_string; + + if (!is_latin1) { + jchar* chars = value->char_at_addr(0); + utf8_string = UNICODE::as_utf8(chars, utf8_length); + } else { + jbyte* bytes = value->byte_at_addr(0); + utf8_string = UNICODE::as_utf8(bytes, utf8_length); } + + _st->print("%d: ", utf8_length); + HashtableTextDump::put_utf8(_st, utf8_string, utf8_length); + } + _st->cr(); + return true; + }; +}; + +void StringTable::dump(outputStream* st, bool verbose) { + if (!verbose) { + the_table()->print_table_statistics(st, "StringTable"); + } else { + Thread* thr = Thread::current(); + ResourceMark rm(thr); + st->print_cr("VERSION: 1.1"); + PrintString ps(thr, st); + if (!the_table()->_local_table->try_scan(thr, ps)) { + st->print_cr("dump unavailable at this moment"); } } - - // Optimization: if the above check did not find any failures, then - // the comparison loop below does not need to call verify_entry() - // before calling compare_entries(). If there were failures, then we - // have to call verify_entry() to see if the entry can be passed to - // compare_entries() safely. When we call verify_entry() in the loop - // below, we do so quietly to void duplicate messages and we don't - // increment fail_cnt because the failures have already been counted. 
- bool need_entry_verify = (fail_cnt != 0); - - // second, verify all entries relative to each other: - for (int bkt1 = 0; bkt1 < the_table()->table_size(); bkt1++) { - HashtableEntry* e_ptr1 = the_table()->bucket(bkt1); - for (int e_cnt1 = 0; e_ptr1 != NULL; e_ptr1 = e_ptr1->next(), e_cnt1++) { - if (need_entry_verify) { - VerifyRetTypes ret = verify_entry(bkt1, e_cnt1, e_ptr1, - _verify_quietly); - if (ret == _verify_fail_done) { - // cannot use the current entry to compare against other entries - continue; - } - } - - for (int bkt2 = bkt1; bkt2 < the_table()->table_size(); bkt2++) { - HashtableEntry* e_ptr2 = the_table()->bucket(bkt2); - int e_cnt2; - for (e_cnt2 = 0; e_ptr2 != NULL; e_ptr2 = e_ptr2->next(), e_cnt2++) { - if (bkt1 == bkt2 && e_cnt2 <= e_cnt1) { - // skip the entries up to and including the one that - // we're comparing against - continue; - } - - if (need_entry_verify) { - VerifyRetTypes ret = verify_entry(bkt2, e_cnt2, e_ptr2, - _verify_quietly); - if (ret == _verify_fail_done) { - // cannot compare against this entry - continue; - } - } - - // compare two entries, report and count any failures: - if (compare_entries(bkt1, e_cnt1, e_ptr1, bkt2, e_cnt2, e_ptr2) - != _verify_pass) { - fail_cnt++; - } - } - } - } - } - return fail_cnt; -} - -// Create a new table and using alternate hash code, populate the new table -// with the existing strings. Set flag to use the alternate hash code afterwards. -void StringTable::rehash_table() { - assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint"); - // This should never happen with -Xshare:dump but it might in testing mode. - if (DumpSharedSpaces) return; - StringTable* new_table = new StringTable(); - - // Rehash the table - the_table()->move_to(new_table); - - // Delete the table and buckets (entries are reused in new table). - delete _the_table; - // Don't check if we need rehashing until the table gets unbalanced again. - // Then rehash with a new global seed. 
- _needs_rehashing = false; - _the_table = new_table; } // Utility for dumping strings @@ -671,14 +780,21 @@ } } +// Sharing #if INCLUDE_CDS_JAVA_HEAP -// Sharing +oop StringTable::lookup_shared(jchar* name, int len, unsigned int hash) { + assert(hash == java_lang_String::hash_code(name, len), + "hash must be computed using java_lang_String::hash_code"); + return _shared_table.lookup((const char*)name, hash, len); +} + oop StringTable::create_archived_string(oop s, Thread* THREAD) { assert(DumpSharedSpaces, "this function is only used with -Xshare:dump"); oop new_s = NULL; typeArrayOop v = java_lang_String::value_no_keepalive(s); - typeArrayOop new_v = (typeArrayOop)MetaspaceShared::archive_heap_object(v, THREAD); + typeArrayOop new_v = + (typeArrayOop)MetaspaceShared::archive_heap_object(v, THREAD); if (new_v == NULL) { return NULL; } @@ -692,51 +808,51 @@ return new_s; } -bool StringTable::copy_shared_string(GrowableArray *string_space, - CompactStringTableWriter* writer) { +struct CopyToArchive : StackObj { + CompactStringTableWriter* _writer; + CopyToArchive(CompactStringTableWriter* writer) : _writer(writer) {} + bool operator()(WeakHandle* val) { + oop s = val->peek(); + if (s == NULL) { + return true; + } + unsigned int hash = java_lang_String::hash_code(s); + if (hash == 0) { + return true; + } + + java_lang_String::set_hash(s, hash); + oop new_s = StringTable::create_archived_string(s, Thread::current()); + if (new_s == NULL) { + return true; + } + + val->replace(new_s); + // add to the compact table + _writer->add(hash, new_s); + return true; + } +}; + +void StringTable::copy_shared_string_table(CompactStringTableWriter* writer) { assert(MetaspaceShared::is_heap_object_archiving_allowed(), "must be"); - Thread* THREAD = Thread::current(); - for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry* bucket = the_table()->bucket(i); - for ( ; bucket != NULL; bucket = bucket->next()) { - oop s = string_object_no_keepalive(bucket); - unsigned int 
hash = java_lang_String::hash_code(s); - if (hash == 0) { - continue; - } - - java_lang_String::set_hash(s, hash); - oop new_s = create_archived_string(s, THREAD); - if (new_s == NULL) { - continue; - } - - // set the archived string in bucket - set_string_object(bucket, new_s); - - // add to the compact table - writer->add(hash, new_s); - } - } - - return true; + CopyToArchive copy(writer); + StringTable::the_table()->_local_table->do_scan(Thread::current(), copy); } -void StringTable::write_to_archive(GrowableArray *string_space) { +void StringTable::write_to_archive() { assert(MetaspaceShared::is_heap_object_archiving_allowed(), "must be"); _shared_table.reset(); - int num_buckets = the_table()->number_of_entries() / - SharedSymbolTableBucketSize; + int num_buckets = the_table()->_items / SharedSymbolTableBucketSize; // calculation of num_buckets can result in zero buckets, we need at least one CompactStringTableWriter writer(num_buckets > 1 ? num_buckets : 1, &MetaspaceShared::stats()->string); // Copy the interned strings into the "string space" within the java heap - if (copy_shared_string(string_space, &writer)) { - writer.dump(&_shared_table); - } + copy_shared_string_table(&writer); + writer.dump(&_shared_table); } void StringTable::serialize(SerializeClosure* soc) { @@ -744,7 +860,8 @@ _shared_table.serialize(soc); if (soc->writing()) { - _shared_table.reset(); // Sanity. Make sure we don't use the shared table at dump time + // Sanity. 
Make sure we don't use the shared table at dump time + _shared_table.reset(); } else if (!_shared_string_mapped) { _shared_table.reset(); } diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/classfile/stringTable.hpp --- a/src/hotspot/share/classfile/stringTable.hpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/classfile/stringTable.hpp Thu Jun 07 15:01:13 2018 +0200 @@ -25,109 +25,111 @@ #ifndef SHARE_VM_CLASSFILE_STRINGTABLE_HPP #define SHARE_VM_CLASSFILE_STRINGTABLE_HPP -#include "utilities/hashtable.hpp" +#include "gc/shared/oopStorage.hpp" +#include "gc/shared/oopStorageParState.hpp" +#include "memory/allocation.hpp" +#include "memory/padded.hpp" +#include "oops/oop.hpp" +#include "oops/weakHandle.hpp" +#include "utilities/concurrentHashTable.hpp" template class CompactHashtable; class CompactStringTableWriter; -class FileMapInfo; class SerializeClosure; -class StringTable : public RehashableHashtable { +class StringTable; +class StringTableConfig; +typedef ConcurrentHashTable, + StringTableConfig, mtSymbol> StringTableHash; + +class StringTableCreateEntry; + +class StringTable : public CHeapObj{ friend class VMStructs; friend class Symbol; + friend class StringTableConfig; + friend class StringTableCreateEntry; private: + void grow(JavaThread* jt); + void clean_dead_entries(JavaThread* jt); + // The string table static StringTable* _the_table; - // Shared string table static CompactHashtable _shared_table; static bool _shared_string_mapped; + static bool _alt_hash; +private: - // Set if one bucket is out of balance due to hash algorithm deficiency - static bool _needs_rehashing; - - // Claimed high water mark for parallel chunked scanning - static volatile int _parallel_claimed_idx; + // Set if one bucket is out of balance due to hash algorithm deficiency + StringTableHash* _local_table; + size_t _current_size; + volatile bool _has_work; + volatile bool _needs_rehashing; - static oop intern(Handle string_or_null, jchar* chars, int length, TRAPS); 
- oop basic_add(int index, Handle string_or_null, jchar* name, int len, - unsigned int hashValue, TRAPS); + OopStorage* _weak_handles; - oop lookup_in_main_table(int index, jchar* chars, int length, unsigned int hashValue); - static oop lookup_shared(jchar* name, int len, unsigned int hash); + volatile size_t _items; + DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t)); + volatile size_t _uncleaned_items; + DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t)); - // Apply the give oop closure to the entries to the buckets - // in the range [start_idx, end_idx). - static void buckets_oops_do(OopClosure* f, int start_idx, int end_idx); + double get_load_factor(); + double get_dead_factor(); - typedef StringTable::BucketUnlinkContext BucketUnlinkContext; - // Unlink or apply the give oop closure to the entries to the buckets - // in the range [start_idx, end_idx). Unlinked bucket entries are collected in the given - // context to be freed later. - // This allows multiple threads to work on the table at once. - static void buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, BucketUnlinkContext* context); + void check_concurrent_work(); + void trigger_concurrent_work(); - // Hashing algorithm, used as the hash value used by the - // StringTable for bucket selection and comparison (stored in the - // HashtableEntry structures). This is used in the String.intern() method. - static unsigned int hash_string(const jchar* s, int len); - static unsigned int hash_string(oop string); - static unsigned int alt_hash_string(const jchar* s, int len); + static uintx item_added(); + static void item_removed(); + static size_t items_to_clean(size_t ncl); + + StringTable(); - // Accessors for the string roots in the hashtable entries. - // Use string_object_no_keepalive() only when the value is not returned - // outside of a scope where a thread transition is possible. 
- static oop string_object(HashtableEntry* entry); - static oop string_object_no_keepalive(HashtableEntry* entry); - static void set_string_object(HashtableEntry* entry, oop string); + static oop intern(Handle string_or_null_h, jchar* name, int len, TRAPS); + oop do_intern(Handle string_or_null, jchar* name, int len, uintx hash, TRAPS); + oop do_lookup(jchar* name, int len, uintx hash); - StringTable() : RehashableHashtable((int)StringTableSize, - sizeof (HashtableEntry)) {} + void concurrent_work(JavaThread* jt); + void print_table_statistics(outputStream* st, const char* table_name); - StringTable(HashtableBucket* t, int number_of_entries) - : RehashableHashtable((int)StringTableSize, sizeof (HashtableEntry), t, - number_of_entries) {} -public: + void try_rehash_table(); + bool do_rehash(); + + public: // The string table static StringTable* the_table() { return _the_table; } + size_t table_size(Thread* thread = NULL); - // Size of one bucket in the string table. Used when checking for rollover. - static uint bucket_size() { return sizeof(HashtableBucket); } + static OopStorage* weak_storage() { return the_table()->_weak_handles; } static void create_table() { assert(_the_table == NULL, "One string table allowed."); _the_table = new StringTable(); } + static void do_concurrent_work(JavaThread* jt); + static bool has_work() { return the_table()->_has_work; } + // GC support // Delete pointers to otherwise-unreachable objects. 
- static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f) { - int processed = 0; - int removed = 0; - unlink_or_oops_do(cl, f, &processed, &removed); + static void unlink(BoolObjectClosure* cl) { + unlink_or_oops_do(cl); } - static void unlink(BoolObjectClosure* cl) { - int processed = 0; - int removed = 0; - unlink_or_oops_do(cl, NULL, &processed, &removed); - } - static void unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f, int* processed, int* removed); - static void unlink(BoolObjectClosure* cl, int* processed, int* removed) { - unlink_or_oops_do(cl, NULL, processed, removed); - } + static void unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f = NULL, + int* processed = NULL, int* removed = NULL); + // Serially invoke "f->do_oop" on the locations of all oops in the table. static void oops_do(OopClosure* f); // Possibly parallel versions of the above - static void possibly_parallel_unlink_or_oops_do(BoolObjectClosure* cl, OopClosure* f, int* processed, int* removed); - static void possibly_parallel_unlink(BoolObjectClosure* cl, int* processed, int* removed) { - possibly_parallel_unlink_or_oops_do(cl, NULL, processed, removed); - } - static void possibly_parallel_oops_do(OopClosure* f); - - // Internal test. 
- static void test_alt_hash() PRODUCT_RETURN; + static void possibly_parallel_unlink( + OopStorage::ParState* par_state_string, + BoolObjectClosure* cl, int* processed, int* removed); + static void possibly_parallel_oops_do( + OopStorage::ParState* par_state_string, + OopClosure* f); // Probing static oop lookup(Symbol* symbol); @@ -138,46 +140,28 @@ static oop intern(oop string, TRAPS); static oop intern(const char *utf8_string, TRAPS); - // Debugging - static void verify(); - static void dump(outputStream* st, bool verbose=false); - - enum VerifyMesgModes { - _verify_quietly = 0, - _verify_with_mesgs = 1 - }; - - enum VerifyRetTypes { - _verify_pass = 0, - _verify_fail_continue = 1, - _verify_fail_done = 2 - }; - - static VerifyRetTypes compare_entries(int bkt1, int e_cnt1, - HashtableEntry* e_ptr1, - int bkt2, int e_cnt2, - HashtableEntry* e_ptr2); - static VerifyRetTypes verify_entry(int bkt, int e_cnt, - HashtableEntry* e_ptr, - VerifyMesgModes mesg_mode); - static int verify_and_compare_entries(); + // Rehash the string table if it gets out of balance + static void rehash_table(); + static bool needs_rehashing() + { return StringTable::the_table()->_needs_rehashing; } // Sharing + private: + oop lookup_shared(jchar* name, int len, unsigned int hash) NOT_CDS_JAVA_HEAP_RETURN_(NULL); + static void copy_shared_string_table(CompactStringTableWriter* ch_table) NOT_CDS_JAVA_HEAP_RETURN; + public: + static oop create_archived_string(oop s, Thread* THREAD); static void set_shared_string_mapped() { _shared_string_mapped = true; } static bool shared_string_mapped() { return _shared_string_mapped; } static void shared_oops_do(OopClosure* f) NOT_CDS_JAVA_HEAP_RETURN; - static bool copy_shared_string(GrowableArray *string_space, - CompactStringTableWriter* ch_table) NOT_CDS_JAVA_HEAP_RETURN_(false); - static oop create_archived_string(oop s, Thread* THREAD) NOT_CDS_JAVA_HEAP_RETURN_(NULL); - static void write_to_archive(GrowableArray *string_space) 
NOT_CDS_JAVA_HEAP_RETURN; + static void write_to_archive() NOT_CDS_JAVA_HEAP_RETURN; static void serialize(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN; - // Rehash the symbol table if it gets out of balance - static void rehash_table(); - static bool needs_rehashing() { return _needs_rehashing; } + // Jcmd + static void dump(outputStream* st, bool verbose=false); + // Debugging + static size_t verify_and_compare_entries(); + static void verify(); +}; - // Parallel chunked scanning - static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; } - static int parallel_claimed_index() { return _parallel_claimed_idx; } -}; #endif // SHARE_VM_CLASSFILE_STRINGTABLE_HPP diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/gc/cms/cmsHeap.cpp --- a/src/hotspot/share/gc/cms/cmsHeap.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/gc/cms/cmsHeap.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -220,13 +220,14 @@ ScanningOption so, bool only_strong_roots, OopsInGenClosure* root_closure, - CLDClosure* cld_closure) { + CLDClosure* cld_closure, + OopStorage::ParState* par_state_string) { MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations); CLDClosure* weak_cld_closure = only_strong_roots ? 
NULL : cld_closure; process_roots(scope, so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure); if (!only_strong_roots) { - process_string_table_roots(scope, root_closure); + process_string_table_roots(scope, root_closure, par_state_string); } if (young_gen_as_roots && diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/gc/cms/cmsHeap.hpp --- a/src/hotspot/share/gc/cms/cmsHeap.hpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/gc/cms/cmsHeap.hpp Thu Jun 07 15:01:13 2018 +0200 @@ -30,6 +30,7 @@ #include "gc/shared/collectedHeap.hpp" #include "gc/shared/gcCause.hpp" #include "gc/shared/genCollectedHeap.hpp" +#include "gc/shared/oopStorageParState.hpp" #include "utilities/growableArray.hpp" class CLDClosure; @@ -90,7 +91,8 @@ ScanningOption so, bool only_strong_roots, OopsInGenClosure* root_closure, - CLDClosure* cld_closure); + CLDClosure* cld_closure, + OopStorage::ParState* par_state_string = NULL); GCMemoryManager* old_manager() const { return _old_manager; } diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp --- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -54,6 +54,7 @@ #include "gc/shared/genCollectedHeap.hpp" #include "gc/shared/genOopClosures.inline.hpp" #include "gc/shared/isGCActiveMark.hpp" +#include "gc/shared/oopStorageParState.hpp" #include "gc/shared/referencePolicy.hpp" #include "gc/shared/space.inline.hpp" #include "gc/shared/strongRootsScope.hpp" @@ -2769,10 +2770,12 @@ protected: CMSCollector* _collector; uint _n_workers; + OopStorage::ParState _par_state_string; CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) : AbstractGangTask(name), _collector(collector), - _n_workers(n_workers) {} + _n_workers(n_workers), + _par_state_string(StringTable::weak_storage()) {} // Work method in support of parallel rescan ... 
of young gen spaces void do_young_space_rescan(OopsInGenClosure* cl, ContiguousSpace* space, @@ -4274,7 +4277,9 @@ GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), _collector->should_unload_classes(), &par_mri_cl, - &cld_closure); + &cld_closure, + &_par_state_string); + assert(_collector->should_unload_classes() || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); @@ -4403,7 +4408,8 @@ GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), _collector->should_unload_classes(), &par_mrias_cl, - NULL); // The dirty klasses will be handled below + NULL, // The dirty klasses will be handled below + &_par_state_string); assert(_collector->should_unload_classes() || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/gc/cms/parNewGeneration.cpp --- a/src/hotspot/share/gc/cms/parNewGeneration.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "classfile/stringTable.hpp" #include "gc/cms/cmsHeap.inline.hpp" #include "gc/cms/compactibleFreeListSpace.hpp" #include "gc/cms/concurrentMarkSweepGeneration.hpp" @@ -589,7 +590,8 @@ _young_gen(young_gen), _old_gen(old_gen), _young_old_boundary(young_old_boundary), _state_set(state_set), - _strong_roots_scope(strong_roots_scope) + _strong_roots_scope(strong_roots_scope), + _par_state_string(StringTable::weak_storage()) {} void ParNewGenTask::work(uint worker_id) { @@ -611,7 +613,8 @@ heap->young_process_roots(_strong_roots_scope, &par_scan_state.to_space_root_closure(), &par_scan_state.older_gen_closure(), - &cld_scan_closure); + &cld_scan_closure, + &_par_state_string); par_scan_state.end_strong_roots(); diff -r 
f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/gc/cms/parNewGeneration.hpp --- a/src/hotspot/share/gc/cms/parNewGeneration.hpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/gc/cms/parNewGeneration.hpp Thu Jun 07 15:01:13 2018 +0200 @@ -29,6 +29,7 @@ #include "gc/serial/defNewGeneration.hpp" #include "gc/shared/copyFailedInfo.hpp" #include "gc/shared/gcTrace.hpp" +#include "gc/shared/oopStorageParState.hpp" #include "gc/shared/plab.hpp" #include "gc/shared/preservedMarks.hpp" #include "gc/shared/taskqueue.hpp" @@ -236,6 +237,7 @@ HeapWord* _young_old_boundary; class ParScanThreadStateSet* _state_set; StrongRootsScope* _strong_roots_scope; + OopStorage::ParState _par_state_string; public: ParNewGenTask(ParNewGeneration* young_gen, diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/gc/g1/collectionSetChooser.cpp --- a/src/hotspot/share/gc/g1/collectionSetChooser.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/gc/g1/collectionSetChooser.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -147,7 +147,7 @@ void CollectionSetChooser::add_region(HeapRegion* hr) { assert(!hr->is_pinned(), "Pinned region shouldn't be added to the collection set (index %u)", hr->hrm_index()); - assert(!hr->is_young(), "should not be young!"); + assert(hr->is_old(), "should be old but is %s", hr->get_type_str()); assert(hr->rem_set()->is_complete(), "Trying to add region %u to the collection set with incomplete remembered set", hr->hrm_index()); _regions.append(hr); @@ -185,7 +185,7 @@ void CollectionSetChooser::set_region(uint index, HeapRegion* hr) { assert(regions_at(index) == NULL, "precondition"); - assert(!hr->is_young(), "should not be young!"); + assert(hr->is_old(), "should be old but is %s", hr->get_type_str()); regions_at_put(index, hr); hr->calc_gc_efficiency(); } @@ -233,18 +233,19 @@ _cset_updater(hrSorted, true /* parallel */, chunk_size) { } bool do_heap_region(HeapRegion* r) { - // Do we have any marking information for this region? 
- if (r->is_marked()) { - // We will skip any region that's currently used as an old GC - // alloc region (we should not consider those for collection - // before we fill them up). - if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) { - _cset_updater.add_region(r); - } else if (r->is_old()) { - // Can clean out the remembered sets of all regions that we did not choose but - // we created the remembered set for. - r->rem_set()->clear(true); - } + // We will skip any region that's currently used as an old GC + // alloc region (we should not consider those for collection + // before we fill them up). + if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) { + _cset_updater.add_region(r); + } else if (r->is_old()) { + // Keep remembered sets for humongous regions, otherwise clean out remembered + // sets for old regions. + r->rem_set()->clear(true /* only_cardset */); + } else { + assert(!r->is_old() || !r->rem_set()->is_tracked(), + "Missed to clear unused remembered set of region %u (%s) that is %s", + r->hrm_index(), r->get_type_str(), r->rem_set()->get_state_str()); } return false; } @@ -280,11 +281,10 @@ } bool CollectionSetChooser::should_add(HeapRegion* hr) const { - assert(hr->is_marked(), "pre-condition"); - assert(!hr->is_young(), "should never consider young regions"); - return !hr->is_pinned() && - region_occupancy_low_enough_for_evac(hr->live_bytes()) && - hr->rem_set()->is_complete(); + return !hr->is_young() && + !hr->is_pinned() && + region_occupancy_low_enough_for_evac(hr->live_bytes()) && + hr->rem_set()->is_complete(); } void CollectionSetChooser::rebuild(WorkGang* workers, uint n_regions) { diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/gc/g1/g1CollectedHeap.cpp --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -69,6 +69,7 @@ #include "gc/shared/gcTraceTime.inline.hpp" #include 
"gc/shared/generationSpec.hpp" #include "gc/shared/isGCActiveMark.hpp" +#include "gc/shared/oopStorageParState.hpp" #include "gc/shared/preservedMarks.inline.hpp" #include "gc/shared/suspendibleThreadSet.hpp" #include "gc/shared/referenceProcessor.inline.hpp" @@ -3218,6 +3219,7 @@ private: BoolObjectClosure* _is_alive; G1StringDedupUnlinkOrOopsDoClosure _dedup_closure; + OopStorage::ParState _par_state_string; int _initial_string_table_size; int _initial_symbol_table_size; @@ -3237,24 +3239,19 @@ AbstractGangTask("String/Symbol Unlinking"), _is_alive(is_alive), _dedup_closure(is_alive, NULL, false), + _par_state_string(StringTable::weak_storage()), _process_strings(process_strings), _strings_processed(0), _strings_removed(0), _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0), _process_string_dedup(process_string_dedup) { - _initial_string_table_size = StringTable::the_table()->table_size(); + _initial_string_table_size = (int) StringTable::the_table()->table_size(); _initial_symbol_table_size = SymbolTable::the_table()->table_size(); - if (process_strings) { - StringTable::clear_parallel_claimed_index(); - } if (process_symbols) { SymbolTable::clear_parallel_claimed_index(); } } ~G1StringAndSymbolCleaningTask() { - guarantee(!_process_strings || StringTable::parallel_claimed_index() >= _initial_string_table_size, - "claim value %d after unlink less than initial string table size %d", - StringTable::parallel_claimed_index(), _initial_string_table_size); guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size, "claim value %d after unlink less than initial symbol table size %d", SymbolTable::parallel_claimed_index(), _initial_symbol_table_size); @@ -3273,7 +3270,7 @@ int symbols_processed = 0; int symbols_removed = 0; if (_process_strings) { - StringTable::possibly_parallel_unlink(_is_alive, &strings_processed, &strings_removed); + StringTable::possibly_parallel_unlink(&_par_state_string, 
_is_alive, &strings_processed, &strings_removed); Atomic::add(strings_processed, &_strings_processed); Atomic::add(strings_removed, &_strings_removed); } diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/gc/g1/g1ConcurrentMark.cpp --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -1651,7 +1651,11 @@ } if (has_overflown()) { - // We can not trust g1_is_alive if the marking stack overflowed + // We can not trust g1_is_alive and the contents of the heap if the marking stack + // overflowed while processing references. Exit the VM. + fatal("Overflow during reference processing, can not continue. Please " + "increase MarkStackSizeMax (current value: " SIZE_FORMAT ") and " + "restart.", MarkStackSizeMax); return; } diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/gc/g1/g1Policy.cpp --- a/src/hotspot/share/gc/g1/g1Policy.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/gc/g1/g1Policy.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -825,10 +825,10 @@ size_t G1Policy::predict_bytes_to_copy(HeapRegion* hr) const { size_t bytes_to_copy; - if (hr->is_marked()) + if (!hr->is_young()) { bytes_to_copy = hr->max_live_bytes(); - else { - assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant"); + } else { + assert(hr->age_in_surv_rate_group() != -1, "invariant"); int age = hr->age_in_surv_rate_group(); double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group()); bytes_to_copy = (size_t) (hr->used() * yg_surv_rate); diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/gc/g1/g1RootProcessor.cpp --- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -38,6 +38,7 @@ #include "gc/g1/g1RootClosures.hpp" #include "gc/g1/g1RootProcessor.hpp" #include "gc/g1/heapRegion.inline.hpp" +#include "gc/shared/oopStorageParState.hpp" 
#include "gc/shared/referenceProcessor.hpp" #include "gc/shared/weakProcessor.hpp" #include "memory/allocation.inline.hpp" @@ -72,6 +73,7 @@ _process_strong_tasks(G1RP_PS_NumElements), _srs(n_workers), _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never), + _par_state_string(StringTable::weak_storage()), _n_workers_discovered_strong_classes(0) {} void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_i) { @@ -301,7 +303,7 @@ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::StringTableRoots, worker_i); // All threads execute the following. A specific chunk of buckets // from the StringTable are the individual tasks. - StringTable::possibly_parallel_oops_do(closures->weak_oops()); + StringTable::possibly_parallel_oops_do(&_par_state_string, closures->weak_oops()); } void G1RootProcessor::process_code_cache_roots(CodeBlobClosure* code_closure, diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/gc/g1/g1RootProcessor.hpp --- a/src/hotspot/share/gc/g1/g1RootProcessor.hpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/gc/g1/g1RootProcessor.hpp Thu Jun 07 15:01:13 2018 +0200 @@ -25,6 +25,7 @@ #ifndef SHARE_VM_GC_G1_G1ROOTPROCESSOR_HPP #define SHARE_VM_GC_G1_G1ROOTPROCESSOR_HPP +#include "gc/shared/oopStorageParState.hpp" #include "gc/shared/strongRootsScope.hpp" #include "memory/allocation.hpp" #include "runtime/mutex.hpp" @@ -49,6 +50,7 @@ G1CollectedHeap* _g1h; SubTasksDone _process_strong_tasks; StrongRootsScope _srs; + OopStorage::ParState _par_state_string; // Used to implement the Thread work barrier. 
Monitor _lock; diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/gc/g1/g1_globals.hpp --- a/src/hotspot/share/gc/g1/g1_globals.hpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/gc/g1/g1_globals.hpp Thu Jun 07 15:01:13 2018 +0200 @@ -108,9 +108,6 @@ "When expanding, % of uncommitted space to claim.") \ range(0, 100) \ \ - develop(bool, G1RSBarrierRegionFilter, true, \ - "If true, generate region filtering code in RS barrier") \ - \ product(size_t, G1UpdateBufferSize, 256, \ "Size of an update buffer") \ range(1, NOT_LP64(32*M) LP64_ONLY(1*G)) \ diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/gc/g1/heapRegion.hpp --- a/src/hotspot/share/gc/g1/heapRegion.hpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/gc/g1/heapRegion.hpp Thu Jun 07 15:01:13 2018 +0200 @@ -541,10 +541,6 @@ // objects during evac failure handling. void note_self_forwarding_removal_end(size_t marked_bytes); - // Returns "false" iff no object in the region was allocated when the - // last mark phase ended. 
- bool is_marked() { return _prev_top_at_mark_start != bottom(); } - void reset_during_compaction() { assert(is_humongous(), "should only be called for humongous regions"); diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/gc/shared/genCollectedHeap.cpp --- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -44,6 +44,7 @@ #include "gc/shared/genCollectedHeap.hpp" #include "gc/shared/genOopClosures.inline.hpp" #include "gc/shared/generationSpec.hpp" +#include "gc/shared/oopStorageParState.inline.hpp" #include "gc/shared/space.hpp" #include "gc/shared/strongRootsScope.hpp" #include "gc/shared/vmGCOperations.hpp" @@ -851,12 +852,17 @@ } void GenCollectedHeap::process_string_table_roots(StrongRootsScope* scope, - OopClosure* root_closure) { + OopClosure* root_closure, + OopStorage::ParState* par_state_string) { assert(root_closure != NULL, "Must be set"); // All threads execute the following. A specific chunk of buckets // from the StringTable are the individual tasks. 
+ + // Either we should be single threaded or have a ParState + assert((scope->n_threads() <= 1) || par_state_string != NULL, "Parallel but no ParState"); + if (scope->n_threads() > 1) { - StringTable::possibly_parallel_oops_do(root_closure); + StringTable::possibly_parallel_oops_do(par_state_string, root_closure); } else { StringTable::oops_do(root_closure); } @@ -865,12 +871,13 @@ void GenCollectedHeap::young_process_roots(StrongRootsScope* scope, OopsInGenClosure* root_closure, OopsInGenClosure* old_gen_closure, - CLDClosure* cld_closure) { + CLDClosure* cld_closure, + OopStorage::ParState* par_state_string) { MarkingCodeBlobClosure mark_code_closure(root_closure, CodeBlobToOopClosure::FixRelocations); process_roots(scope, SO_ScavengeCodeCache, root_closure, cld_closure, cld_closure, &mark_code_closure); - process_string_table_roots(scope, root_closure); + process_string_table_roots(scope, root_closure, par_state_string); if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) { root_closure->reset_generation(); @@ -890,7 +897,8 @@ ScanningOption so, bool only_strong_roots, OopsInGenClosure* root_closure, - CLDClosure* cld_closure) { + CLDClosure* cld_closure, + OopStorage::ParState* par_state_string) { MarkingCodeBlobClosure mark_code_closure(root_closure, is_adjust_phase); CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure; @@ -899,7 +907,7 @@ // We never treat the string table as roots during marking // for the full gc, so we only need to process it during // the adjust phase. 
- process_string_table_roots(scope, root_closure); + process_string_table_roots(scope, root_closure, par_state_string); } _process_strong_tasks->all_tasks_completed(scope->n_threads()); diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/gc/shared/genCollectedHeap.hpp --- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp Thu Jun 07 15:01:13 2018 +0200 @@ -28,6 +28,7 @@ #include "gc/shared/collectedHeap.hpp" #include "gc/shared/collectorPolicy.hpp" #include "gc/shared/generation.hpp" +#include "gc/shared/oopStorageParState.hpp" #include "gc/shared/softRefGenPolicy.hpp" class AdaptiveSizePolicy; @@ -401,7 +402,8 @@ CodeBlobToOopClosure* code_roots); void process_string_table_roots(StrongRootsScope* scope, - OopClosure* root_closure); + OopClosure* root_closure, + OopStorage::ParState* par_state_string); // Accessor for memory state verification support NOT_PRODUCT( @@ -415,14 +417,16 @@ void young_process_roots(StrongRootsScope* scope, OopsInGenClosure* root_closure, OopsInGenClosure* old_gen_closure, - CLDClosure* cld_closure); + CLDClosure* cld_closure, + OopStorage::ParState* par_state_string = NULL); void full_process_roots(StrongRootsScope* scope, bool is_adjust_phase, ScanningOption so, bool only_strong_roots, OopsInGenClosure* root_closure, - CLDClosure* cld_closure); + CLDClosure* cld_closure, + OopStorage::ParState* par_state_string = NULL); // Apply "root_closure" to all the weak roots of the system. 
// These include JNI weak roots, string table, diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/gc/shared/strongRootsScope.cpp --- a/src/hotspot/share/gc/shared/strongRootsScope.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/gc/shared/strongRootsScope.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -38,8 +38,6 @@ StrongRootsScope::StrongRootsScope(uint n_threads) : _n_threads(n_threads) { Threads::change_thread_claim_parity(); - // Zero the claimed high water mark in the StringTable - StringTable::clear_parallel_claimed_index(); } StrongRootsScope::~StrongRootsScope() { diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/memory/metaspaceShared.cpp --- a/src/hotspot/share/memory/metaspaceShared.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/memory/metaspaceShared.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -1841,7 +1841,7 @@ G1CollectedHeap::heap()->begin_archive_alloc_range(); // Archive interned string objects - StringTable::write_to_archive(closed_archive); + StringTable::write_to_archive(); G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive, os::vm_allocation_granularity()); diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/oops/weakHandle.cpp --- a/src/hotspot/share/oops/weakHandle.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/oops/weakHandle.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "classfile/systemDictionary.hpp" +#include "classfile/stringTable.hpp" #include "gc/shared/oopStorage.hpp" #include "oops/access.inline.hpp" #include "oops/oop.hpp" @@ -35,6 +36,10 @@ return SystemDictionary::vm_weak_oop_storage(); } +template <> OopStorage* WeakHandle::get_storage() { + return StringTable::weak_storage(); +} + template WeakHandle WeakHandle::create(Handle obj) { assert(obj() != NULL, "no need to create weak null oop"); @@ -68,4 +73,5 @@ // Provide instantiation. 
template class WeakHandle; +template class WeakHandle; diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/oops/weakHandle.hpp --- a/src/hotspot/share/oops/weakHandle.hpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/oops/weakHandle.hpp Thu Jun 07 15:01:13 2018 +0200 @@ -39,12 +39,11 @@ // This is the vm version of jweak but has different GC lifetimes and policies, // depending on the type. -enum WeakHandleType { vm_class_loader_data, vm_string }; +enum WeakHandleType { vm_class_loader_data, vm_string, vm_string_table_data }; template class WeakHandle { public: - private: oop* _obj; @@ -59,6 +58,8 @@ void release() const; bool is_null() const { return _obj == NULL; } + void replace(oop with_obj); + void print() const; void print_on(outputStream* st) const; }; diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/oops/weakHandle.inline.hpp --- a/src/hotspot/share/oops/weakHandle.inline.hpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/oops/weakHandle.inline.hpp Thu Jun 07 15:01:13 2018 +0200 @@ -40,4 +40,10 @@ return RootAccess::oop_load(_obj); } +template +void WeakHandle::replace(oop with_obj) { + RootAccess::oop_store(_obj, with_obj); +} + #endif // SHARE_VM_OOPS_WEAKHANDLE_INLINE_HPP + diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/runtime/globals.hpp --- a/src/hotspot/share/runtime/globals.hpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/runtime/globals.hpp Thu Jun 07 15:01:13 2018 +0200 @@ -2542,8 +2542,9 @@ "Relax the access control checks in the verifier") \ \ product(uintx, StringTableSize, defaultStringTableSize, \ - "Number of buckets in the interned String table") \ - range(minimumStringTableSize, 111*defaultStringTableSize) \ + "Number of buckets in the interned String table " \ + "(will be rounded to nearest higher power of 2)") \ + range(minimumStringTableSize, 16777216ul) \ \ experimental(uintx, SymbolTableSize, defaultSymbolTableSize, \ "Number of buckets in the JVM internal Symbol table") \ diff -r 
f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/runtime/java.cpp --- a/src/hotspot/share/runtime/java.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/runtime/java.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -524,14 +524,9 @@ } if (VerifyStringTableAtExit) { - int fail_cnt = 0; - { - MutexLocker ml(StringTable_lock); - fail_cnt = StringTable::verify_and_compare_entries(); - } - + size_t fail_cnt = StringTable::verify_and_compare_entries(); if (fail_cnt != 0) { - tty->print_cr("ERROR: fail_cnt=%d", fail_cnt); + tty->print_cr("ERROR: fail_cnt=" SIZE_FORMAT, fail_cnt); guarantee(fail_cnt == 0, "unexpected StringTable verification failures"); } } diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/runtime/mutexLocker.cpp --- a/src/hotspot/share/runtime/mutexLocker.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/runtime/mutexLocker.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -48,6 +48,8 @@ Mutex* JNIGlobalActive_lock = NULL; Mutex* JNIWeakAlloc_lock = NULL; Mutex* JNIWeakActive_lock = NULL; +Mutex* StringTableWeakAlloc_lock = NULL; +Mutex* StringTableWeakActive_lock = NULL; Mutex* JNIHandleBlockFreeList_lock = NULL; Mutex* VMWeakAlloc_lock = NULL; Mutex* VMWeakActive_lock = NULL; @@ -186,6 +188,9 @@ def(VMWeakAlloc_lock , PaddedMutex , vmweak, true, Monitor::_safepoint_check_never); def(VMWeakActive_lock , PaddedMutex , vmweak-1, true, Monitor::_safepoint_check_never); + def(StringTableWeakAlloc_lock , PaddedMutex , vmweak, true, Monitor::_safepoint_check_never); + def(StringTableWeakActive_lock , PaddedMutex , vmweak-1, true, Monitor::_safepoint_check_never); + if (UseConcMarkSweepGC || UseG1GC) { def(FullGCCount_lock , PaddedMonitor, leaf, true, Monitor::_safepoint_check_never); // in support of ExplicitGCInvokesConcurrent } diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/runtime/mutexLocker.hpp --- a/src/hotspot/share/runtime/mutexLocker.hpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/runtime/mutexLocker.hpp Thu Jun 07 15:01:13 2018 +0200 
@@ -42,6 +42,8 @@ extern Mutex* JNIGlobalActive_lock; // JNI global storage active list lock extern Mutex* JNIWeakAlloc_lock; // JNI weak storage allocate list lock extern Mutex* JNIWeakActive_lock; // JNI weak storage active list lock +extern Mutex* StringTableWeakAlloc_lock; // StringTable weak storage allocate list lock +extern Mutex* StringTableWeakActive_lock; // STringTable weak storage active list lock extern Mutex* JNIHandleBlockFreeList_lock; // a lock on the JNI handle block free list extern Mutex* VMWeakAlloc_lock; // VM Weak Handles storage allocate list lock extern Mutex* VMWeakActive_lock; // VM Weak Handles storage active list lock diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/runtime/serviceThread.cpp --- a/src/hotspot/share/runtime/serviceThread.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/runtime/serviceThread.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -23,6 +23,7 @@ */ #include "precompiled.hpp" +#include "classfile/stringTable.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/serviceThread.hpp" @@ -82,6 +83,7 @@ bool has_gc_notification_event = false; bool has_dcmd_notification_event = false; bool acs_notify = false; + bool stringtable_work = false; JvmtiDeferredEvent jvmti_event; { // Need state transition ThreadBlockInVM so that this thread @@ -98,7 +100,8 @@ while (!(sensors_changed = LowMemoryDetector::has_pending_requests()) && !(has_jvmti_events = JvmtiDeferredEventQueue::has_events()) && !(has_gc_notification_event = GCNotifier::has_event()) && - !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification())) { + !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) && + !(stringtable_work = StringTable::has_work())) { // wait until one of the sensors has pending requests, or there is a // pending JVMTI event or JMX GC notification to post Service_lock->wait(Mutex::_no_safepoint_check_flag); @@ -109,6 +112,10 @@ } } + if 
(stringtable_work) { + StringTable::do_concurrent_work(jt); + } + if (has_jvmti_events) { jvmti_event.post(); } diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/runtime/vmStructs.cpp --- a/src/hotspot/share/runtime/vmStructs.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/runtime/vmStructs.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -164,7 +164,6 @@ typedef Hashtable IntptrHashtable; typedef Hashtable SymbolHashtable; typedef HashtableEntry SymbolHashtableEntry; -typedef Hashtable StringHashtable; typedef Hashtable KlassHashtable; typedef HashtableEntry KlassHashtableEntry; typedef CompactHashtable SymbolCompactHashTable; @@ -476,12 +475,6 @@ static_field(SymbolTable, _shared_table, SymbolCompactHashTable) \ static_field(RehashableSymbolHashtable, _seed, juint) \ \ - /***************/ \ - /* StringTable */ \ - /***************/ \ - \ - static_field(StringTable, _the_table, StringTable*) \ - \ /********************/ \ /* CompactHashTable */ \ /********************/ \ @@ -1365,7 +1358,6 @@ declare_toplevel_type(BasicHashtable) \ declare_type(RehashableSymbolHashtable, BasicHashtable) \ declare_type(SymbolTable, SymbolHashtable) \ - declare_type(StringTable, StringHashtable) \ declare_type(Dictionary, KlassHashtable) \ declare_toplevel_type(BasicHashtableEntry) \ declare_type(IntptrHashtableEntry, BasicHashtableEntry) \ diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/runtime/vm_operations.hpp --- a/src/hotspot/share/runtime/vm_operations.hpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/runtime/vm_operations.hpp Thu Jun 07 15:01:13 2018 +0200 @@ -103,6 +103,7 @@ template(RotateGCLog) \ template(WhiteBoxOperation) \ template(ClassLoaderStatsOperation) \ + template(ClassLoaderHierarchyOperation) \ template(DumpHashtable) \ template(DumpTouchedMethods) \ template(MarkActiveNMethods) \ diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/services/diagnosticCommand.cpp --- a/src/hotspot/share/services/diagnosticCommand.cpp Thu Jun 07 10:48:36 
2018 +0200 +++ b/src/hotspot/share/services/diagnosticCommand.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "jvm.h" +#include "classfile/classLoaderHierarchyDCmd.hpp" #include "classfile/classLoaderStats.hpp" #include "classfile/compactHashtable.hpp" #include "compiler/compileBroker.hpp" @@ -101,6 +102,7 @@ #endif // INCLUDE_JVMTI DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); + DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); DCmdFactory::register_DCmdFactory(new DCmdFactoryImpl(full_export, true, false)); diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/utilities/concurrentHashTable.hpp --- a/src/hotspot/share/utilities/concurrentHashTable.hpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/utilities/concurrentHashTable.hpp Thu Jun 07 15:01:13 2018 +0200 @@ -484,6 +484,9 @@ void statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f, outputStream* st, const char* table_name); + // Moves all nodes from this table to to_cht + bool try_move_nodes_to(Thread* thread, ConcurrentHashTable* to_cht); + // This is a Curiously Recurring Template Pattern (CRPT) interface for the // specialization. 
struct BaseConfig { diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/utilities/concurrentHashTable.inline.hpp --- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp Thu Jun 07 15:01:13 2018 +0200 @@ -293,7 +293,7 @@ inline void ConcurrentHashTable:: write_synchonize_on_visible_epoch(Thread* thread) { - assert(_resize_lock->owned_by_self(), "Re-size lock not held"); + assert(_resize_lock_owner == thread, "Re-size lock not held"); OrderAccess::fence(); // Prevent below load from floating up. // If no reader saw this version we can skip write_synchronize. if (OrderAccess::load_acquire(&_invisible_epoch) == thread) { @@ -488,7 +488,7 @@ { // Here we have resize lock so table is SMR safe, and there is no new // table. Can do this in parallel if we want. - assert(_resize_lock->owned_by_self(), "Re-size lock not held"); + assert(_resize_lock_owner == thread, "Re-size lock not held"); Node* ndel[BULK_DELETE_LIMIT]; InternalTable* table = get_table(); assert(start_idx < stop_idx, "Must be"); @@ -500,9 +500,9 @@ // own read-side. GlobalCounter::critical_section_begin(thread); for (size_t bucket_it = start_idx; bucket_it < stop_idx; bucket_it++) { - Bucket* bucket = _table->get_bucket(bucket_it); + Bucket* bucket = table->get_bucket(bucket_it); Bucket* prefetch_bucket = (bucket_it+1) < stop_idx ? 
- _table->get_bucket(bucket_it+1) : NULL; + table->get_bucket(bucket_it+1) : NULL; if (!HaveDeletables::value, EVALUATE_FUNC>:: have_deletable(bucket, eval_f, prefetch_bucket)) { @@ -695,17 +695,13 @@ if (!try_resize_lock(thread)) { return false; } - - assert(_resize_lock->owned_by_self(), "Re-size lock not held"); - + assert(_resize_lock_owner == thread, "Re-size lock not held"); if (_table->_log2_size == _log2_start_size || _table->_log2_size <= log2_size) { unlock_resize_lock(thread); return false; } - _new_table = new InternalTable(_table->_log2_size - 1); - return true; } @@ -713,8 +709,7 @@ inline void ConcurrentHashTable:: internal_shrink_epilog(Thread* thread) { - assert(_resize_lock->owned_by_self(), "Re-size lock not held"); - assert(_resize_lock_owner, "Should be locked"); + assert(_resize_lock_owner == thread, "Re-size lock not held"); InternalTable* old_table = set_table_from_new(); _size_limit_reached = false; @@ -771,14 +766,13 @@ internal_shrink(Thread* thread, size_t log2_size) { if (!internal_shrink_prolog(thread, log2_size)) { - assert(!_resize_lock->owned_by_self(), "Re-size lock held"); + assert(_resize_lock_owner != thread, "Re-size lock held"); return false; } - assert(_resize_lock->owned_by_self(), "Re-size lock not held"); assert(_resize_lock_owner == thread, "Should be locked by me"); internal_shrink_range(thread, 0, _new_table->_size); internal_shrink_epilog(thread); - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); + assert(_resize_lock_owner != thread, "Re-size lock held"); return true; } @@ -815,8 +809,7 @@ inline void ConcurrentHashTable:: internal_grow_epilog(Thread* thread) { - assert(_resize_lock->owned_by_self(), "Re-size lock not held"); - assert(_resize_lock_owner, "Should be locked"); + assert(_resize_lock_owner == thread, "Should be locked"); InternalTable* old_table = set_table_from_new(); unlock_resize_lock(thread); @@ -835,14 +828,13 @@ internal_grow(Thread* thread, size_t log2_size) { if 
(!internal_grow_prolog(thread, log2_size)) { - assert(!_resize_lock->owned_by_self(), "Re-size lock held"); + assert(_resize_lock_owner != thread, "Re-size lock held"); return false; } - assert(_resize_lock->owned_by_self(), "Re-size lock not held"); assert(_resize_lock_owner == thread, "Should be locked by me"); internal_grow_range(thread, 0, _table->_size); internal_grow_epilog(thread); - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); + assert(_resize_lock_owner != thread, "Re-size lock held"); return true; } @@ -955,15 +947,13 @@ inline void ConcurrentHashTable:: do_scan_locked(Thread* thread, FUNC& scan_f) { - assert(_resize_lock->owned_by_self() || - (thread->is_VM_thread() && SafepointSynchronize::is_at_safepoint()), - "Re-size lock not held or not VMThread at safepoint"); + assert(_resize_lock_owner == thread, "Re-size lock not held"); // We can do a critical section over the entire loop but that would block // updates for a long time. Instead we choose to block resizes. 
InternalTable* table = get_table(); - for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) { + for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) { ScopedCS cs(thread, this); - if (!visit_nodes(_table->get_bucket(bucket_it), scan_f)) { + if (!visit_nodes(table->get_bucket(bucket_it), scan_f)) { break; /* ends critical section */ } } /* ends critical section */ @@ -1094,17 +1084,11 @@ inline bool ConcurrentHashTable:: try_scan(Thread* thread, SCAN_FUNC& scan_f) { - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); - bool vm_and_safepoint = thread->is_VM_thread() && - SafepointSynchronize::is_at_safepoint(); - if (!vm_and_safepoint && !try_resize_lock(thread)) { + if (!try_resize_lock(thread)) { return false; } do_scan_locked(thread, scan_f); - if (!vm_and_safepoint) { - unlock_resize_lock(thread); - } - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); + unlock_resize_lock(thread); return true; } @@ -1113,11 +1097,11 @@ inline void ConcurrentHashTable:: do_scan(Thread* thread, SCAN_FUNC& scan_f) { - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); + assert(_resize_lock_owner != thread, "Re-size lock held"); lock_resize_lock(thread); do_scan_locked(thread, scan_f); unlock_resize_lock(thread); - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); + assert(_resize_lock_owner != thread, "Re-size lock held"); } template @@ -1126,12 +1110,11 @@ try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) { if (!try_resize_lock(thread)) { - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); return false; } do_bulk_delete_locked(thread, eval_f, del_f); unlock_resize_lock(thread); - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); + assert(_resize_lock_owner != thread, "Re-size lock held"); return true; } @@ -1140,11 +1123,9 @@ inline void ConcurrentHashTable:: bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) { - 
assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); lock_resize_lock(thread); do_bulk_delete_locked(thread, eval_f, del_f); unlock_resize_lock(thread); - assert(!_resize_lock->owned_by_self(), "Re-size lock not held"); } template @@ -1155,17 +1136,16 @@ { NumberSeq summary; size_t literal_bytes = 0; - if ((thread->is_VM_thread() && !SafepointSynchronize::is_at_safepoint()) || - (!thread->is_VM_thread() && !try_resize_lock(thread))) { + if (!try_resize_lock(thread)) { st->print_cr("statistics unavailable at this moment"); return; } InternalTable* table = get_table(); - for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) { + for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) { ScopedCS cs(thread, this); size_t count = 0; - Bucket* bucket = _table->get_bucket(bucket_it); + Bucket* bucket = table->get_bucket(bucket_it); if (bucket->have_redirect() || bucket->is_locked()) { continue; } @@ -1208,9 +1188,37 @@ st->print_cr("Std. dev. of bucket size: %9.3f", summary.sd()); st->print_cr("Maximum bucket size : %9" PRIuPTR, (size_t)summary.maximum()); - if (!thread->is_VM_thread()) { - unlock_resize_lock(thread); + unlock_resize_lock(thread); +} + +template +inline bool ConcurrentHashTable:: + try_move_nodes_to(Thread* thread, ConcurrentHashTable* to_cht) +{ + if (!try_resize_lock(thread)) { + return false; } + assert(_new_table == NULL, "Must be NULL"); + for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) { + Bucket* bucket = _table->get_bucket(bucket_it); + assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended"); + while (bucket->first() != NULL) { + Node* move_node = bucket->first(); + bool ok = bucket->cas_first(move_node->next(), move_node); + assert(ok, "Uncontended cas must work"); + bool dead_hash = false; + size_t insert_hash = CONFIG::get_hash(*move_node->value(), &dead_hash); + if (!dead_hash) { + Bucket* insert_bucket = to_cht->get_bucket(insert_hash); + 
assert(!bucket->have_redirect() && !bucket->is_locked(), "Not bit should be present"); + move_node->set_next(insert_bucket->first()); + ok = insert_bucket->cas_first(move_node, insert_bucket->first()); + assert(ok, "Uncontended cas must work"); + } + } + } + unlock_resize_lock(thread); + return true; } #endif // include guard diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp --- a/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp Thu Jun 07 15:01:13 2018 +0200 @@ -25,6 +25,7 @@ #ifndef SHARE_UTILITIES_CONCURRENT_HASH_TABLE_TASKS_INLINE_HPP #define SHARE_UTILITIES_CONCURRENT_HASH_TABLE_TASKS_INLINE_HPP +#include "utilities/globalDefinitions.hpp" #include "utilities/concurrentHashTable.inline.hpp" // This inline file contains BulkDeleteTask and GrowTasks which are both bucket @@ -63,6 +64,7 @@ // Calculate starting values. void setup() { _size_log2 = _cht->_table->_log2_size; + _task_size_log2 = MIN2(_task_size_log2, _size_log2); size_t tmp = _size_log2 > _task_size_log2 ? 
_size_log2 - _task_size_log2 : 0; _stop_task = (((size_t)1) << tmp); diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/hotspot/share/utilities/globalDefinitions.hpp --- a/src/hotspot/share/utilities/globalDefinitions.hpp Thu Jun 07 10:48:36 2018 +0200 +++ b/src/hotspot/share/utilities/globalDefinitions.hpp Thu Jun 07 15:01:13 2018 +0200 @@ -424,8 +424,8 @@ //---------------------------------------------------------------------------------------------------- // Default and minimum StringTableSize values -const int defaultStringTableSize = NOT_LP64(1009) LP64_ONLY(60013); -const int minimumStringTableSize = 1009; +const int defaultStringTableSize = NOT_LP64(1024) LP64_ONLY(65536); +const int minimumStringTableSize = 128; const int defaultSymbolTableSize = 20011; const int minimumSymbolTableSize = 1009; diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/java.base/share/classes/java/nio/channels/SelectionKey.java --- a/src/java.base/share/classes/java/nio/channels/SelectionKey.java Thu Jun 07 10:48:36 2018 +0200 +++ b/src/java.base/share/classes/java/nio/channels/SelectionKey.java Thu Jun 07 15:01:13 2018 +0200 @@ -190,6 +190,83 @@ public abstract SelectionKey interestOps(int ops); /** + * Atomically sets this key's interest set to the bitwise union ("or") of + * the existing interest set and the given value. This method is guaranteed + * to be atomic with respect to other concurrent calls to this method or to + * {@link #interestOpsAnd(int)}. + * + *

This method may be invoked at any time. If this method is invoked + * while a selection operation is in progress then it has no effect upon + * that operation; the change to the key's interest set will be seen by the + * next selection operation. + * + * @implSpec The default implementation synchronizes on this key and invokes + * {@code interestOps()} and {@code interestOps(int)} to retrieve and set + * this key's interest set. + * + * @param ops The interest set to apply + * + * @return The previous interest set + * + * @throws IllegalArgumentException + * If a bit in the set does not correspond to an operation that + * is supported by this key's channel, that is, if + * {@code (ops & ~channel().validOps()) != 0} + * + * @throws CancelledKeyException + * If this key has been cancelled + * + * @since 11 + */ + public int interestOpsOr(int ops) { + synchronized (this) { + int oldVal = interestOps(); + interestOps(oldVal | ops); + return oldVal; + } + } + + /** + * Atomically sets this key's interest set to the bitwise intersection ("and") + * of the existing interest set and the given value. This method is guaranteed + * to be atomic with respect to other concurrent calls to this method or to + * {@link #interestOpsOr(int)}. + * + *

This method may be invoked at any time. If this method is invoked + * while a selection operation is in progress then it has no effect upon + * that operation; the change to the key's interest set will be seen by the + * next selection operation. + * + * @apiNote Unlike the {@code interestOps(int)} and {@code interestOpsOr(int)} + * methods, this method does not throw {@code IllegalArgumentException} when + * invoked with bits in the interest set that do not correspond to an + * operation that is supported by this key's channel. This is to allow + * operation bits in the interest set to be cleared using bitwise complement + * values, e.g., {@code interestOpsAnd(~SelectionKey.OP_READ)} will remove + * the {@code OP_READ} from the interest set without affecting other bits. + * + * @implSpec The default implementation synchronizes on this key and invokes + * {@code interestOps()} and {@code interestOps(int)} to retrieve and set + * this key's interest set. + * + * @param ops The interest set to apply + * + * @return The previous interest set + * + * @throws CancelledKeyException + * If this key has been cancelled + * + * @since 11 + */ + public int interestOpsAnd(int ops) { + synchronized (this) { + int oldVal = interestOps(); + interestOps(oldVal & ops); + return oldVal; + } + } + + /** * Retrieves this key's ready-operation set. * *

It is guaranteed that the returned set will only contain operation diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/java.base/share/classes/sun/nio/ch/SelectionKeyImpl.java --- a/src/java.base/share/classes/sun/nio/ch/SelectionKeyImpl.java Thu Jun 07 10:48:36 2018 +0200 +++ b/src/java.base/share/classes/sun/nio/ch/SelectionKeyImpl.java Thu Jun 07 15:01:13 2018 +0200 @@ -25,6 +25,9 @@ package sun.nio.ch; +import java.lang.invoke.ConstantBootstraps; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.VarHandle; import java.nio.channels.CancelledKeyException; import java.nio.channels.SelectableChannel; import java.nio.channels.SelectionKey; @@ -39,6 +42,13 @@ public final class SelectionKeyImpl extends AbstractSelectionKey { + private static final VarHandle INTERESTOPS = + ConstantBootstraps.fieldVarHandle( + MethodHandles.lookup(), + "interestOps", + VarHandle.class, + SelectionKeyImpl.class, int.class); + private final SelChImpl channel; private final SelectorImpl selector; @@ -84,7 +94,35 @@ @Override public SelectionKey interestOps(int ops) { ensureValid(); - return nioInterestOps(ops); + if ((ops & ~channel().validOps()) != 0) + throw new IllegalArgumentException(); + int oldOps = (int) INTERESTOPS.getAndSet(this, ops); + if (ops != oldOps) { + selector.setEventOps(this); + } + return this; + } + + @Override + public int interestOpsOr(int ops) { + ensureValid(); + if ((ops & ~channel().validOps()) != 0) + throw new IllegalArgumentException(); + int oldVal = (int) INTERESTOPS.getAndBitwiseOr(this, ops); + if (oldVal != (oldVal | ops)) { + selector.setEventOps(this); + } + return oldVal; + } + + @Override + public int interestOpsAnd(int ops) { + ensureValid(); + int oldVal = (int) INTERESTOPS.getAndBitwiseAnd(this, ops); + if (oldVal != (oldVal & ops)) { + selector.setEventOps(this); + } + return oldVal; } @Override diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/java.sql.rowset/share/classes/javax/sql/rowset/package-info.java --- /dev/null Thu Jan 01 00:00:00 1970 
+0000 +++ b/src/java.sql.rowset/share/classes/javax/sql/rowset/package-info.java Thu Jun 07 15:01:13 2018 +0200 @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * Standard interfaces and base classes for JDBC RowSet + * implementations. This package contains interfaces and classes + * that a standard RowSet implementation either implements or extends. + * + *

Table of Contents

+ * + * + *

1.0 Package Specification

+ * This package specifies five standard JDBC RowSet interfaces. + * All five extend the + * RowSet interface described in the JDBC 3.0 + * specification. It is anticipated that additional definitions + * of more specialized JDBC RowSet types will emerge as this technology + * matures. Future definitions should be specified as subinterfaces using + * inheritance similar to the way it is used in this specification. + *

+ * Note: The interface definitions provided in this package form the basis for + * all compliant JDBC RowSet implementations. Vendors and more advanced + * developers who intend to provide their own compliant RowSet implementations + * should pay particular attention to the assertions detailed in specification + * interfaces. + * + *

2.0 Standard RowSet Definitions

+ *
    + *
  • JdbcRowSet - A wrapper around + * a ResultSet object that makes it possible to use the result set as a + * JavaBeans™ component. Thus, + * a JdbcRowSet object can be a Bean that any tool + * makes available for assembling an application as part of a component based + * architecture. A JdbcRowSet object is a connected RowSet + * object, that is, it + * must continually maintain its connection to its data source using a JDBC + * technology-enabled driver ("JDBC driver"). In addition, a JdbcRowSet + * object provides a fully updatable and scrollable tabular + * data structure as defined in the JDBC 3.0 specification. + * + *
  • + * CachedRowSet + * - A CachedRowSet object is a JavaBeans™ + * component that is scrollable, updatable, serializable, and generally disconnected from + * the source of its data. A CachedRowSet object + * typically contains rows from a result set, but it can also contain rows from any + * file with a tabular format, such as a spreadsheet. CachedRowSet implementations + * must use the SyncFactory to manage and obtain pluggable + * SyncProvider objects to provide synchronization between the + * disconnected RowSet object and the originating data source. + * Typically a SyncProvider implementation relies upon a JDBC + * driver to obtain connectivity to a particular data source. + * Further details on this mechanism are discussed in the javax.sql.rowset.spi package + * specification. + * + *
  • WebRowSet - A + * WebRowSet object is an extension of CachedRowSet + * that can read and write a RowSet object in a well formed XML format. + * This class calls an XmlReader object + * (an extension of the RowSetReader + * interface) to read a rowset in XML format. It calls an + * XmlWriter object (an extension of the + * RowSetWriter interface) + * to write a rowset in XML format. The reader and writer required by + * WebRowSet objects are provided by the + * SyncFactory in the form of SyncProvider + * implementations. In order to ensure well formed XML usage, a standard generic XML + * Schema is defined and published at + * + * http://java.sun.com/xml/ns/jdbc/webrowset.xsd. + * + *
  • FilteredRowSet - A + * FilteredRowSet object provides filtering functionality in a programmatic + * and extensible way. There are many instances when a RowSet object + * has a need to provide filtering in its contents without sacrificing the disconnected + * environment, thus saving the expense of having to create a connection to the data source. + * Solutions to this need vary from providing heavyweight full scale + * SQL query abilities, to portable components, to more lightweight + * approaches. A FilteredRowSet object consumes + * an implementation of the Predicate + * interface, which may define a filter at run time. In turn, a + * FilteredRowSet object is tasked with enforcing the set filter for both + * inbound and outbound read and write operations. That is, all filters can be + * considered as bi-directional. No standard filters are defined; + * however, sufficient mechanics are specified to permit any required filter to be + * implemented. + * + *
  • JoinRowSet - The JoinRowSet + * interface describes a mechanism by which relationships can be established between + * two or more standard RowSet implementations. Any number of RowSet + * objects can be added to a JoinRowSet object provided the RowSetobjects + * can be related in a SQL JOIN like fashion. By definition, the SQL JOIN + * statement is used to combine the data contained in two (or more) relational + * database tables based upon a common attribute. By establishing and then enforcing + * column matches, a JoinRowSet object establishes relationships between + * RowSet instances without the need to touch the originating data source. + *
+ * + *

3.0 Implementer's Guide

+ * Compliant implementations of JDBC RowSet Implementations + * must follow the assertions described in this specification. In accordance + * with the terms of the Java Community Process, a + * Test Compatibility Kit (TCK) can be licensed to ensure compatibility with the + * specification. The following paragraphs outline a number of starting points for + * implementers of the standard JDBC RowSet definitions. Implementers + * should also consult the Implementer's Guide in the javax.sql.rowset.spi package for guidelines + * on SyncProvider implementations. + * + *
    + *
  • 3.1 Constructor + *

    + * All RowSet implementations must provide a + * no-argument constructor. + *

  • + *
  • 3.2 Role of the BaseRowSet Class + *

    + * A compliant JDBC RowSet implementation must implement one or more + * standard interfaces specified in this package and may extend the + * BaseRowSet abstract class. For example, a + * CachedRowSet implementation must implement the CachedRowSet + * interface and extend the BaseRowSet abstract class. The + * BaseRowSet class provides the standard architecture on which all + * RowSet implementations should be built, regardless of whether the + * RowSet objects exist in a connected or disconnected environment. + * The BaseRowSet abstract class provides any RowSet implementation + * with its base functionality, including property manipulation and event notification + * that is fully compliant with JavaBeans + * component requirements. As an example, all implementations provided in the + * reference implementations (contained in the com.sun.rowset package) use + * the BaseRowSet class as a basis for their implementations. + *

    + * The following table illustrates the features that the BaseRowSet + * abstract class provides. + *

    + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
    Features in BaseRowSet
    FeatureDetails
    PropertiesProvides standard JavaBeans property manipulation + * mechanisms to allow applications to get and set RowSet command and + * property values. Refer to the documentation of the javax.sql.RowSet + * interface (available in the JDBC 3.0 specification) for more details on + * the standard RowSet properties.
    Event notificationProvides standard JavaBeans event notifications + * to registered event listeners. Refer to the documentation of javax.sql.RowSetEvent + * interface (available in the JDBC 3.0 specification) for + * more details on how to register and handle standard RowSet events generated + * by compliant implementations.
    Setters for a RowSet object's commandProvides a complete set of setter methods + * for setting RowSet command parameters.
    StreamsProvides fields for storing of stream instances + * in addition to providing a set of constants for stream type designation.
    + *
    + * + *
  • 3.3 Connected RowSet Requirements + *

    + * The JdbcRowSet describes a RowSet object that must always + * be connected to the originating data source. Implementations of the JdbcRowSet + * should ensure that this connection is provided solely by a JDBC driver. + * Furthermore, RowSet objects that are implementations of the + * JdbcRowSet interface and are therefore operating in a connected environment + * do not use the SyncFactory to obtain a RowSetReader object + * or a RowSetWriter object. They can safely rely on the JDBC driver to + * supply their needs by virtue of the presence of an underlying updatable and scrollable + * ResultSet implementation. + * + *

  • + * 3.4 Disconnected RowSet Requirements + *

    + * A disconnected RowSet object, such as a CachedRowSet object, + * should delegate + * connection management to a SyncProvider object provided by the + * SyncFactory. To ensure fully disconnected semantics, all + * disconnected RowSet objects must ensure + * that the original connection made to the data source to populate the RowSet + * object is closed to permit the garbage collector to recover and release resources. The + * SyncProvider object ensures that the critical JDBC properties are + * maintained in order to re-establish a connection to the data source when a + * synchronization is required. A disconnected RowSet object should + * therefore ensure that no + * extraneous references remain on the Connection object. + * + *

  • 3.5 Role of RowSetMetaDataImpl + *

    + * The RowsetMetaDataImpl class is a utility class that provides an implementation of the + * RowSetMetaData interface, supplying standard setter + * method implementations for metadata for both connected and disconnected + * RowSet objects. All implementations are free to use this standard + * implementation but are not required to do so. + * + *

  • 3.6 RowSetWarning Class + *

    + * The RowSetWarning class provides warnings that can be set + * on RowSet implementations. + * Similar to SQLWarning objects, + * RowSetWarning objects are silently chained to the object whose method + * caused the warning to be thrown. All RowSet implementations should + * ensure that this chaining occurs if a warning is generated and also ensure that the + * warnings are available via the getRowSetWarnings method defined in either + * the JdbcRowSet interface or the CachedRowSet interface. + * After a warning has been retrieved with one of the + * getRowSetWarnings methods, the RowSetWarning method + * getNextWarning can be called on it to retrieve any warnings that might + * be chained on it. If a warning is returned, getNextWarning can be called + * on it, and so on until there are no more warnings. + * + *

  • 3.7 The Joinable Interface + *

    + * The Joinable interface provides both connected and disconnected + * RowSet objects with the capability to be added to a + * JoinRowSet object in an SQL JOIN operation. + * A RowSet object that has implemented the Joinable + * interface can set a match column, retrieve a match column, or unset a match column. + * A JoinRowSet object can then use the RowSet object's + * match column as a basis for adding the RowSet object. + *

  • + * + *
  • 3.8 The RowSetFactory Interface + *

    + * A RowSetFactory implementation must + * be provided. + *

  • + *
+ * + *

4.0 Related Specifications

+ * + * + *

5.0 Related Documentation

+ * + */ +package javax.sql.rowset; diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/java.sql.rowset/share/classes/javax/sql/rowset/package.html --- a/src/java.sql.rowset/share/classes/javax/sql/rowset/package.html Thu Jun 07 10:48:36 2018 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,297 +0,0 @@ - - - - - - - javax.sql.rowset Package - - - - -Standard interfaces and base classes for JDBC RowSet -implementations. This package contains interfaces and classes -that a standard RowSet implementation either implements or extends. - - -

Table of Contents

- - -

1.0 Package Specification

-This package specifies five standard JDBC RowSet interfaces. - All five extend the -RowSet interface described in the JDBC 3.0 -specification. It is anticipated that additional definitions -of more specialized JDBC RowSet types will emerge as this technology -matures. Future definitions should be specified as subinterfaces using -inheritance similar to the way it is used in this specification. -

-Note: The interface definitions provided in this package form the basis for -all compliant JDBC RowSet implementations. Vendors and more advanced -developers who intend to provide their own compliant RowSet implementations -should pay particular attention to the assertions detailed in specification -interfaces. - -

2.0 Standard RowSet Definitions

-
    -
  • JdbcRowSet - A wrapper around -a ResultSet object that makes it possible to use the result set as a -JavaBeans™ component. Thus, -a JdbcRowSet object can be a Bean that any tool -makes available for assembling an application as part of a component based -architecture. A JdbcRowSet object is a connected RowSet -object, that is, it -must continually maintain its connection to its data source using a JDBC -technology-enabled driver ("JDBC driver"). In addition, a JdbcRowSet -object provides a fully updatable and scrollable tabular -data structure as defined in the JDBC 3.0 specification. - -
  • -CachedRowSet - - A CachedRowSet object is a JavaBeans™ - component that is scrollable, updatable, serializable, and generally disconnected from - the source of its data. A CachedRowSet object -typically contains rows from a result set, but it can also contain rows from any -file with a tabular format, such as a spreadsheet. CachedRowSet implementations -must use the SyncFactory to manage and obtain pluggable -SyncProvider objects to provide synchronization between the -disconnected RowSet object and the originating data source. -Typically a SyncProvider implementation relies upon a JDBC -driver to obtain connectivity to a particular data source. -Further details on this mechanism are discussed in the javax.sql.rowset.spi package -specification. - -
  • WebRowSet - A -WebRowSet object is an extension of CachedRowSet -that can read and write a RowSet object in a well formed XML format. -This class calls an XmlReader object -(an extension of the RowSetReader -interface) to read a rowset in XML format. It calls an -XmlWriter object (an extension of the -RowSetWriter interface) -to write a rowset in XML format. The reader and writer required by -WebRowSet objects are provided by the -SyncFactory in the form of SyncProvider -implementations. In order to ensure well formed XML usage, a standard generic XML -Schema is defined and published at - -http://java.sun.com/xml/ns/jdbc/webrowset.xsd. - -
  • FilteredRowSet - A -FilteredRowSet object provides filtering functionality in a programmatic -and extensible way. There are many instances when a RowSet object -has a need to provide filtering in its contents without sacrificing the disconnected -environment, thus saving the expense of having to create a connection to the data source. -Solutions to this need vary from providing heavyweight full scale -SQL query abilities, to portable components, to more lightweight -approaches. A FilteredRowSet object consumes -an implementation of the Predicate -interface, which may define a filter at run time. In turn, a -FilteredRowSet object is tasked with enforcing the set filter for both -inbound and outbound read and write operations. That is, all filters can be -considered as bi-directional. No standard filters are defined; -however, sufficient mechanics are specified to permit any required filter to be -implemented. - -
  • JoinRowSet - The JoinRowSet -interface describes a mechanism by which relationships can be established between -two or more standard RowSet implementations. Any number of RowSet - objects can be added to a JoinRowSet object provided the RowSet objects -can be related in a SQL JOIN like fashion. By definition, the SQL JOIN -statement is used to combine the data contained in two (or more) relational -database tables based upon a common attribute. By establishing and then enforcing -column matches, a JoinRowSet object establishes relationships between -RowSet instances without the need to touch the originating data source. -
- -

3.0 Implementer's Guide

-Compliant implementations of JDBC RowSet Implementations -must follow the assertions described in this specification. In accordance -with the terms of the Java Community Process, a -Test Compatibility Kit (TCK) can be licensed to ensure compatibility with the -specification. The following paragraphs outline a number of starting points for -implementers of the standard JDBC RowSet definitions. Implementers -should also consult the Implementer's Guide in the javax.sql.rowset.spi package for guidelines -on SyncProvider implementations. - -
    -
  • 3.1 Constructor -

    - All RowSet implementations must provide a -no-argument constructor. -

  • -
  • 3.2 Role of the BaseRowSet Class -

    -A compliant JDBC RowSet implementation must implement one or more -standard interfaces specified in this package and may extend the -BaseRowSet abstract class. For example, a -CachedRowSet implementation must implement the CachedRowSet -interface and extend the BaseRowSet abstract class. The -BaseRowSet class provides the standard architecture on which all -RowSet implementations should be built, regardless of whether the -RowSet objects exist in a connected or disconnected environment. -The BaseRowSet abstract class provides any RowSet implementation -with its base functionality, including property manipulation and event notification -that is fully compliant with JavaBeans -component requirements. As an example, all implementations provided in the -reference implementations (contained in the com.sun.rowset package) use -the BaseRowSet class as a basis for their implementations. -

    -The following table illustrates the features that the BaseRowSet -abstract class provides. -

    - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Features in BaseRowSet
    FeatureDetails
    PropertiesProvides standard JavaBeans property manipulation -mechanisms to allow applications to get and set RowSet command and -property values. Refer to the documentation of the javax.sql.RowSet -interface (available in the JDBC 3.0 specification) for more details on -the standard RowSet properties.
    Event notificationProvides standard JavaBeans event notifications -to registered event listeners. Refer to the documentation of javax.sql.RowSetEvent -interface (available in the JDBC 3.0 specification) for -more details on how to register and handle standard RowSet events generated -by compliant implementations.
    Setters for a RowSet object's commandProvides a complete set of setter methods - for setting RowSet command parameters.
    StreamsProvides fields for storing of stream instances - in addition to providing a set of constants for stream type designation.
    -
    - -
  • 3.3 Connected RowSet Requirements -

    -The JdbcRowSet describes a RowSet object that must always -be connected to the originating data source. Implementations of the JdbcRowSet -should ensure that this connection is provided solely by a JDBC driver. -Furthermore, RowSet objects that are implementations of the -JdbcRowSet interface and are therefore operating in a connected environment -do not use the SyncFactory to obtain a RowSetReader object -or a RowSetWriter object. They can safely rely on the JDBC driver to -supply their needs by virtue of the presence of an underlying updatable and scrollable -ResultSet implementation. - -

  • -3.4 Disconnected RowSet Requirements -

    -A disconnected RowSet object, such as a CachedRowSet object, -should delegate -connection management to a SyncProvider object provided by the -SyncFactory. To ensure fully disconnected semantics, all -disconnected RowSet objects must ensure -that the original connection made to the data source to populate the RowSet -object is closed to permit the garbage collector to recover and release resources. The -SyncProvider object ensures that the critical JDBC properties are -maintained in order to re-establish a connection to the data source when a -synchronization is required. A disconnected RowSet object should -therefore ensure that no -extraneous references remain on the Connection object. - -

  • 3.5 Role of RowSetMetaDataImpl -

    -The RowSetMetaDataImpl class is a utility class that provides an implementation of the -RowSetMetaData interface, supplying standard setter -method implementations for metadata for both connected and disconnected -RowSet objects. All implementations are free to use this standard -implementation but are not required to do so. - -

  • 3.6 RowSetWarning Class -

    -The RowSetWarning class provides warnings that can be set -on RowSet implementations. -Similar to SQLWarning objects, -RowSetWarning objects are silently chained to the object whose method -caused the warning to be thrown. All RowSet implementations should -ensure that this chaining occurs if a warning is generated and also ensure that the -warnings are available via the getRowSetWarnings method defined in either -the JdbcRowSet interface or the CachedRowSet interface. -After a warning has been retrieved with one of the -getRowSetWarnings methods, the RowSetWarning method -getNextWarning can be called on it to retrieve any warnings that might -be chained on it. If a warning is returned, getNextWarning can be called -on it, and so on until there are no more warnings. - -

  • 3.7 The Joinable Interface -

    -The Joinable interface provides both connected and disconnected -RowSet objects with the capability to be added to a -JoinRowSet object in an SQL JOIN operation. -A RowSet object that has implemented the Joinable -interface can set a match column, retrieve a match column, or unset a match column. -A JoinRowSet object can then use the RowSet object's -match column as a basis for adding the RowSet object. -

  • - -
  • 3.8 The RowSetFactory Interface -

    - A RowSetFactory implementation must - be provided. -

  • -
- -

4.0 Related Specifications

- - -

5.0 Related Documentation

- - - diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/java.sql.rowset/share/classes/javax/sql/rowset/spi/package-info.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/java.sql.rowset/share/classes/javax/sql/rowset/spi/package-info.java Thu Jun 07 15:01:13 2018 +0200 @@ -0,0 +1,480 @@ +/* + * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/** + * The standard classes and interfaces that a third party vendor has to + * use in its implementation of a synchronization provider. These classes and + * interfaces are referred to as the Service Provider Interface (SPI). To make it possible + * for a RowSet object to use an implementation, the vendor must register + * it with the SyncFactory singleton. 
(See the class comment for + * SyncProvider for a full explanation of the registration process and + * the naming convention to be used.) + * + *

Table of Contents

+ * + * + *

1.0 Package Specification

+ *

+ * The following classes and interfaces make up the javax.sql.rowset.spi + * package: + *

    + *
  • SyncFactory + *
  • SyncProvider + *
  • SyncFactoryException + *
  • SyncProviderException + *
  • SyncResolver + *
  • XmlReader + *
  • XmlWriter + *
  • TransactionalWriter + *
+ * The following interfaces, in the javax.sql package, are also part of the SPI: + *
    + *
  • RowSetReader + *
  • RowSetWriter + *
+ *

+ * A SyncProvider implementation provides a disconnected RowSet + * object with the mechanisms for reading data into it and for writing data that has been + * modified in it + * back to the underlying data source. A reader, a RowSetReader or + * XMLReader object, reads data into a RowSet object when the + * CachedRowSet methods execute or populate + * are called. A writer, a RowSetWriter or XMLWriter + * object, writes changes back to the underlying data source when the + * CachedRowSet method acceptChanges is called. + *

+ * The process of writing changes in a RowSet object to its data source + * is known as synchronization. The SyncProvider implementation that a + * RowSet object is using determines the level of synchronization that the + * RowSet object's writer uses. The various levels of synchronization are + * referred to as grades. + *

+ * The lower grades of synchronization are + * known as optimistic concurrency levels because they optimistically + * assume that there will be no conflicts or very few conflicts. A conflict exists when + * the same data modified in the RowSet object has also been modified + * in the data source. Using the optimistic concurrency model means that if there + * is a conflict, modifications to either the data source or the RowSet + * object will be lost. + *

+ * Higher grades of synchronization are called pessimistic because they assume + * that others will be accessing the data source and making modifications. These + * grades set varying levels of locks to increase the chances that no conflicts + * occur. + *

+ * The lowest level of synchronization is simply writing any changes made to the + * RowSet object to its underlying data source. The writer does + * nothing to check for conflicts. + * If there is a conflict and the data + * source values are overwritten, the changes other parties have made to the data + * source are lost. + *

+ * The RIXMLProvider implementation uses the lowest level + * of synchronization and just writes RowSet changes to the data source. + * + *

+ * For the next level up, the + * writer checks to see if there are any conflicts, and if there are, + * it does not write anything to the data source. The problem with this concurrency + * level is that if another party has modified the corresponding data in the data source + * since the RowSet object got its data, + * the changes made to the RowSet object are lost. The + * RIOptimisticProvider implementation uses this level of synchronization. + *

+ * At higher levels of synchronization, referred to as pessimistic concurrency, + * the writer takes steps to avoid conflicts by setting locks. Setting locks + * can vary from setting a lock on a single row to setting a lock on a table + * or the entire data source. The level of synchronization is therefore a tradeoff + * between the ability of users to access the data source concurrently and the ability + * of the writer to keep the data in the RowSet object and its data source + * synchronized. + *

+ * It is a requirement that all disconnected RowSet objects + * (CachedRowSet, FilteredRowSet, JoinRowSet, + * and WebRowSet objects) obtain their SyncProvider objects + * from the SyncFactory mechanism. + *

+ * The reference implementation (RI) provides two synchronization providers. + *

    + *
  • RIOptimisticProvider
    + * The default provider that the SyncFactory instance will + * supply to a disconnected RowSet object when no provider + * implementation is specified.
    + * This synchronization provider uses an optimistic concurrency model, + * assuming that there will be few conflicts among users + * who are accessing the same data in a database. It avoids + * using locks; rather, it checks to see if there is a conflict + * before trying to synchronize the RowSet object and the + * data source. If there is a conflict, it does nothing, meaning that + * changes to the RowSet object are not persisted to the data + * source. + *
  • RIXMLProvider
    + * A synchronization provider that can be used with a + * WebRowSet object, which is a rowset that can be written + * in XML format or read from XML format. The + * RIXMLProvider implementation does no checking at all for + * conflicts and simply writes any updated data in the + * WebRowSet object to the underlying data source. + * WebRowSet objects use this provider when they are + * dealing with XML data. + *
+ * + * These SyncProvider implementations + * are bundled with the reference implementation, which makes them always available to + * RowSet implementations. + * SyncProvider implementations make themselves available by being + * registered with the SyncFactory singleton. When a RowSet + * object requests a provider, by specifying it in the constructor or as an argument to the + * CachedRowSet method setSyncProvider, + * the SyncFactory singleton + * checks to see if the requested provider has been registered with it. + * If it has, the SyncFactory creates an instance of it and passes it to the + * requesting RowSet object. + * If the SyncProvider implementation that is specified has not been registered, + * the SyncFactory singleton causes a SyncFactoryException object + * to be thrown. If no provider is specified, + * the SyncFactory singleton will create an instance of the default + * provider implementation, RIOptimisticProvider, + * and pass it to the requesting RowSet object. + * + *

+ * If a WebRowSet object does not specify a provider in its constructor, the + * SyncFactory will give it an instance of RIOptimisticProvider. + * However, the constructor for WebRowSet is implemented to set the provider + * to the RIXMLProvider, which reads and writes a RowSet object + * in XML format. + *

+ * See the SyncProvider class + * specification for further details. + *

+ * Vendors may develop a SyncProvider implementation with any one of the possible + * levels of synchronization, thus giving RowSet objects a choice of + * synchronization mechanisms. + * + *

2.0 Service Provider Interface Architecture

+ * 2.1 Overview + *

+ * The Service Provider Interface provides a pluggable mechanism by which + * SyncProvider implementations can be registered and then generated when + * required. The lazy reference mechanism employed by the SyncFactory limits + * unnecessary resource consumption by not creating an instance until it is + * required by a disconnected + * RowSet object. The SyncFactory class also provides + * a standard API to configure logging options and streams that may be provided + * by a particular SyncProvider implementation. + *

+ * 2.2 Registering with the SyncFactory + *

+ * A third party SyncProvider implementation must be registered with the + * SyncFactory in order for a disconnected RowSet object + * to obtain it and thereby use its javax.sql.RowSetReader and + * javax.sql.RowSetWriter + * implementations. The following registration mechanisms are available to all + * SyncProvider implementations: + *

    + *
  • System properties - Properties set at the command line. These + * properties are set at run time and apply system-wide per invocation of the Java + * application. See the section "Related Documentation" + * for further related information. + * + *
  • Property Files - Properties specified in a standard property file. + * This can be specified using a System Property or by modifying a standard + * property file located in the platform run-time. The + * reference implementation of this technology includes a standard property + * file that can be edited to add additional SyncProvider objects. + * + *
  • JNDI Context - Available providers can be registered on a JNDI + * context. The SyncFactory will attempt to load SyncProvider + * objects bound to the context and register them with the factory. This + * context must be supplied to the SyncFactory for the mechanism to + * function correctly. + *
+ *

+ * Details on how to specify the system properties or properties in a property file + * and how to configure the JNDI Context are explained in detail in the + * SyncFactory class description. + *

+ * 2.3 SyncFactory Provider Instance Generation Policies + *

+ * The SyncFactory generates a requested SyncProvider + * object if the provider has been correctly registered. The + * following policies are adhered to when either a disconnected RowSet object + * is instantiated with a specified SyncProvider implementation or is + * reconfigured at runtime with an alternative SyncProvider object. + *

    + *
  • If a SyncProvider object is specified and the SyncFactory + * contains no reference to the provider, a SyncFactoryException is + * thrown. + * + *
  • If a SyncProvider object is specified and the SyncFactory + * contains a reference to the provider, the requested provider is supplied. + * + *
  • If no SyncProvider object is specified, the reference + * implementation provider RIOptimisticProvider is supplied. + *
+ *

+ * These policies are explored in more detail in the + * SyncFactory class. + * + *

3.0 SyncProvider Implementer's Guide

+ * + * 3.1 Requirements + *

+ * A compliant SyncProvider implementation that is fully pluggable + * into the SyncFactory must extend and implement all + * abstract methods in the SyncProvider + * class. In addition, an implementation must determine the + * grade, locking and updatable view capabilities defined in the + * SyncProvider class definition. One or more of the + * SyncProvider description criteria must be supported. It + * is expected that vendor implementations will offer a range of grade, locking, and + * updatable view capabilities. + *

+ * Furthermore, the SyncProvider naming convention must be followed as + * detailed in the SyncProvider class + * description. + *

+ * 3.2 Grades + *

+ * JSR 114 defines a set of grades to describe the quality of synchronization + * a SyncProvider object can offer a disconnected RowSet + * object. These grades are listed from the lowest quality of service to the highest. + *

    + *
  • GRADE_NONE - No synchronization with the originating data source is + * provided. A SyncProvider implementation returning this grade will simply + * attempt to write any data that has changed in the RowSet object to the + *underlying data source, overwriting whatever is there. No attempt is made to compare + * original values with current values to see if there is a conflict. The + * RIXMLProvider is implemented with this grade. + * + *
  • GRADE_CHECK_MODIFIED_AT_COMMIT - A low grade of optimistic synchronization. + * A SyncProvider implementation returning this grade + * will check for conflicts in rows that have changed between the last synchronization + * and the current synchronization under way. Any changes in the originating data source + * that have been modified will not be reflected in the disconnected RowSet + * object. If there are no conflicts, changes in the RowSet object will be + * written to the data source. If there are conflicts, no changes are written. + * The RIOptimisticProvider implementation uses this grade. + * + *
  • GRADE_CHECK_ALL_AT_COMMIT - A high grade of optimistic synchronization. + * A SyncProvider implementation returning this grade + * will check all rows, including rows that have not changed in the disconnected + * RowSet object. In this way, any changes to rows in the underlying + * data source will be reflected in the disconnected RowSet object + * when the synchronization finishes successfully. + * + *
  • GRADE_LOCK_WHEN_MODIFIED - A pessimistic grade of synchronization. + * SyncProvider implementations returning this grade will lock + * the row in the originating data source that corresponds to the row being changed + * in the RowSet object to reduce the possibility of other + * processes modifying the same data in the data source. + * + *
  • GRADE_LOCK_WHEN_LOADED - A higher pessimistic synchronization grade. + * A SyncProvider implementation returning this grade will lock + * the entire view and/or table affected by the original query used to + * populate a RowSet object. + *
+ *

+ * 3.3 Locks + *

+ * JSR 114 defines a set of constants that specify whether any locks have been + * placed on a RowSet object's underlying data source and, if so, + * on which constructs the locks are placed. These locks will remain on the data + * source while the RowSet object is disconnected from the data source. + *

+ * These constants should be considered complementary to the + * grade constants. The default setting for the majority of grade settings requires + * that no data source locks remain when a RowSet object is disconnected + * from its data source. + * The grades GRADE_LOCK_WHEN_MODIFIED and + * GRADE_LOCK_WHEN_LOADED allow a disconnected RowSet object + * to have a fine-grained control over the degree of locking. + *

    + *
  • DATASOURCE_NO_LOCK - No locks remain on the originating data source. + * This is the default lock setting for all SyncProvider implementations + * unless otherwise directed by a RowSet object. + * + *
  • DATASOURCE_ROW_LOCK - A lock is placed on the rows that are touched by + * the original SQL query used to populate the RowSet object. + * + *
  • DATASOURCE_TABLE_LOCK - A lock is placed on all tables that are touched + * by the query that was used to populate the RowSet object. + * + *
  • DATASOURCE_DB_LOCK + * A lock is placed on the entire data source that is used by the RowSet + * object. + *
+ *

+ * 3.4 Updatable Views + *

+ * A RowSet object may be populated with data from an SQL VIEW. + * The following constants indicate whether a SyncProvider object can + * update data in the table or tables from which the VIEW was derived. + *

    + *
  • UPDATABLE_VIEW_SYNC + * Indicates that a SyncProvider implementation supports synchronization + * to the table or tables from which the SQL VIEW used to populate + * a RowSet object is derived. + * + *
  • NONUPDATABLE_VIEW_SYNC + * Indicates that a SyncProvider implementation does not support + * synchronization to the table or tables from which the SQL VIEW + * used to populate a RowSet object is derived. + *
+ *

+ * 3.5 Usage of SyncProvider Grading and Locking + *

+ * In the example below, the reference CachedRowSetImpl implementation + * reconfigures its current SyncProvider object by calling the + * setSyncProvider method.
+ * + *

+ *   CachedRowSetImpl crs = new CachedRowSetImpl();
+ *   crs.setSyncProvider("com.foo.bar.HASyncProvider");
+ * 
+ * An application can retrieve the SyncProvider object currently in use + * by a disconnected RowSet object. It can also retrieve the + * grade of synchronization with which the provider was implemented and the degree of + * locking currently in use. In addition, an application has the flexibility to set + * the degree of locking to be used, which can increase the possibilities for successful + * synchronization. These operations are shown in the following code fragment. + *
+ *   SyncProvider sync = crs.getSyncProvider();
+ *
+ *   switch (sync.getProviderGrade()) {
+ *   case: SyncProvider.GRADE_CHECK_ALL_AT_COMMIT
+ *         //A high grade of optimistic synchronization
+ *    break;
+ *    case: SyncProvider.GRADE_CHECK_MODIFIED_AT_COMMIT
+ *         //A low grade of optimistic synchronization
+ *    break;
+ *    case: SyncProvider.GRADE_LOCK_WHEN_LOADED
+ *         // A pessimistic synchronization grade
+ *    break;
+ *    case: SyncProvider.GRADE_LOCK_WHEN_MODIFIED
+ *         // A pessimistic synchronization grade
+ *    break;
+ *    case: SyncProvider.GRADE_NONE
+ *      // No synchronization with the originating data source provided
+ *    break;
+ *    }
+ *
+ *    switch (sync.getDataSourceLock()) {
+ *      case: SyncProvider.DATASOURCE_DB_LOCK
+ *       // A lock is placed on the entire datasource that is used by the
+ *       // RowSet object
+ *       break;
+ *
+ *      case: SyncProvider.DATASOURCE_NO_LOCK
+ *       // No locks remain on the  originating data source.
+ *      break;
+ *
+ *      case: SyncProvider.DATASOURCE_ROW_LOCK
+ *       // A lock is placed on the rows that are  touched by the original
+ *       // SQL statement used to populate
+ *       // the RowSet object that is using the SyncProvider
+ *       break;
+ *
+ *      case: DATASOURCE_TABLE_LOCK
+ *       // A lock is placed on  all tables that are touched by the original
+ *       // SQL statement used to populate
+ *       // the RowSet object that is using the SyncProvider
+ *      break;
+ *
+ * 
+ * It is also possible using the static utility method in the + * SyncFactory class to determine the list of SyncProvider + * implementations currently registered with the SyncFactory. + * + *
+ *       Enumeration e = SyncFactory.getRegisteredProviders();
+ * 
+ * + * + *

4.0 Resolving Synchronization Conflicts

+ * + * The interface SyncResolver provides a way for an application to + * decide manually what to do when a conflict occurs. When the CachedRowSet + * method acceptChanges finishes and has detected one or more conflicts, + * it throws a SyncProviderException object. An application can + * catch the exception and + * have it retrieve a SyncResolver object by calling the method + * SyncProviderException.getSyncResolver(). + *

+ * A SyncResolver object, which is a special kind of + * CachedRowSet object or + * a JdbcRowSet object that has implemented the SyncResolver + * interface, examines the conflicts row by row. It is a duplicate of the + * RowSet object being synchronized except that it contains only the data + * from the data source that is causing a conflict. All of the other column values are + * set to null. To navigate from one conflict value to another, a + * SyncResolver object provides the methods nextConflict and + * previousConflict. + *

+ * The SyncResolver interface also + * provides methods for doing the following: + *

    + *
  • finding out whether the conflict involved an update, a delete, or an insert + *
  • getting the value in the data source that caused the conflict + *
  • setting the value that should be in the data source if it needs to be changed + * or setting the value that should be in the RowSet object if it needs + * to be changed + *
+ *

+ * When the CachedRowSet method acceptChanges is called, it + * delegates to the RowSet object's SyncProvider object. + * How the writer provided by that SyncProvider object is implemented + * determines what level (grade) of checking for conflicts will be done. After all + * checking for conflicts is completed and one or more conflicts has been found, the method + * acceptChanges throws a SyncProviderException object. The + * application can catch the exception and use it to obtain a SyncResolver object. + *

+ * The application can then use SyncResolver methods to get information + * about each conflict and decide what to do. If the application logic or the user + * decides that a value in the RowSet object should be the one to + * persist, the application or user can overwrite the data source value with it. + *

+ * The comment for the SyncResolver interface has more detail. + * + *

5.0 Related Specifications

+ * + *

6.0 Related Documentation

+ * + */ +package javax.sql.rowset.spi; diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/java.sql.rowset/share/classes/javax/sql/rowset/spi/package.html --- a/src/java.sql.rowset/share/classes/javax/sql/rowset/spi/package.html Thu Jun 07 10:48:36 2018 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,493 +0,0 @@ - - - - - - - - - javax.sql.rowset.spi - - - - -The standard classes and interfaces that a third party vendor has to -use in its implementation of a synchronization provider. These classes and -interfaces are referred to as the Service Provider Interface (SPI). To make it possible -for a RowSet object to use an implementation, the vendor must register -it with the SyncFactory singleton. (See the class comment for -SyncProvider for a full explanation of the registration process and -the naming convention to be used.) - -

Table of Contents

- - -

1.0 Package Specification

-

-The following classes and interfaces make up the javax.sql.rowset.spi -package: -

    -
  • SyncFactory -
  • SyncProvider -
  • SyncFactoryException -
  • SyncProviderException -
  • SyncResolver -
  • XmlReader -
  • XmlWriter -
  • TransactionalWriter -
-The following interfaces, in the javax.sql package, are also part of the SPI: -
    -
  • RowSetReader -
  • RowSetWriter -
-

-A SyncProvider implementation provides a disconnected RowSet -object with the mechanisms for reading data into it and for writing data that has been -modified in it -back to the underlying data source. A reader, a RowSetReader or -XMLReader object, reads data into a RowSet object when the -CachedRowSet methods execute or populate -are called. A writer, a RowSetWriter or XMLWriter -object, writes changes back to the underlying data source when the -CachedRowSet method acceptChanges is called. -

-The process of writing changes in a RowSet object to its data source -is known as synchronization. The SyncProvider implementation that a -RowSet object is using determines the level of synchronization that the -RowSet object's writer uses. The various levels of synchronization are -referred to as grades. -

-The lower grades of synchronization are -known as optimistic concurrency levels because they optimistically -assume that there will be no conflicts or very few conflicts. A conflict exists when -the same data modified in the RowSet object has also been modified -in the data source. Using the optimistic concurrency model means that if there -is a conflict, modifications to either the data source or the RowSet -object will be lost. -

-Higher grades of synchronization are called pessimistic because they assume -that others will be accessing the data source and making modifications. These -grades set varying levels of locks to increase the chances that no conflicts -occur. -

-The lowest level of synchronization is simply writing any changes made to the -RowSet object to its underlying data source. The writer does -nothing to check for conflicts. -If there is a conflict and the data -source values are overwritten, the changes other parties have made to the data -source are lost. -

-The RIXMLProvider implementation uses the lowest level -of synchronization and just writes RowSet changes to the data source. - -

-For the next level up, the -writer checks to see if there are any conflicts, and if there are, -it does not write anything to the data source. The problem with this concurrency -level is that if another party has modified the corresponding data in the data source -since the RowSet object got its data, -the changes made to the RowSet object are lost. The -RIOptimisticProvider implementation uses this level of synchronization. -

-At higher levels of synchronization, referred to as pessimistic concurrency, -the writer takes steps to avoid conflicts by setting locks. Setting locks -can vary from setting a lock on a single row to setting a lock on a table -or the entire data source. The level of synchronization is therefore a tradeoff -between the ability of users to access the data source concurrently and the ability -of the writer to keep the data in the RowSet object and its data source -synchronized. -

-It is a requirement that all disconnected RowSet objects -(CachedRowSet, FilteredRowSet, JoinRowSet, -and WebRowSet objects) obtain their SyncProvider objects -from the SyncFactory mechanism. -

-The reference implementation (RI) provides two synchronization providers. -

    -
  • RIOptimisticProvider
    - The default provider that the SyncFactory instance will - supply to a disconnected RowSet object when no provider - implementation is specified.
    - This synchronization provider uses an optimistic concurrency model, - assuming that there will be few conflicts among users - who are accessing the same data in a database. It avoids - using locks; rather, it checks to see if there is a conflict - before trying to synchronize the RowSet object and the - data source. If there is a conflict, it does nothing, meaning that - changes to the RowSet object are not persisted to the data - source. -
  • RIXMLProvider
    - A synchronization provider that can be used with a - WebRowSet object, which is a rowset that can be written - in XML format or read from XML format. The - RIXMLProvider implementation does no checking at all for - conflicts and simply writes any updated data in the - WebRowSet object to the underlying data source. - WebRowSet objects use this provider when they are - dealing with XML data. -
- -These SyncProvider implementations -are bundled with the reference implementation, which makes them always available to -RowSet implementations. -SyncProvider implementations make themselves available by being -registered with the SyncFactory singleton. When a RowSet -object requests a provider, by specifying it in the constructor or as an argument to the -CachedRowSet method setSyncProvider, -the SyncFactory singleton -checks to see if the requested provider has been registered with it. -If it has, the SyncFactory creates an instance of it and passes it to the -requesting RowSet object. -If the SyncProvider implementation that is specified has not been registered, -the SyncFactory singleton causes a SyncFactoryException object -to be thrown. If no provider is specified, -the SyncFactory singleton will create an instance of the default -provider implementation, RIOptimisticProvider, -and pass it to the requesting RowSet object. - -

-If a WebRowSet object does not specify a provider in its constructor, the -SyncFactory will give it an instance of RIOptimisticProvider. -However, the constructor for WebRowSet is implemented to set the provider -to the RIXMLProvider, which reads and writes a RowSet object -in XML format. -

-See the SyncProvider class -specification for further details. -

-Vendors may develop a SyncProvider implementation with any one of the possible -levels of synchronization, thus giving RowSet objects a choice of -synchronization mechanisms. - -

2.0 Service Provider Interface Architecture

-2.1 Overview -

-The Service Provider Interface provides a pluggable mechanism by which -SyncProvider implementations can be registered and then generated when -required. The lazy reference mechanism employed by the SyncFactory limits -unnecessary resource consumption by not creating an instance until it is -required by a disconnected -RowSet object. The SyncFactory class also provides -a standard API to configure logging options and streams that may be provided -by a particular SyncProvider implementation. -

-2.2 Registering with the SyncFactory -

-A third party SyncProvider implementation must be registered with the -SyncFactory in order for a disconnected RowSet object -to obtain it and thereby use its javax.sql.RowSetReader and -javax.sql.RowSetWriter -implementations. The following registration mechanisms are available to all -SyncProvider implementations: -

    -
  • System properties - Properties set at the command line. These -properties are set at run time and apply system-wide per invocation of the Java -application. See the section "Related Documentation" -further related information. - -
  • Property Files - Properties specified in a standard property file. -This can be specified using a System Property or by modifying a standard -property file located in the platform run-time. The -reference implementation of this technology includes a standard property -file than can be edited to add additional SyncProvider objects. - -
  • JNDI Context - Available providers can be registered on a JNDI -context. The SyncFactory will attempt to load SyncProvider -objects bound to the context and register them with the factory. This -context must be supplied to the SyncFactory for the mechanism to -function correctly. -
-

-Details on how to specify the system properties or properties in a property file -and how to configure the JNDI Context are explained in detail in the -SyncFactory class description. -

-2.3 SyncFactory Provider Instance Generation Policies -

-The SyncFactory generates a requested SyncProvider -object if the provider has been correctly registered. The -following policies are adhered to when either a disconnected RowSet object -is instantiated with a specified SyncProvider implementation or is -reconfigured at runtime with an alternative SyncProvider object. -

    -
  • If a SyncProvider object is specified and the SyncFactory -contains no reference to the provider, a SyncFactoryException is -thrown. - -
  • If a SyncProvider object is specified and the SyncFactory -contains a reference to the provider, the requested provider is supplied. - -
  • If no SyncProvider object is specified, the reference -implementation provider RIOptimisticProvider is supplied. -
-

-These policies are explored in more detail in the -SyncFactory class. - -

3.0 SyncProvider Implementer's Guide

- -3.1 Requirements -

-A compliant SyncProvider implementation that is fully pluggable -into the SyncFactory must extend and implement all -abstract methods in the SyncProvider -class. In addition, an implementation must determine the -grade, locking and updatable view capabilities defined in the -SyncProvider class definition. One or more of the -SyncProvider description criteria must be supported. It -is expected that vendor implementations will offer a range of grade, locking, and -updatable view capabilities. -

-Furthermore, the SyncProvider naming convention must be followed as -detailed in the SyncProvider class -description. -

-3.2 Grades -

-JSR 114 defines a set of grades to describe the quality of synchronization -a SyncProvider object can offer a disconnected RowSet -object. These grades are listed from the lowest quality of service to the highest. -

    -
  • GRADE_NONE - No synchronization with the originating data source is -provided. A SyncProvider implementation returning this grade will simply -attempt to write any data that has changed in the RowSet object to the -underlying data source, overwriting whatever is there. No attempt is made to compare -original values with current values to see if there is a conflict. The -RIXMLProvider is implemented with this grade. - -
  • GRADE_CHECK_MODIFIED_AT_COMMIT - A low grade of optimistic synchronization. -A SyncProvider implementation returning this grade -will check for conflicts in rows that have changed between the last synchronization -and the current synchronization under way. Any changes in the originating data source -that have been modified will not be reflected in the disconnected RowSet -object. If there are no conflicts, changes in the RowSet object will be -written to the data source. If there are conflicts, no changes are written. -The RIOptimisticProvider implementation uses this grade. - -
  • GRADE_CHECK_ALL_AT_COMMIT - A high grade of optimistic synchronization. -A SyncProvider implementation returning this grade -will check all rows, including rows that have not changed in the disconnected -RowSet object. In this way, any changes to rows in the underlying -data source will be reflected in the disconnected RowSet object -when the synchronization finishes successfully. - -
  • GRADE_LOCK_WHEN_MODIFIED - A pessimistic grade of synchronization. -SyncProvider implementations returning this grade will lock -the row in the originating data source that corresponds to the row being changed -in the RowSet object to reduce the possibility of other -processes modifying the same data in the data source. - -
  • GRADE_LOCK_WHEN_LOADED - A higher pessimistic synchronization grade. -A SyncProvider implementation returning this grade will lock -the entire view and/or table affected by the original query used to -populate a RowSet object. -
-

-3.3 Locks -

-JSR 114 defines a set of constants that specify whether any locks have been -placed on a RowSet object's underlying data source and, if so, -on which constructs the locks are placed. These locks will remain on the data -source while the RowSet object is disconnected from the data source. -

-These constants should be considered complementary to the -grade constants. The default setting for the majority of grade settings requires -that no data source locks remain when a RowSet object is disconnected -from its data source. -The grades GRADE_LOCK_WHEN_MODIFIED and -GRADE_LOCK_WHEN_LOADED allow a disconnected RowSet object -to have a fine-grained control over the degree of locking. -

    -
  • DATASOURCE_NO_LOCK - No locks remain on the originating data source. -This is the default lock setting for all SyncProvider implementations -unless otherwise directed by a RowSet object. - -
  • DATASOURCE_ROW_LOCK - A lock is placed on the rows that are touched by -the original SQL query used to populate the RowSet object. - -
  • DATASOURCE_TABLE_LOCK - A lock is placed on all tables that are touched -by the query that was used to populate the RowSet object. - -
  • DATASOURCE_DB_LOCK -A lock is placed on the entire data source that is used by the RowSet -object. -
-

-3.4 Updatable Views -

-A RowSet object may be populated with data from an SQL VIEW. -The following constants indicate whether a SyncProvider object can -update data in the table or tables from which the VIEW was derived. -

    -
  • UPDATABLE_VIEW_SYNC -Indicates that a SyncProvider implementation supports synchronization -to the table or tables from which the SQL VIEW used to populate -a RowSet object is derived. - -
  • NONUPDATABLE_VIEW_SYNC -Indicates that a SyncProvider implementation does not support -synchronization to the table or tables from which the SQL VIEW -used to populate a RowSet object is derived. -
-

-3.5 Usage of SyncProvider Grading and Locking -

-In the example below, the reference CachedRowSetImpl implementation -reconfigures its current SyncProvider object by calling the -setSyncProvider method.
- -

-    CachedRowSetImpl crs = new CachedRowSetImpl();
-    crs.setSyncProvider("com.foo.bar.HASyncProvider");
-
- An application can retrieve the SyncProvider object currently in use -by a disconnected RowSet object. It can also retrieve the -grade of synchronization with which the provider was implemented and the degree of -locking currently in use. In addition, an application has the flexibility to set -the degree of locking to be used, which can increase the possibilities for successful -synchronization. These operation are shown in the following code fragment. -
-    SyncProvider sync = crs.getSyncProvider();
-
-    switch (sync.getProviderGrade()) {
-    case: SyncProvider.GRADE_CHECK_ALL_AT_COMMIT
-         //A high grade of optimistic synchronization
-    break;
-    case: SyncProvider.GRADE_CHECK_MODIFIED_AT_COMMIT
-         //A low grade of optimistic synchronization
-    break;
-    case: SyncProvider.GRADE_LOCK_WHEN_LOADED
-         // A pessimistic synchronization grade
-    break;
-    case: SyncProvider.GRADE_LOCK_WHEN_MODIFIED
-         // A pessimistic synchronization grade
-    break;
-    case: SyncProvider.GRADE_NONE
-      // No synchronization with the originating data source provided
-    break;
-    }
-
-    switch (sync.getDataSourcLock() {
-      case: SyncProvider.DATASOURCE_DB_LOCK
-       // A lock is placed on the entire datasource that is used by the
-       // RowSet object
-       break;
-
-      case: SyncProvider.DATASOURCE_NO_LOCK
-       // No locks remain on the  originating data source.
-      break;
-
-      case: SyncProvider.DATASOURCE_ROW_LOCK
-       // A lock is placed on the rows that are  touched by the original
-       // SQL statement used to populate
-       // the RowSet object that is using the SyncProvider
-       break;
-
-      case: DATASOURCE_TABLE_LOCK
-       // A lock is placed on  all tables that are touched by the original
-       // SQL statement used to populated
-       // the RowSet object that is using the SyncProvider
-       break;
-
-
- It is also possible using the static utility method in the -SyncFactory class to determine the list of SyncProvider -implementations currently registered with the SyncFactory. - -
-       Enumeration e = SyncFactory.getRegisteredProviders();
-
- - -

4.0 Resolving Synchronization Conflicts

- -The interface SyncResolver provides a way for an application to -decide manually what to do when a conflict occurs. When the CachedRowSet -method acceptChanges finishes and has detected one or more conflicts, -it throws a SyncProviderException object. An application can -catch the exception and -have it retrieve a SyncResolver object by calling the method -SyncProviderException.getSyncResolver(). -

-A SyncResolver object, which is a special kind of -CachedRowSet object or -a JdbcRowSet object that has implemented the SyncResolver -interface, examines the conflicts row by row. It is a duplicate of the -RowSet object being synchronized except that it contains only the data -from the data source this is causing a conflict. All of the other column values are -set to null. To navigate from one conflict value to another, a -SyncResolver object provides the methods nextConflict and -previousConflict. -

-The SyncResolver interface also -provides methods for doing the following: -

    -
  • finding out whether the conflict involved an update, a delete, or an insert -
  • getting the value in the data source that caused the conflict -
  • setting the value that should be in the data source if it needs to be changed - or setting the value that should be in the RowSet object if it needs - to be changed -
-

-When the CachedRowSet method acceptChanges is called, it -delegates to the RowSet object's SyncProvider object. -How the writer provided by that SyncProvider object is implemented -determines what level (grade) of checking for conflicts will be done. After all -checking for conflicts is completed and one or more conflicts has been found, the method -acceptChanges throws a SyncProviderException object. The -application can catch the exception and use it to obtain a SyncResolver object. -

-The application can then use SyncResolver methods to get information -about each conflict and decide what to do. If the application logic or the user -decides that a value in the RowSet object should be the one to -persist, the application or user can overwrite the data source value with it. -

-The comment for the SyncResolver interface has more detail. - -

5.0 Related Specifications

- -

6.0 Related Documentation

- - - - diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/StringTable.java --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/StringTable.java Thu Jun 07 10:48:36 2018 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,77 +0,0 @@ -/* - * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- * - */ - -package sun.jvm.hotspot.memory; - -import java.io.*; -import java.util.*; -import sun.jvm.hotspot.debugger.*; -import sun.jvm.hotspot.oops.*; -import sun.jvm.hotspot.types.*; -import sun.jvm.hotspot.runtime.*; -import sun.jvm.hotspot.utilities.*; - -public class StringTable extends sun.jvm.hotspot.utilities.Hashtable { - static { - VM.registerVMInitializedObserver(new Observer() { - public void update(Observable o, Object data) { - initialize(VM.getVM().getTypeDataBase()); - } - }); - } - - private static synchronized void initialize(TypeDataBase db) { - Type type = db.lookupType("StringTable"); - theTableField = type.getAddressField("_the_table"); - } - - // Fields - private static AddressField theTableField; - - // Accessors - public static StringTable getTheTable() { - Address tmp = theTableField.getValue(); - return (StringTable) VMObjectFactory.newObject(StringTable.class, tmp); - } - - public StringTable(Address addr) { - super(addr); - } - - public interface StringVisitor { - public void visit(Instance string); - } - - public void stringsDo(StringVisitor visitor) { - ObjectHeap oh = VM.getVM().getObjectHeap(); - int numBuckets = tableSize(); - for (int i = 0; i < numBuckets; i++) { - for (HashtableEntry e = (HashtableEntry) bucket(i); e != null; - e = (HashtableEntry) e.next()) { - Instance s = (Instance)oh.newOop(e.literalValue().addOffsetToAsOopHandle(0)); - visitor.visit(s); - } - } - } -} diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java Thu Jun 07 10:48:36 2018 +0200 +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java Thu Jun 07 15:01:13 2018 +0200 @@ -79,7 +79,6 @@ private Universe universe; private ObjectHeap heap; private SymbolTable symbols; - private StringTable strings; private SystemDictionary dict; private ClassLoaderDataGraph cldGraph; private Threads threads; @@ -655,13 +654,6 @@ 
return symbols; } - public StringTable getStringTable() { - if (strings == null) { - strings = StringTable.getTheTable(); - } - return strings; - } - public SystemDictionary getSystemDictionary() { if (dict == null) { dict = new SystemDictionary(); diff -r f0d5c39dfbc1 -r 28b415bc6f4d src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Thu Jun 07 10:48:36 2018 +0200 +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Thu Jun 07 15:01:13 2018 +0200 @@ -129,7 +129,6 @@ } System.out.println(); - printInternStringStatistics(); } // Helper methods @@ -258,41 +257,4 @@ return -1; } } - - private void printInternStringStatistics() { - class StringStat implements StringTable.StringVisitor { - private int count; - private long size; - private OopField stringValueField; - - StringStat() { - VM vm = VM.getVM(); - SystemDictionary sysDict = vm.getSystemDictionary(); - InstanceKlass strKlass = sysDict.getStringKlass(); - // String has a field named 'value' of type 'byte[]'. - stringValueField = (OopField) strKlass.findField("value", "[B"); - } - - private long stringSize(Instance instance) { - // We include String content in size calculation. 
- return instance.getObjectSize() + - stringValueField.getValue(instance).getObjectSize(); - } - - public void visit(Instance str) { - count++; - size += stringSize(str); - } - - public void print() { - System.out.println(count + - " interned Strings occupying " + size + " bytes."); - } - } - - StringStat stat = new StringStat(); - StringTable strTable = VM.getVM().getStringTable(); - strTable.stringsDo(stat); - stat.print(); - } } diff -r f0d5c39dfbc1 -r 28b415bc6f4d test/hotspot/gtest/utilities/test_concurrentHashtable.cpp --- a/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp Thu Jun 07 10:48:36 2018 +0200 +++ b/test/hotspot/gtest/utilities/test_concurrentHashtable.cpp Thu Jun 07 15:01:13 2018 +0200 @@ -265,6 +265,40 @@ delete cht; } +struct ChtCountScan { + size_t _count; + ChtCountScan() : _count(0) {} + bool operator()(uintptr_t* val) { + _count++; + return true; /* continue scan */ + } +}; + +static void cht_move_to(Thread* thr) { + uintptr_t val1 = 0x2; + uintptr_t val2 = 0xe0000002; + uintptr_t val3 = 0x3; + SimpleTestLookup stl1(val1), stl2(val2), stl3(val3); + SimpleTestTable* from_cht = new SimpleTestTable(); + EXPECT_TRUE(from_cht->insert(thr, stl1, val1)) << "Insert unique value failed."; + EXPECT_TRUE(from_cht->insert(thr, stl2, val2)) << "Insert unique value failed."; + EXPECT_TRUE(from_cht->insert(thr, stl3, val3)) << "Insert unique value failed."; + + SimpleTestTable* to_cht = new SimpleTestTable(); + EXPECT_TRUE(from_cht->try_move_nodes_to(thr, to_cht)) << "Moving nodes to new table failed"; + + ChtCountScan scan_old; + EXPECT_TRUE(from_cht->try_scan(thr, scan_old)) << "Scanning table should work."; + EXPECT_EQ(scan_old._count, (size_t)0) << "All items should be moved"; + + ChtCountScan scan_new; + EXPECT_TRUE(to_cht->try_scan(thr, scan_new)) << "Scanning table should work."; + EXPECT_EQ(scan_new._count, (size_t)3) << "All items should be moved"; + EXPECT_TRUE(to_cht->get_copy(thr, stl1) == val1) << "Getting an inserted value should 
work."; + EXPECT_TRUE(to_cht->get_copy(thr, stl2) == val2) << "Getting an inserted value should work."; + EXPECT_TRUE(to_cht->get_copy(thr, stl3) == val3) << "Getting an inserted value should work."; +} + static void cht_grow(Thread* thr) { uintptr_t val = 0x2; uintptr_t val2 = 0x22; @@ -371,6 +405,10 @@ nomt_test_doer(cht_scan); } +TEST_VM(ConcurrentHashTable, basic_move_to) { + nomt_test_doer(cht_move_to); +} + TEST_VM(ConcurrentHashTable, basic_grow) { nomt_test_doer(cht_grow); } diff -r f0d5c39dfbc1 -r 28b415bc6f4d test/hotspot/jtreg/serviceability/dcmd/vm/ClassLoaderHierarchyTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/hotspot/jtreg/serviceability/dcmd/vm/ClassLoaderHierarchyTest.java Thu Jun 07 15:01:13 2018 +0200 @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, SAP SE. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @summary Test of diagnostic command VM.classloaders + * @library /test/lib + * @modules java.base/jdk.internal.misc + * java.compiler + * java.management + * jdk.internal.jvmstat/sun.jvmstat.monitor + * @run testng ClassLoaderHierarchyTest + */ + +import org.testng.Assert; +import org.testng.annotations.Test; + +import jdk.test.lib.process.OutputAnalyzer; +import jdk.test.lib.dcmd.CommandExecutor; +import jdk.test.lib.dcmd.JMXExecutor; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; + +public class ClassLoaderHierarchyTest { + +//+-- +// | +// +-- "platform", jdk.internal.loader.ClassLoaders$PlatformClassLoader +// | | +// | +-- "app", jdk.internal.loader.ClassLoaders$AppClassLoader +// | +// +-- jdk.internal.reflect.DelegatingClassLoader +// | +// +-- "Kevin", ClassLoaderHierarchyTest$TestClassLoader +// | +// +-- ClassLoaderHierarchyTest$TestClassLoader +// | +// +-- "Bill", ClassLoaderHierarchyTest$TestClassLoader + + public void run(CommandExecutor executor) throws ClassNotFoundException { + + ClassLoader unnamed_cl = new TestClassLoader(null, null); + Class c1 = Class.forName("TestClass2", true, unnamed_cl); + if (c1.getClassLoader() != unnamed_cl) { + Assert.fail("TestClass defined by wrong classloader: " + c1.getClassLoader()); + } + + ClassLoader named_cl = new TestClassLoader("Kevin", null); + Class c2 = Class.forName("TestClass2", true, named_cl); + if (c2.getClassLoader() != named_cl) { + Assert.fail("TestClass defined by wrong classloader: " + c2.getClassLoader()); + } + + ClassLoader named_child_cl = new TestClassLoader("Bill", unnamed_cl); + Class c3 = Class.forName("TestClass2", true, named_child_cl); + if (c3.getClassLoader() != named_child_cl) { + Assert.fail("TestClass defined by wrong classloader: " + c3.getClassLoader()); + } + + // First test: simple output, no classes displayed + OutputAnalyzer output = 
executor.execute("VM.classloaders"); + output.shouldContain(""); + output.shouldMatch(".*TestClassLoader"); + output.shouldMatch("Kevin.*TestClassLoader"); + output.shouldMatch("Bill.*TestClassLoader"); + + // Second test: print with classes. + output = executor.execute("VM.classloaders show-classes"); + output.shouldContain(""); + output.shouldContain("java.lang.Object"); + output.shouldMatch(".*TestClassLoader"); + output.shouldMatch("Kevin.*TestClassLoader"); + output.shouldMatch("Bill.*TestClassLoader"); + output.shouldContain("TestClass2"); + } + + static class TestClassLoader extends ClassLoader { + + public TestClassLoader() { + super(); + } + + public TestClassLoader(String name, ClassLoader parent) { + super(name, parent); + } + + public static final String CLASS_NAME = "TestClass2"; + + static ByteBuffer readClassFile(String name) + { + File f = new File(System.getProperty("test.classes", "."), + name); + try (FileInputStream fin = new FileInputStream(f); + FileChannel fc = fin.getChannel()) + { + return fc.map(FileChannel.MapMode.READ_ONLY, 0, fc.size()); + } catch (IOException e) { + Assert.fail("Can't open file: " + name, e); + } + + /* Will not reach here as Assert.fail() throws exception */ + return null; + } + + protected Class loadClass(String name, boolean resolve) + throws ClassNotFoundException + { + Class c; + if (!CLASS_NAME.equals(name)) { + c = super.loadClass(name, resolve); + } else { + // should not delegate to the system class loader + c = findClass(name); + if (resolve) { + resolveClass(c); + } + } + return c; + } + + protected Class findClass(String name) + throws ClassNotFoundException + { + if (!CLASS_NAME.equals(name)) { + throw new ClassNotFoundException("Unexpected class: " + name); + } + return defineClass(name, readClassFile(name + ".class"), null); + } + + } + + @Test + public void jmx() throws ClassNotFoundException { + run(new JMXExecutor()); + } + +} + +class TestClass2 { + static { + Runnable r = () -> 
System.out.println("Hello"); + r.run(); + } +} + diff -r f0d5c39dfbc1 -r 28b415bc6f4d test/jdk/java/nio/channels/SelectionKey/AtomicUpdates.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/jdk/java/nio/channels/SelectionKey/AtomicUpdates.java Thu Jun 07 15:01:13 2018 +0200 @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* @test + * @bug 6350055 + * @run testng AtomicUpdates + * @summary Unit test for SelectionKey interestOpsOr and interestOpsAnd + */ + +import java.io.Closeable; +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.channels.CancelledKeyException; +import java.nio.channels.SelectableChannel; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import org.testng.annotations.Test; + +import static java.nio.channels.SelectionKey.OP_READ; +import static java.nio.channels.SelectionKey.OP_WRITE; +import static java.nio.channels.SelectionKey.OP_CONNECT; +import static java.nio.channels.SelectionKey.OP_ACCEPT; +import static org.testng.Assert.*; + +@Test +public class AtomicUpdates { + + private SelectionKey keyFor(SocketChannel sc) { + return new SelectionKey() { + private int ops; + private boolean invalid; + private void ensureValid() { + if (!isValid()) + throw new CancelledKeyException(); + } + @Override + public SelectableChannel channel() { + return sc; + } + @Override + public Selector selector() { + throw new RuntimeException(); + } + @Override + public boolean isValid() { + return !invalid; + } + @Override + public void cancel() { + invalid = true; + } + @Override + public int interestOps() { + ensureValid(); + return ops; + } + @Override + public SelectionKey interestOps(int ops) { + ensureValid(); + if ((ops & ~channel().validOps()) != 0) + throw new IllegalArgumentException(); + this.ops = ops; + return this; + } + @Override + public int readyOps() { + ensureValid(); + return 0; + } + }; + } + + private void test(SelectionKey key) { + assertTrue(key.channel() instanceof SocketChannel); + key.interestOps(0); + + // 0 -> 0 + int previous = key.interestOpsOr(0); + assertTrue(previous == 0); + assertTrue(key.interestOps() == 0); + + // 0 -> OP_CONNECT + previous = 
key.interestOpsOr(OP_CONNECT); + assertTrue(previous == 0); + assertTrue(key.interestOps() == OP_CONNECT); + + // OP_CONNECT -> OP_CONNECT + previous = key.interestOpsOr(0); + assertTrue(previous == OP_CONNECT); + assertTrue(key.interestOps() == OP_CONNECT); + + // OP_CONNECT -> OP_CONNECT | OP_READ | OP_WRITE + previous = key.interestOpsOr(OP_READ | OP_WRITE); + assertTrue(previous == OP_CONNECT); + assertTrue(key.interestOps() == (OP_CONNECT | OP_READ | OP_WRITE)); + + // OP_CONNECT | OP_READ | OP_WRITE -> OP_CONNECT + previous = key.interestOpsAnd(~(OP_READ | OP_WRITE)); + assertTrue(previous == (OP_CONNECT | OP_READ | OP_WRITE)); + assertTrue(key.interestOps() == OP_CONNECT); + + // OP_CONNECT -> 0 + previous = key.interestOpsAnd(~OP_CONNECT); + assertTrue(previous == OP_CONNECT); + assertTrue(key.interestOps() == 0); + + // OP_READ | OP_WRITE -> OP_READ | OP_WRITE + key.interestOps(OP_READ | OP_WRITE); + previous = key.interestOpsAnd(~OP_ACCEPT); + assertTrue(previous == (OP_READ | OP_WRITE)); + assertTrue(key.interestOps() == (OP_READ | OP_WRITE)); + + // OP_READ | OP_WRITE -> 0 + previous = key.interestOpsAnd(0); + assertTrue(previous == (OP_READ | OP_WRITE)); + assertTrue(key.interestOps() == 0); + + // 0 -> 0 + previous = key.interestOpsAnd(0); + assertTrue(previous == 0); + assertTrue(key.interestOps() == 0); + + try { + key.interestOpsOr(OP_ACCEPT); + fail("IllegalArgumentException expected"); + } catch (IllegalArgumentException expected) { } + + key.cancel(); + try { + key.interestOpsOr(OP_READ); + fail("CancelledKeyException expected"); + } catch (CancelledKeyException expected) { } + try { + key.interestOpsAnd(~OP_READ); + fail("CancelledKeyException expected"); + } catch (CancelledKeyException expected) { } + } + + /** + * Test default implementation of interestOpsOr/interestOpsAnd + */ + public void testDefaultImplementation() throws Exception { + try (SocketChannel sc = SocketChannel.open()) { + SelectionKey key = keyFor(sc); + test(key); + } + } + 
+ /** + * Test the default provider implementation of SelectionKey. + */ + public void testNioImplementation() throws Exception { + try (SocketChannel sc = SocketChannel.open(); + Selector sel = Selector.open()) { + sc.configureBlocking(false); + SelectionKey key = sc.register(sel, 0); + test(key); + } + } +} +