--- a/jdk/src/macosx/classes/sun/util/locale/provider/HostLocaleProviderAdapterImpl.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/macosx/classes/sun/util/locale/provider/HostLocaleProviderAdapterImpl.java Tue Apr 23 11:13:38 2013 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -520,14 +520,22 @@
}
private static boolean isSupportedCalendarLocale(Locale locale) {
- Locale base = locale.stripExtensions();
+ Locale base = locale;
+
+        if (base.hasExtensions() || !base.getVariant().isEmpty()) {
+ base = new Locale.Builder()
+ .setLocale(locale)
+ .clearExtensions()
+ .build();
+ }
+
if (!supportedLocaleSet.contains(base)) {
return false;
}
String requestedCalType = locale.getUnicodeLocaleType("ca");
String nativeCalType =
- getCalendarID(locale.toLanguageTag()).replaceFirst("gregorian", "gregory");
+ getCalendarID(base.toLanguageTag()).replaceFirst("gregorian", "gregory");
if (requestedCalType == null) {
return Calendar.getAvailableCalendarTypes().contains(nativeCalType);
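
For context, a minimal standalone sketch of the extension-stripping idiom this hunk introduces; the class name and the locale tag are illustrative and not part of the patch:

    import java.util.Calendar;
    import java.util.Locale;

    public class LocaleExtensionStripDemo {
        public static void main(String[] args) {
            // A tag carrying a Unicode "ca" (calendar) extension.
            Locale locale = Locale.forLanguageTag("ja-JP-u-ca-japanese");

            // Rebuild the locale without extensions, as the patched method does,
            // so the base locale can be matched against the supported set.
            Locale base = locale;
            if (base.hasExtensions() || !base.getVariant().isEmpty()) {
                base = new Locale.Builder().setLocale(locale).clearExtensions().build();
            }

            System.out.println("base locale        = " + base.toLanguageTag());             // ja-JP
            System.out.println("requested calendar = " + locale.getUnicodeLocaleType("ca")); // japanese
            System.out.println("known calendars    = " + Calendar.getAvailableCalendarTypes());
        }
    }
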
--- a/jdk/src/share/classes/com/sun/crypto/provider/TlsPrfGenerator.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/com/sun/crypto/provider/TlsPrfGenerator.java Tue Apr 23 11:13:38 2013 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -241,14 +241,29 @@
int off = secret.length >> 1;
int seclen = off + (secret.length & 1);
+ byte[] secKey = secret;
+ int keyLen = seclen;
byte[] output = new byte[outputLength];
// P_MD5(S1, label + seed)
- expand(md5, 16, secret, 0, seclen, labelBytes, seed, output,
+ // If we have a long secret, digest it first.
+ if (seclen > 64) { // 64: block size of HMAC-MD5
+ md5.update(secret, 0, seclen);
+ secKey = md5.digest();
+ keyLen = secKey.length;
+ }
+ expand(md5, 16, secKey, 0, keyLen, labelBytes, seed, output,
HMAC_ipad64.clone(), HMAC_opad64.clone());
// P_SHA-1(S2, label + seed)
- expand(sha, 20, secret, off, seclen, labelBytes, seed, output,
+ // If we have a long secret, digest it first.
+ if (seclen > 64) { // 64: block size of HMAC-SHA1
+ sha.update(secret, off, seclen);
+ secKey = sha.digest();
+ keyLen = secKey.length;
+ off = 0;
+ }
+ expand(sha, 20, secKey, off, keyLen, labelBytes, seed, output,
HMAC_ipad64.clone(), HMAC_opad64.clone());
return output;
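
For reference, the rule being applied here comes from RFC 2104: an HMAC key longer than the hash's block size is first hashed down to digest length. A small self-contained sketch with a hypothetical helper, not taken from the patch:

    import java.security.MessageDigest;
    import java.util.Arrays;

    public class HmacKeyPrepDemo {
        // Hypothetical helper mirroring the rule the patch adds for the MD5 and
        // SHA-1 halves of the TLS 1.0/1.1 PRF secret: a half longer than the
        // 64-byte block size is replaced by its digest before keying HMAC.
        static byte[] prepareKey(MessageDigest md, byte[] key, int blockSize) {
            if (key.length > blockSize) {
                md.reset();
                return md.digest(key);       // 16 bytes for MD5, 20 for SHA-1
            }
            return key.clone();
        }

        public static void main(String[] args) throws Exception {
            byte[] longHalf = new byte[100]; // longer than the 64-byte block size
            Arrays.fill(longHalf, (byte) 0x0b);
            byte[] k = prepareKey(MessageDigest.getInstance("SHA-1"), longHalf, 64);
            System.out.println("effective key length = " + k.length);  // prints 20
        }
    }
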
--- a/jdk/src/share/classes/com/sun/jarsigner/ContentSignerParameters.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/com/sun/jarsigner/ContentSignerParameters.java Tue Apr 23 11:13:38 2013 +0100
@@ -60,6 +60,13 @@
public X509Certificate getTimestampingAuthorityCertificate();
/**
+ * Retrieves the TSAPolicyID for a Timestamping Authority (TSA).
+ *
+ * @return The TSAPolicyID. May be null.
+ */
+ public String getTSAPolicyID();
+
+ /**
* Retrieves the JAR file's signature.
*
* @return The non-null array of signature bytes.
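
A hypothetical caller-side sketch of the new getTSAPolicyID accessor above; the helper class and the fallback handling are illustrative and not part of this interface:

    import com.sun.jarsigner.ContentSignerParameters;

    // Illustrative only: a caller deciding which TSA policy OID to request.
    final class TsaPolicyHelper {
        static String policyOidOrDefault(ContentSignerParameters params, String fallbackOid) {
            String oid = params.getTSAPolicyID();   // may be null, per the javadoc above
            return (oid != null) ? oid : fallbackOid;
        }
    }
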
--- a/jdk/src/share/classes/java/nio/file/attribute/BasicFileAttributeView.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/nio/file/attribute/BasicFileAttributeView.java Tue Apr 23 11:13:38 2013 +0100
@@ -147,11 +147,11 @@
* this method has no effect.
*
* <p> <b>Usage Example:</b>
- * Suppose we want to change a file's creation time.
+ * Suppose we want to change a file's last access time.
* <pre>
* Path path = ...
* FileTime time = ...
- * Files.getFileAttributeView(path, BasicFileAttributeView.class).setTimes(null, null, time);
+ * Files.getFileAttributeView(path, BasicFileAttributeView.class).setTimes(null, time, null);
* </pre>
*
* @param lastModifiedTime
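
A runnable version of the corrected usage example; the temporary file and the epoch timestamp are illustrative:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.attribute.BasicFileAttributeView;
    import java.nio.file.attribute.FileTime;
    import java.util.concurrent.TimeUnit;

    public class SetAccessTimeDemo {
        public static void main(String[] args) throws IOException {
            Path path = Files.createTempFile("demo", ".txt");
            FileTime time = FileTime.from(0, TimeUnit.MILLISECONDS);   // the Unix epoch

            // Null for last-modified and create time leaves those values unchanged;
            // only the middle argument, the last access time, is updated.
            Files.getFileAttributeView(path, BasicFileAttributeView.class)
                 .setTimes(null, time, null);

            System.out.println(Files.getAttribute(path, "lastAccessTime"));
            Files.delete(path);
        }
    }
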
--- a/jdk/src/share/classes/java/util/ArrayDeque.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/ArrayDeque.java Tue Apr 23 11:13:38 2013 +0100
@@ -33,7 +33,9 @@
*/
package java.util;
-import java.io.*;
+
+import java.io.Serializable;
+import java.util.function.Consumer;
/**
* Resizable-array implementation of the {@link Deque} interface. Array
@@ -44,16 +46,16 @@
* {@link Stack} when used as a stack, and faster than {@link LinkedList}
* when used as a queue.
*
- * <p>Most <tt>ArrayDeque</tt> operations run in amortized constant time.
+ * <p>Most {@code ArrayDeque} operations run in amortized constant time.
* Exceptions include {@link #remove(Object) remove}, {@link
* #removeFirstOccurrence removeFirstOccurrence}, {@link #removeLastOccurrence
* removeLastOccurrence}, {@link #contains contains}, {@link #iterator
* iterator.remove()}, and the bulk operations, all of which run in linear
* time.
*
- * <p>The iterators returned by this class's <tt>iterator</tt> method are
+ * <p>The iterators returned by this class's {@code iterator} method are
* <i>fail-fast</i>: If the deque is modified at any time after the iterator
- * is created, in any way except through the iterator's own <tt>remove</tt>
+ * is created, in any way except through the iterator's own {@code remove}
* method, the iterator will generally throw a {@link
* ConcurrentModificationException}. Thus, in the face of concurrent
* modification, the iterator fails quickly and cleanly, rather than risking
@@ -63,7 +65,7 @@
* <p>Note that the fail-fast behavior of an iterator cannot be guaranteed
* as it is, generally speaking, impossible to make any hard guarantees in the
* presence of unsynchronized concurrent modification. Fail-fast iterators
- * throw <tt>ConcurrentModificationException</tt> on a best-effort basis.
+ * throw {@code ConcurrentModificationException} on a best-effort basis.
* Therefore, it would be wrong to write a program that depended on this
* exception for its correctness: <i>the fail-fast behavior of iterators
* should be used only to detect bugs.</i>
@@ -93,20 +95,20 @@
* other. We also guarantee that all array cells not holding
* deque elements are always null.
*/
- private transient E[] elements;
+ transient Object[] elements; // non-private to simplify nested class access
/**
* The index of the element at the head of the deque (which is the
* element that would be removed by remove() or pop()); or an
* arbitrary number equal to tail if the deque is empty.
*/
- private transient int head;
+ transient int head;
/**
* The index at which the next element would be added to the tail
* of the deque (via addLast(E), add(E), or push(E)).
*/
- private transient int tail;
+ transient int tail;
/**
* The minimum capacity that we'll use for a newly created deque.
@@ -117,11 +119,10 @@
// ****** Array allocation and resizing utilities ******
/**
- * Allocate empty array to hold the given number of elements.
+ * Allocates empty array to hold the given number of elements.
*
* @param numElements the number of elements to hold
*/
- @SuppressWarnings("unchecked")
private void allocateElements(int numElements) {
int initialCapacity = MIN_INITIAL_CAPACITY;
// Find the best power of two to hold elements.
@@ -138,11 +139,11 @@
if (initialCapacity < 0) // Too many elements, must back off
initialCapacity >>>= 1;// Good luck allocating 2 ^ 30 elements
}
- elements = (E[]) new Object[initialCapacity];
+ elements = new Object[initialCapacity];
}
/**
- * Double the capacity of this deque. Call only when full, i.e.,
+ * Doubles the capacity of this deque. Call only when full, i.e.,
* when head and tail have wrapped around to become equal.
*/
private void doubleCapacity() {
@@ -153,8 +154,7 @@
int newCapacity = n << 1;
if (newCapacity < 0)
throw new IllegalStateException("Sorry, deque too big");
- @SuppressWarnings("unchecked")
- E[] a = (E[]) new Object[newCapacity];
+ Object[] a = new Object[newCapacity];
System.arraycopy(elements, p, a, 0, r);
System.arraycopy(elements, 0, a, r, p);
elements = a;
@@ -184,9 +184,8 @@
* Constructs an empty array deque with an initial capacity
* sufficient to hold 16 elements.
*/
- @SuppressWarnings("unchecked")
public ArrayDeque() {
- elements = (E[]) new Object[16];
+ elements = new Object[16];
}
/**
@@ -252,7 +251,7 @@
* Inserts the specified element at the front of this deque.
*
* @param e the element to add
- * @return <tt>true</tt> (as specified by {@link Deque#offerFirst})
+ * @return {@code true} (as specified by {@link Deque#offerFirst})
* @throws NullPointerException if the specified element is null
*/
public boolean offerFirst(E e) {
@@ -264,7 +263,7 @@
* Inserts the specified element at the end of this deque.
*
* @param e the element to add
- * @return <tt>true</tt> (as specified by {@link Deque#offerLast})
+ * @return {@code true} (as specified by {@link Deque#offerLast})
* @throws NullPointerException if the specified element is null
*/
public boolean offerLast(E e) {
@@ -294,7 +293,9 @@
public E pollFirst() {
int h = head;
- E result = elements[h]; // Element is null if deque empty
+ @SuppressWarnings("unchecked")
+ E result = (E) elements[h];
+ // Element is null if deque empty
if (result == null)
return null;
elements[h] = null; // Must null out slot
@@ -304,7 +305,8 @@
public E pollLast() {
int t = (tail - 1) & (elements.length - 1);
- E result = elements[t];
+ @SuppressWarnings("unchecked")
+ E result = (E) elements[t];
if (result == null)
return null;
elements[t] = null;
@@ -316,48 +318,53 @@
* @throws NoSuchElementException {@inheritDoc}
*/
public E getFirst() {
- E x = elements[head];
- if (x == null)
+ @SuppressWarnings("unchecked")
+ E result = (E) elements[head];
+ if (result == null)
throw new NoSuchElementException();
- return x;
+ return result;
}
/**
* @throws NoSuchElementException {@inheritDoc}
*/
public E getLast() {
- E x = elements[(tail - 1) & (elements.length - 1)];
- if (x == null)
+ @SuppressWarnings("unchecked")
+ E result = (E) elements[(tail - 1) & (elements.length - 1)];
+ if (result == null)
throw new NoSuchElementException();
- return x;
+ return result;
}
+ @SuppressWarnings("unchecked")
public E peekFirst() {
- return elements[head]; // elements[head] is null if deque empty
+ // elements[head] is null if deque empty
+ return (E) elements[head];
}
+ @SuppressWarnings("unchecked")
public E peekLast() {
- return elements[(tail - 1) & (elements.length - 1)];
+ return (E) elements[(tail - 1) & (elements.length - 1)];
}
/**
* Removes the first occurrence of the specified element in this
* deque (when traversing the deque from head to tail).
* If the deque does not contain the element, it is unchanged.
- * More formally, removes the first element <tt>e</tt> such that
- * <tt>o.equals(e)</tt> (if such an element exists).
- * Returns <tt>true</tt> if this deque contained the specified element
+ * More formally, removes the first element {@code e} such that
+ * {@code o.equals(e)} (if such an element exists).
+ * Returns {@code true} if this deque contained the specified element
* (or equivalently, if this deque changed as a result of the call).
*
* @param o element to be removed from this deque, if present
- * @return <tt>true</tt> if the deque contained the specified element
+ * @return {@code true} if the deque contained the specified element
*/
public boolean removeFirstOccurrence(Object o) {
if (o == null)
return false;
int mask = elements.length - 1;
int i = head;
- E x;
+ Object x;
while ( (x = elements[i]) != null) {
if (o.equals(x)) {
delete(i);
@@ -372,20 +379,20 @@
* Removes the last occurrence of the specified element in this
* deque (when traversing the deque from head to tail).
* If the deque does not contain the element, it is unchanged.
- * More formally, removes the last element <tt>e</tt> such that
- * <tt>o.equals(e)</tt> (if such an element exists).
- * Returns <tt>true</tt> if this deque contained the specified element
+ * More formally, removes the last element {@code e} such that
+ * {@code o.equals(e)} (if such an element exists).
+ * Returns {@code true} if this deque contained the specified element
* (or equivalently, if this deque changed as a result of the call).
*
* @param o element to be removed from this deque, if present
- * @return <tt>true</tt> if the deque contained the specified element
+ * @return {@code true} if the deque contained the specified element
*/
public boolean removeLastOccurrence(Object o) {
if (o == null)
return false;
int mask = elements.length - 1;
int i = (tail - 1) & mask;
- E x;
+ Object x;
while ( (x = elements[i]) != null) {
if (o.equals(x)) {
delete(i);
@@ -404,7 +411,7 @@
* <p>This method is equivalent to {@link #addLast}.
*
* @param e the element to add
- * @return <tt>true</tt> (as specified by {@link Collection#add})
+ * @return {@code true} (as specified by {@link Collection#add})
* @throws NullPointerException if the specified element is null
*/
public boolean add(E e) {
@@ -418,7 +425,7 @@
* <p>This method is equivalent to {@link #offerLast}.
*
* @param e the element to add
- * @return <tt>true</tt> (as specified by {@link Queue#offer})
+ * @return {@code true} (as specified by {@link Queue#offer})
* @throws NullPointerException if the specified element is null
*/
public boolean offer(E e) {
@@ -443,12 +450,12 @@
/**
* Retrieves and removes the head of the queue represented by this deque
* (in other words, the first element of this deque), or returns
- * <tt>null</tt> if this deque is empty.
+ * {@code null} if this deque is empty.
*
* <p>This method is equivalent to {@link #pollFirst}.
*
* @return the head of the queue represented by this deque, or
- * <tt>null</tt> if this deque is empty
+ * {@code null} if this deque is empty
*/
public E poll() {
return pollFirst();
@@ -470,12 +477,12 @@
/**
* Retrieves, but does not remove, the head of the queue represented by
- * this deque, or returns <tt>null</tt> if this deque is empty.
+ * this deque, or returns {@code null} if this deque is empty.
*
* <p>This method is equivalent to {@link #peekFirst}.
*
* @return the head of the queue represented by this deque, or
- * <tt>null</tt> if this deque is empty
+ * {@code null} if this deque is empty
*/
public E peek() {
return peekFirst();
@@ -530,7 +537,7 @@
*/
private boolean delete(int i) {
checkInvariants();
- final E[] elements = this.elements;
+ final Object[] elements = this.elements;
final int mask = elements.length - 1;
final int h = head;
final int t = tail;
@@ -579,9 +586,9 @@
}
/**
- * Returns <tt>true</tt> if this deque contains no elements.
+ * Returns {@code true} if this deque contains no elements.
*
- * @return <tt>true</tt> if this deque contains no elements
+ * @return {@code true} if this deque contains no elements
*/
public boolean isEmpty() {
return head == tail;
@@ -628,7 +635,8 @@
public E next() {
if (cursor == fence)
throw new NoSuchElementException();
- E result = elements[cursor];
+ @SuppressWarnings("unchecked")
+ E result = (E) elements[cursor];
// This check doesn't catch all possible comodifications,
// but does catch the ones that corrupt traversal
if (tail != fence || result == null)
@@ -647,6 +655,20 @@
}
lastRet = -1;
}
+
+ public void forEachRemaining(Consumer<? super E> action) {
+ Objects.requireNonNull(action);
+ Object[] a = elements;
+ int m = a.length - 1, f = fence, i = cursor;
+ cursor = f;
+ while (i != f) {
+ @SuppressWarnings("unchecked") E e = (E)a[i];
+ i = (i + 1) & m;
+ if (e == null)
+ throw new ConcurrentModificationException();
+ action.accept(e);
+ }
+ }
}
private class DescendingIterator implements Iterator<E> {
@@ -667,7 +689,8 @@
if (cursor == fence)
throw new NoSuchElementException();
cursor = (cursor - 1) & (elements.length - 1);
- E result = elements[cursor];
+ @SuppressWarnings("unchecked")
+ E result = (E) elements[cursor];
if (head != fence || result == null)
throw new ConcurrentModificationException();
lastRet = cursor;
@@ -686,19 +709,19 @@
}
/**
- * Returns <tt>true</tt> if this deque contains the specified element.
- * More formally, returns <tt>true</tt> if and only if this deque contains
- * at least one element <tt>e</tt> such that <tt>o.equals(e)</tt>.
+ * Returns {@code true} if this deque contains the specified element.
+ * More formally, returns {@code true} if and only if this deque contains
+ * at least one element {@code e} such that {@code o.equals(e)}.
*
* @param o object to be checked for containment in this deque
- * @return <tt>true</tt> if this deque contains the specified element
+ * @return {@code true} if this deque contains the specified element
*/
public boolean contains(Object o) {
if (o == null)
return false;
int mask = elements.length - 1;
int i = head;
- E x;
+ Object x;
while ( (x = elements[i]) != null) {
if (o.equals(x))
return true;
@@ -710,15 +733,15 @@
/**
* Removes a single instance of the specified element from this deque.
* If the deque does not contain the element, it is unchanged.
- * More formally, removes the first element <tt>e</tt> such that
- * <tt>o.equals(e)</tt> (if such an element exists).
- * Returns <tt>true</tt> if this deque contained the specified element
+ * More formally, removes the first element {@code e} such that
+ * {@code o.equals(e)} (if such an element exists).
+ * Returns {@code true} if this deque contained the specified element
* (or equivalently, if this deque changed as a result of the call).
*
- * <p>This method is equivalent to {@link #removeFirstOccurrence}.
+ * <p>This method is equivalent to {@link #removeFirstOccurrence(Object)}.
*
* @param o element to be removed from this deque, if present
- * @return <tt>true</tt> if this deque contained the specified element
+ * @return {@code true} if this deque contained the specified element
*/
public boolean remove(Object o) {
return removeFirstOccurrence(o);
@@ -770,22 +793,21 @@
* <p>If this deque fits in the specified array with room to spare
* (i.e., the array has more elements than this deque), the element in
* the array immediately following the end of the deque is set to
- * <tt>null</tt>.
+ * {@code null}.
*
* <p>Like the {@link #toArray()} method, this method acts as bridge between
* array-based and collection-based APIs. Further, this method allows
* precise control over the runtime type of the output array, and may,
* under certain circumstances, be used to save allocation costs.
*
- * <p>Suppose <tt>x</tt> is a deque known to contain only strings.
+ * <p>Suppose {@code x} is a deque known to contain only strings.
* The following code can be used to dump the deque into a newly
- * allocated array of <tt>String</tt>:
+ * allocated array of {@code String}:
*
- * <pre>
- * String[] y = x.toArray(new String[0]);</pre>
+ * <pre> {@code String[] y = x.toArray(new String[0]);}</pre>
*
- * Note that <tt>toArray(new Object[0])</tt> is identical in function to
- * <tt>toArray()</tt>.
+ * Note that {@code toArray(new Object[0])} is identical in function to
+ * {@code toArray()}.
*
* @param a the array into which the elements of the deque are to
* be stored, if it is big enough; otherwise, a new array of the
@@ -818,28 +840,25 @@
public ArrayDeque<E> clone() {
try {
@SuppressWarnings("unchecked")
- ArrayDeque<E> result = (ArrayDeque<E>) super.clone();
+ ArrayDeque<E> result = (ArrayDeque<E>) super.clone();
result.elements = Arrays.copyOf(elements, elements.length);
return result;
-
} catch (CloneNotSupportedException e) {
throw new AssertionError();
}
}
- /**
- * Appease the serialization gods.
- */
private static final long serialVersionUID = 2340985798034038923L;
/**
- * Serialize this deque.
+ * Saves this deque to a stream (that is, serializes it).
*
- * @serialData The current size (<tt>int</tt>) of the deque,
+ * @serialData The current size ({@code int}) of the deque,
* followed by all of its elements (each an object reference) in
* first-to-last order.
*/
- private void writeObject(ObjectOutputStream s) throws IOException {
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws java.io.IOException {
s.defaultWriteObject();
// Write out size
@@ -852,11 +871,10 @@
}
/**
- * Deserialize this deque.
+ * Reconstitutes this deque from a stream (that is, deserializes it).
*/
- @SuppressWarnings("unchecked")
- private void readObject(ObjectInputStream s)
- throws IOException, ClassNotFoundException {
+ private void readObject(java.io.ObjectInputStream s)
+ throws java.io.IOException, ClassNotFoundException {
s.defaultReadObject();
// Read in size and allocate array
@@ -867,6 +885,88 @@
// Read in all elements in the proper order.
for (int i = 0; i < size; i++)
- elements[i] = (E)s.readObject();
+ elements[i] = s.readObject();
+ }
+
+ public Spliterator<E> spliterator() {
+ return new DeqSpliterator<E>(this, -1, -1);
}
+
+ static final class DeqSpliterator<E> implements Spliterator<E> {
+ private final ArrayDeque<E> deq;
+ private int fence; // -1 until first use
+ private int index; // current index, modified on traverse/split
+
+ /** Creates new spliterator covering the given array and range */
+ DeqSpliterator(ArrayDeque<E> deq, int origin, int fence) {
+ this.deq = deq;
+ this.index = origin;
+ this.fence = fence;
+ }
+
+ private int getFence() { // force initialization
+ int t;
+ if ((t = fence) < 0) {
+ t = fence = deq.tail;
+ index = deq.head;
+ }
+ return t;
+ }
+
+ public DeqSpliterator<E> trySplit() {
+ int t = getFence(), h = index, n = deq.elements.length;
+ if (h != t && ((h + 1) & (n - 1)) != t) {
+ if (h > t)
+ t += n;
+ int m = ((h + t) >>> 1) & (n - 1);
+ return new DeqSpliterator<>(deq, h, index = m);
+ }
+ return null;
+ }
+
+ public void forEachRemaining(Consumer<? super E> consumer) {
+ if (consumer == null)
+ throw new NullPointerException();
+ Object[] a = deq.elements;
+ int m = a.length - 1, f = getFence(), i = index;
+ index = f;
+ while (i != f) {
+ @SuppressWarnings("unchecked") E e = (E)a[i];
+ i = (i + 1) & m;
+ if (e == null)
+ throw new ConcurrentModificationException();
+ consumer.accept(e);
+ }
+ }
+
+ public boolean tryAdvance(Consumer<? super E> consumer) {
+ if (consumer == null)
+ throw new NullPointerException();
+ Object[] a = deq.elements;
+ int m = a.length - 1, f = getFence(), i = index;
+ if (i != fence) {
+ @SuppressWarnings("unchecked") E e = (E)a[i];
+ index = (i + 1) & m;
+ if (e == null)
+ throw new ConcurrentModificationException();
+ consumer.accept(e);
+ return true;
+ }
+ return false;
+ }
+
+ public long estimateSize() {
+ int n = getFence() - index;
+ if (n < 0)
+ n += deq.elements.length;
+ return (long) n;
+ }
+
+ @Override
+ public int characteristics() {
+ return Spliterator.ORDERED | Spliterator.SIZED |
+ Spliterator.NONNULL | Spliterator.SUBSIZED;
+ }
+ }
+
}
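
For illustration, a small sketch (not from the changeset) exercising the new spliterator and the Iterator.forEachRemaining override added above:

    import java.util.ArrayDeque;
    import java.util.Iterator;
    import java.util.Spliterator;

    public class DequeSpliteratorDemo {
        public static void main(String[] args) {
            ArrayDeque<String> deque = new ArrayDeque<>();
            deque.addLast("a");
            deque.addLast("b");
            deque.addLast("c");
            deque.addLast("d");

            // DeqSpliterator reports ORDERED | SIZED | NONNULL | SUBSIZED and
            // trySplit() hands back a prefix of the remaining elements.
            Spliterator<String> rest = deque.spliterator();
            Spliterator<String> prefix = rest.trySplit();
            if (prefix != null) {
                prefix.forEachRemaining(e -> System.out.println("prefix: " + e));
            }
            rest.forEachRemaining(e -> System.out.println("rest:   " + e));

            // Iterator.forEachRemaining walks the rest of the backing array.
            Iterator<String> it = deque.iterator();
            it.next();                                 // consume one element
            it.forEachRemaining(System.out::println);  // prints the remaining three
        }
    }
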
--- a/jdk/src/share/classes/java/util/ArrayList.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/ArrayList.java Tue Apr 23 11:13:38 2013 +0100
@@ -25,6 +25,14 @@
package java.util;
+import java.util.function.Consumer;
+import java.util.function.Predicate;
+import java.util.function.UnaryOperator;
+
/**
* Resizable-array implementation of the <tt>List</tt> interface. Implements
* all optional list operations, and permits all elements, including
@@ -120,7 +128,7 @@
* empty ArrayList with elementData == EMPTY_ELEMENTDATA will be expanded to
* DEFAULT_CAPACITY when the first element is added.
*/
- private transient Object[] elementData;
+ transient Object[] elementData; // non-private to simplify nested class access
/**
* The size of the ArrayList (the number of elements it contains).
@@ -853,6 +861,27 @@
}
}
+ @Override
+ @SuppressWarnings("unchecked")
+ public void forEachRemaining(Consumer<? super E> consumer) {
+ Objects.requireNonNull(consumer);
+ final int size = ArrayList.this.size;
+ int i = cursor;
+ if (i >= size) {
+ return;
+ }
+ final Object[] elementData = ArrayList.this.elementData;
+ if (i >= elementData.length) {
+ throw new ConcurrentModificationException();
+ }
+ while (i != size && modCount == expectedModCount) {
+ consumer.accept((E) elementData[i++]);
+ }
+ // update once at end of iteration to reduce heap write traffic
+ lastRet = cursor = i;
+ checkForComodification();
+ }
+
final void checkForComodification() {
if (modCount != expectedModCount)
throw new ConcurrentModificationException();
@@ -1088,6 +1117,26 @@
return (E) elementData[offset + (lastRet = i)];
}
+ @SuppressWarnings("unchecked")
+ public void forEachRemaining(Consumer<? super E> consumer) {
+ Objects.requireNonNull(consumer);
+ final int size = SubList.this.size;
+ int i = cursor;
+ if (i >= size) {
+ return;
+ }
+ final Object[] elementData = ArrayList.this.elementData;
+ if (offset + i >= elementData.length) {
+ throw new ConcurrentModificationException();
+ }
+ while (i != size && modCount == expectedModCount) {
+ consumer.accept((E) elementData[offset + (i++)]);
+ }
+ // update once at end of iteration to reduce heap write traffic
+ lastRet = cursor = i;
+ checkForComodification();
+ }
+
public int nextIndex() {
return cursor;
}
@@ -1167,5 +1216,217 @@
if (ArrayList.this.modCount != this.modCount)
throw new ConcurrentModificationException();
}
+
+ public Spliterator<E> spliterator() {
+ checkForComodification();
+ return new ArrayListSpliterator<E>(ArrayList.this, offset,
+ offset + this.size, this.modCount);
+ }
+ }
+
+ @Override
+ public void forEach(Consumer<? super E> action) {
+ Objects.requireNonNull(action);
+ final int expectedModCount = modCount;
+ @SuppressWarnings("unchecked")
+ final E[] elementData = (E[]) this.elementData;
+ final int size = this.size;
+ for (int i=0; modCount == expectedModCount && i < size; i++) {
+ action.accept(elementData[i]);
+ }
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ public Spliterator<E> spliterator() {
+ return new ArrayListSpliterator<>(this, 0, -1, 0);
+ }
+
+ /** Index-based split-by-two, lazily initialized Spliterator */
+ static final class ArrayListSpliterator<E> implements Spliterator<E> {
+
+ /*
+ * If ArrayLists were immutable, or structurally immutable (no
+ * adds, removes, etc), we could implement their spliterators
+ * with Arrays.spliterator. Instead we detect as much
+ * interference during traversal as practical without
+ * sacrificing much performance. We rely primarily on
+ * modCounts. These are not guaranteed to detect concurrency
+ * violations, and are sometimes overly conservative about
+ * within-thread interference, but detect enough problems to
+ * be worthwhile in practice. To carry this out, we (1) lazily
+ * initialize fence and expectedModCount until the latest
+ * point that we need to commit to the state we are checking
+ * against; thus improving precision. (This doesn't apply to
+ * SubLists, that create spliterators with current non-lazy
+ * values). (2) We perform only a single
+ * ConcurrentModificationException check at the end of forEach
+ * (the most performance-sensitive method). When using forEach
+ * (as opposed to iterators), we can normally only detect
+ * interference after actions, not before. Further
+ * CME-triggering checks apply to all other possible
+ * violations of assumptions for example null or too-small
+ * elementData array given its size(), that could only have
+ * occurred due to interference. This allows the inner loop
+ * of forEach to run without any further checks, and
+ * simplifies lambda-resolution. While this does entail a
+ * number of checks, note that in the common case of
+ * list.stream().forEach(a), no checks or other computation
+ * occur anywhere other than inside forEach itself. The other
+ * less-often-used methods cannot take advantage of most of
+ * these streamlinings.
+ */
+
+ private final ArrayList<E> list;
+ private int index; // current index, modified on advance/split
+ private int fence; // -1 until used; then one past last index
+ private int expectedModCount; // initialized when fence set
+
+ /** Create new spliterator covering the given range */
+ ArrayListSpliterator(ArrayList<E> list, int origin, int fence,
+ int expectedModCount) {
+ this.list = list; // OK if null unless traversed
+ this.index = origin;
+ this.fence = fence;
+ this.expectedModCount = expectedModCount;
+ }
+
+ private int getFence() { // initialize fence to size on first use
+ int hi; // (a specialized variant appears in method forEach)
+ ArrayList<E> lst;
+ if ((hi = fence) < 0) {
+ if ((lst = list) == null)
+ hi = fence = 0;
+ else {
+ expectedModCount = lst.modCount;
+ hi = fence = lst.size;
+ }
+ }
+ return hi;
+ }
+
+ public ArrayListSpliterator<E> trySplit() {
+ int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
+ return (lo >= mid) ? null : // divide range in half unless too small
+ new ArrayListSpliterator<E>(list, lo, index = mid,
+ expectedModCount);
+ }
+
+ public boolean tryAdvance(Consumer<? super E> action) {
+ if (action == null)
+ throw new NullPointerException();
+ int hi = getFence(), i = index;
+ if (i < hi) {
+ index = i + 1;
+ @SuppressWarnings("unchecked") E e = (E)list.elementData[i];
+ action.accept(e);
+ if (list.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ return false;
+ }
+
+ public void forEachRemaining(Consumer<? super E> action) {
+ int i, hi, mc; // hoist accesses and checks from loop
+ ArrayList<E> lst; Object[] a;
+ if (action == null)
+ throw new NullPointerException();
+ if ((lst = list) != null && (a = lst.elementData) != null) {
+ if ((hi = fence) < 0) {
+ mc = lst.modCount;
+ hi = lst.size;
+ }
+ else
+ mc = expectedModCount;
+ if ((i = index) >= 0 && (index = hi) <= a.length) {
+ for (; i < hi; ++i) {
+ @SuppressWarnings("unchecked") E e = (E) a[i];
+ action.accept(e);
+ }
+ if (lst.modCount == mc)
+ return;
+ }
+ }
+ throw new ConcurrentModificationException();
+ }
+
+ public long estimateSize() {
+ return (long) (getFence() - index);
+ }
+
+ public int characteristics() {
+ return Spliterator.ORDERED | Spliterator.SIZED | Spliterator.SUBSIZED;
+ }
+ }
+
+ @Override
+ public boolean removeIf(Predicate<? super E> filter) {
+ Objects.requireNonNull(filter);
+ // figure out which elements are to be removed
+ // any exception thrown from the filter predicate at this stage
+ // will leave the collection unmodified
+ int removeCount = 0;
+ final BitSet removeSet = new BitSet(size);
+ final int expectedModCount = modCount;
+ final int size = this.size;
+ for (int i=0; modCount == expectedModCount && i < size; i++) {
+ @SuppressWarnings("unchecked")
+ final E element = (E) elementData[i];
+ if (filter.test(element)) {
+ removeSet.set(i);
+ removeCount++;
+ }
+ }
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+
+ // shift surviving elements left over the spaces left by removed elements
+ final boolean anyToRemove = removeCount > 0;
+ if (anyToRemove) {
+ final int newSize = size - removeCount;
+ for (int i=0, j=0; (i < size) && (j < newSize); i++, j++) {
+ i = removeSet.nextClearBit(i);
+ elementData[j] = elementData[i];
+ }
+ for (int k=newSize; k < size; k++) {
+ elementData[k] = null; // Let gc do its work
+ }
+ this.size = newSize;
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+ modCount++;
+ }
+
+ return anyToRemove;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public void replaceAll(UnaryOperator<E> operator) {
+ Objects.requireNonNull(operator);
+ final int expectedModCount = modCount;
+ final int size = this.size;
+ for (int i=0; modCount == expectedModCount && i < size; i++) {
+ elementData[i] = operator.apply((E) elementData[i]);
+ }
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+ modCount++;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public void sort(Comparator<? super E> c) {
+ final int expectedModCount = modCount;
+ Arrays.sort((E[]) elementData, 0, size, c);
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+ modCount++;
}
}
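
A short sketch (not part of the changeset) exercising the new bulk operations added to ArrayList above:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Comparator;

    public class ArrayListBulkOpsDemo {
        public static void main(String[] args) {
            ArrayList<Integer> list = new ArrayList<>(Arrays.asList(5, 2, 8, 1, 9, 4));

            list.removeIf(n -> n % 2 == 0);          // drop even values -> [5, 1, 9]
            list.replaceAll(n -> n * 10);            // -> [50, 10, 90]
            list.sort(Comparator.naturalOrder());    // -> [10, 50, 90]
            list.forEach(System.out::println);

            // The spliterator is SIZED and SUBSIZED, so both halves of a split
            // know their exact element counts.
            System.out.println(list.spliterator().estimateSize());   // 3
        }
    }
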
--- a/jdk/src/share/classes/java/util/Collection.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/Collection.java Tue Apr 23 11:13:38 2013 +0100
@@ -25,6 +25,8 @@
package java.util;
+import java.util.function.Predicate;
+
/**
* The root interface in the <i>collection hierarchy</i>. A collection
* represents a group of objects, known as its <i>elements</i>. Some
@@ -373,6 +375,40 @@
boolean removeAll(Collection<?> c);
/**
+ * Removes all of the elements of this collection that satisfy the given
+ * predicate. Errors or runtime exceptions thrown by the predicate are
+ * relayed to the caller.
+ *
+ * @implSpec
+ * The default implementation traverses all elements of the collection using
+ * its {@link #iterator}. Each matching element is removed using
+ * {@link Iterator#remove()}. If the collection's iterator does not
+ * support removal then an {@code UnsupportedOperationException} will be
+ * thrown on the first matching element.
+ *
+ * @param filter a predicate which returns {@code true} for elements to be
+ * removed
+ * @return {@code true} if any elements were removed
+ * @throws NullPointerException if the specified filter is null
+ * @throws UnsupportedOperationException if the {@code remove}
+ * method is not supported by this collection's
+ * {@link #iterator}
+ * @since 1.8
+ */
+ default boolean removeIf(Predicate<? super E> filter) {
+ Objects.requireNonNull(filter);
+ boolean removed = false;
+ final Iterator<E> each = iterator();
+ while (each.hasNext()) {
+ if (filter.test(each.next())) {
+ each.remove();
+ removed = true;
+ }
+ }
+ return removed;
+ }
+
+ /**
* Retains only the elements in this collection that are contained in the
* specified collection (optional operation). In other words, removes from
* this collection all of its elements that are not contained in the
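
To make the @implSpec of the default removeIf above concrete, a minimal sketch (not from the changeset): the default works for any collection whose iterator supports remove(), and surfaces UnsupportedOperationException otherwise.

    import java.util.Arrays;
    import java.util.Collection;
    import java.util.LinkedList;
    import java.util.List;

    public class RemoveIfDefaultDemo {
        public static void main(String[] args) {
            // LinkedList does not override removeIf, so the iterator-based
            // default implementation declared above is what runs here.
            Collection<String> names = new LinkedList<>(Arrays.asList("ant", "bee", "cat"));
            boolean changed = names.removeIf(s -> s.startsWith("b"));
            System.out.println(changed + " " + names);   // true [ant, cat]

            // Arrays.asList is fixed-size: its iterator's remove() throws, so the
            // default removeIf fails on the first matching element.
            List<String> fixed = Arrays.asList("ant", "bee", "cat");
            try {
                fixed.removeIf(s -> s.startsWith("b"));
            } catch (UnsupportedOperationException expected) {
                System.out.println("fixed-size list rejected removal");
            }
        }
    }
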
--- a/jdk/src/share/classes/java/util/Collections.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/Collections.java Tue Apr 23 11:13:38 2013 +0100
@@ -30,7 +30,10 @@
import java.lang.reflect.Array;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
+import java.util.function.Consumer;
import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.function.UnaryOperator;
/**
* This class consists exclusively of static methods that operate on or return
@@ -1085,6 +1088,11 @@
public void remove() {
throw new UnsupportedOperationException();
}
+ @Override
+ public void forEachRemaining(Consumer<? super E> action) {
+ // Use backing collection version
+ i.forEachRemaining(action);
+ }
};
}
@@ -1110,6 +1118,21 @@
public void clear() {
throw new UnsupportedOperationException();
}
+
+ // Override default methods in Collection
+ @Override
+ public void forEach(Consumer<? super E> action) {
+ c.forEach(action);
+ }
+ @Override
+ public boolean removeIf(Predicate<? super E> filter) {
+ throw new UnsupportedOperationException();
+ }
+ @Override
+ public Spliterator<E> spliterator() {
+ return (Spliterator<E>)c.spliterator();
+ }
+
}
/**
@@ -1240,6 +1263,16 @@
public boolean addAll(int index, Collection<? extends E> c) {
throw new UnsupportedOperationException();
}
+
+ @Override
+ public void replaceAll(UnaryOperator<E> operator) {
+ throw new UnsupportedOperationException();
+ }
+ @Override
+ public void sort(Comparator<? super E> c) {
+ throw new UnsupportedOperationException();
+ }
+
public ListIterator<E> listIterator() {return listIterator(0);}
public ListIterator<E> listIterator(final int index) {
@@ -1263,6 +1296,11 @@
public void add(E e) {
throw new UnsupportedOperationException();
}
+
+ @Override
+ public void forEachRemaining(Consumer<? super E> action) {
+ i.forEachRemaining(action);
+ }
};
}
@@ -1642,7 +1680,8 @@
* through the returned collection.<p>
*
* It is imperative that the user manually synchronize on the returned
- * collection when iterating over it:
+ * collection when traversing it via {@link Iterator} or
+ * {@link Spliterator}:
* <pre>
* Collection c = Collections.synchronizedCollection(myCollection);
* ...
@@ -1739,6 +1778,19 @@
public String toString() {
synchronized (mutex) {return c.toString();}
}
+ // Override default methods in Collection
+ @Override
+ public void forEach(Consumer<? super E> consumer) {
+ synchronized (mutex) {c.forEach(consumer);}
+ }
+ @Override
+ public boolean removeIf(Predicate<? super E> filter) {
+ synchronized (mutex) {return c.removeIf(filter);}
+ }
+ @Override
+ public Spliterator<E> spliterator() {
+ return c.spliterator(); // Must be manually synched by user!
+ }
private void writeObject(ObjectOutputStream s) throws IOException {
synchronized (mutex) {s.defaultWriteObject();}
}
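
As the comment on spliterator() notes, only forEach and removeIf are synchronized inside the wrapper; a sketch (illustrative, not from the changeset) of the manual locking the class javadoc asks for when traversing:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collection;
    import java.util.Collections;

    public class SynchronizedTraversalDemo {
        public static void main(String[] args) {
            Collection<String> c = Collections.synchronizedCollection(
                    new ArrayList<>(Arrays.asList("a", "b", "c")));

            // forEach and removeIf acquire the mutex inside the wrapper...
            c.forEach(System.out::println);
            c.removeIf("b"::equals);

            // ...but spliterator() (like iterator()) does not, so the caller
            // must hold the collection's monitor for the whole traversal.
            synchronized (c) {
                c.spliterator().forEachRemaining(System.out::println);
            }
        }
    }
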
@@ -1996,6 +2048,15 @@
}
}
+ @Override
+ public void replaceAll(UnaryOperator<E> operator) {
+ synchronized (mutex) {list.replaceAll(operator);}
+ }
+ @Override
+ public void sort(Comparator<? super E> c) {
+ synchronized (mutex) {list.sort(c);}
+ }
+
/**
* SynchronizedRandomAccessList instances are serialized as
* SynchronizedList instances to allow them to be deserialized
@@ -2492,6 +2553,16 @@
// element as we added it)
return c.addAll(checkedCopyOf(coll));
}
+
+ // Override default methods in Collection
+ @Override
+ public void forEach(Consumer<? super E> action) {c.forEach(action);}
+ @Override
+ public boolean removeIf(Predicate<? super E> filter) {
+ return c.removeIf(filter);
+ }
+ @Override
+ public Spliterator<E> spliterator() {return c.spliterator();}
}
/**
@@ -2747,12 +2818,26 @@
typeCheck(e);
i.add(e);
}
+
+ @Override
+ public void forEachRemaining(Consumer<? super E> action) {
+ i.forEachRemaining(action);
+ }
};
}
public List<E> subList(int fromIndex, int toIndex) {
return new CheckedList<>(list.subList(fromIndex, toIndex), type);
}
+
+ @Override
+ public void replaceAll(UnaryOperator<E> operator) {
+ list.replaceAll(operator);
+ }
+ @Override
+ public void sort(Comparator<? super E> c) {
+ list.sort(c);
+ }
}
/**
@@ -3276,6 +3361,10 @@
public boolean hasNext() { return false; }
public E next() { throw new NoSuchElementException(); }
public void remove() { throw new IllegalStateException(); }
+ @Override
+ public void forEachRemaining(Consumer<? super E> action) {
+ Objects.requireNonNull(action);
+ }
}
/**
@@ -3416,6 +3505,19 @@
return a;
}
+ // Override default methods in Collection
+ @Override
+ public void forEach(Consumer<? super E> action) {
+ Objects.requireNonNull(action);
+ }
+ @Override
+ public boolean removeIf(Predicate<? super E> filter) {
+ Objects.requireNonNull(filter);
+ return false;
+ }
+ @Override
+ public Spliterator<E> spliterator() { return Spliterators.emptySpliterator(); }
+
// Preserves singleton property
private Object readResolve() {
return EMPTY_SET;
@@ -3523,6 +3625,21 @@
public E last() {
throw new NoSuchElementException();
}
+
+ // Override default methods in Collection
+ @Override
+ public void forEach(Consumer<? super E> action) {
+ Objects.requireNonNull(action);
+ }
+
+ @Override
+ public boolean removeIf(Predicate<? super E> filter) {
+ Objects.requireNonNull(filter);
+ return false;
+ }
+
+ @Override
+ public Spliterator<E> spliterator() { return Spliterators.emptySpliterator(); }
}
/**
@@ -3592,6 +3709,29 @@
public int hashCode() { return 1; }
+ @Override
+ public boolean removeIf(Predicate<? super E> filter) {
+ Objects.requireNonNull(filter);
+ return false;
+ }
+ @Override
+ public void replaceAll(UnaryOperator<E> operator) {
+ Objects.requireNonNull(operator);
+ }
+ @Override
+ public void sort(Comparator<? super E> c) {
+ Objects.requireNonNull(c);
+ }
+
+ // Override default methods in Collection
+ @Override
+ public void forEach(Consumer<? super E> action) {
+ Objects.requireNonNull(action);
+ }
+
+ @Override
+ public Spliterator<E> spliterator() { return Spliterators.emptySpliterator(); }
+
// Preserves singleton property
private Object readResolve() {
return EMPTY_LIST;
@@ -3747,6 +3887,60 @@
public void remove() {
throw new UnsupportedOperationException();
}
+ @Override
+ public void forEachRemaining(Consumer<? super E> action) {
+ Objects.requireNonNull(action);
+ if (hasNext) {
+ action.accept(e);
+ hasNext = false;
+ }
+ }
+ };
+ }
+
+    /**
+     * Creates a {@code Spliterator} with only the specified element.
+     *
+     * @param <T> Type of elements
+     * @param element the single element to traverse
+     * @return A singleton {@code Spliterator}
+     */
+ static <T> Spliterator<T> singletonSpliterator(final T element) {
+ return new Spliterator<T>() {
+ long est = 1;
+
+ @Override
+ public Spliterator<T> trySplit() {
+ return null;
+ }
+
+ @Override
+ public boolean tryAdvance(Consumer<? super T> consumer) {
+ Objects.requireNonNull(consumer);
+ if (est > 0) {
+ est--;
+ consumer.accept(element);
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public void forEachRemaining(Consumer<? super T> consumer) {
+ tryAdvance(consumer);
+ }
+
+ @Override
+ public long estimateSize() {
+ return est;
+ }
+
+ @Override
+ public int characteristics() {
+ int value = (element != null) ? Spliterator.NONNULL : 0;
+
+ return value | Spliterator.SIZED | Spliterator.SUBSIZED | Spliterator.IMMUTABLE |
+ Spliterator.DISTINCT | Spliterator.ORDERED;
+                    Spliterator.DISTINCT | Spliterator.ORDERED;
+            }
};
}
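
A brief sketch (not from the changeset) of the behaviour this spliterator gives the public singleton collections:

    import java.util.Collections;
    import java.util.Spliterator;

    public class SingletonSpliteratorDemo {
        public static void main(String[] args) {
            Spliterator<String> sp = Collections.singleton("only").spliterator();

            System.out.println("size     = " + sp.estimateSize());                          // 1
            System.out.println("sized    = " + sp.hasCharacteristics(Spliterator.SIZED));   // true
            System.out.println("non-null = " + sp.hasCharacteristics(Spliterator.NONNULL)); // true
            sp.forEachRemaining(System.out::println);   // prints "only"
            System.out.println("exhausted = " + !sp.tryAdvance(System.out::println));       // true
        }
    }
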
@@ -3770,6 +3964,20 @@
public int size() {return 1;}
public boolean contains(Object o) {return eq(o, element);}
+
+ // Override default methods for Collection
+ @Override
+ public void forEach(Consumer<? super E> action) {
+ action.accept(element);
+ }
+ @Override
+ public Spliterator<E> spliterator() {
+ return singletonSpliterator(element);
+ }
+ @Override
+ public boolean removeIf(Predicate<? super E> filter) {
+ throw new UnsupportedOperationException();
+ }
}
/**
@@ -3810,6 +4018,27 @@
throw new IndexOutOfBoundsException("Index: "+index+", Size: 1");
return element;
}
+
+ // Override default methods for Collection
+ @Override
+ public void forEach(Consumer<? super E> action) {
+ action.accept(element);
+ }
+ @Override
+ public boolean removeIf(Predicate<? super E> filter) {
+ throw new UnsupportedOperationException();
+ }
+ @Override
+ public void replaceAll(UnaryOperator<E> operator) {
+ throw new UnsupportedOperationException();
+ }
+ @Override
+ public void sort(Comparator<? super E> c) {
+ }
+ @Override
+ public Spliterator<E> spliterator() {
+ return singletonSpliterator(element);
+ }
}
/**
@@ -4408,6 +4637,19 @@
public boolean retainAll(Collection<?> c) {return s.retainAll(c);}
// addAll is the only inherited implementation
+ // Override default methods in Collection
+ @Override
+ public void forEach(Consumer<? super E> action) {
+ s.forEach(action);
+ }
+ @Override
+ public boolean removeIf(Predicate<? super E> filter) {
+ return s.removeIf(filter);
+ }
+
+ @Override
+ public Spliterator<E> spliterator() {return s.spliterator();}
+
private static final long serialVersionUID = 2454657854757543876L;
private void readObject(java.io.ObjectInputStream stream)
@@ -4466,5 +4708,15 @@
public boolean removeAll(Collection<?> c) {return q.removeAll(c);}
public boolean retainAll(Collection<?> c) {return q.retainAll(c);}
// We use inherited addAll; forwarding addAll would be wrong
+
+ // Override default methods in Collection
+ @Override
+ public void forEach(Consumer<? super E> action) {q.forEach(action);}
+ @Override
+ public Spliterator<E> spliterator() {return q.spliterator();}
+ @Override
+ public boolean removeIf(Predicate<? super E> filter) {
+ return q.removeIf(filter);
+ }
}
}
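
A short sketch (not from the changeset) of how the unmodifiable wrappers now treat the default methods: read-only operations are forwarded to the backing collection, while the mutating defaults fail fast instead of writing through.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class UnmodifiableDefaultsDemo {
        public static void main(String[] args) {
            List<Integer> backing = new ArrayList<>(Arrays.asList(3, 1, 2));
            List<Integer> view = Collections.unmodifiableList(backing);

            // Forwarded to the backing list.
            view.forEach(System.out::println);
            System.out.println(view.spliterator().estimateSize());   // 3

            // Overridden to reject modification rather than mutate the backing list.
            try {
                view.sort(null);                   // natural ordering, if it were allowed
            } catch (UnsupportedOperationException e) {
                System.out.println("sort rejected");
            }
            try {
                view.removeIf(n -> n > 1);
            } catch (UnsupportedOperationException e) {
                System.out.println("removeIf rejected");
            }
        }
    }
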
--- a/jdk/src/share/classes/java/util/Currency.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/Currency.java Tue Apr 23 11:13:38 2013 +0100
@@ -47,9 +47,8 @@
/**
* Represents a currency. Currencies are identified by their ISO 4217 currency
- * codes. Visit the <a href="http://www.iso.org/iso/en/prods-services/popstds/currencycodes.html">
- * ISO web site</a> for more information, including a table of
- * currency codes.
+ * codes. Visit the <a href="http://www.iso.org/iso/home/standards/currency_codes.htm">
+ * ISO web site</a> for more information.
* <p>
* The class is designed so that there's never more than one
* <code>Currency</code> instance for any given currency. Therefore, there's
--- a/jdk/src/share/classes/java/util/HashMap.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/HashMap.java Tue Apr 23 11:13:38 2013 +0100
@@ -1230,6 +1230,14 @@
public void clear() {
HashMap.this.clear();
}
+
+ public Spliterator<K> spliterator() {
+ if (HashMap.this.getClass() == HashMap.class)
+ return new KeySpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
+ else
+ return Spliterators.spliterator
+ (this, Spliterator.SIZED | Spliterator.DISTINCT);
+ }
}
/**
@@ -1263,6 +1271,14 @@
public void clear() {
HashMap.this.clear();
}
+
+ public Spliterator<V> spliterator() {
+ if (HashMap.this.getClass() == HashMap.class)
+ return new ValueSpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
+ else
+ return Spliterators.spliterator
+ (this, Spliterator.SIZED);
+ }
}
/**
@@ -1310,6 +1326,14 @@
public void clear() {
HashMap.this.clear();
}
+
+ public Spliterator<Map.Entry<K,V>> spliterator() {
+ if (HashMap.this.getClass() == HashMap.class)
+ return new EntrySpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
+ else
+ return Spliterators.spliterator
+ (this, Spliterator.SIZED | Spliterator.DISTINCT);
+ }
}
/**
@@ -1406,4 +1430,257 @@
// These methods are used when serializing HashSets
int capacity() { return table.length; }
float loadFactor() { return loadFactor; }
+
+ /**
+ * Standin until HM overhaul; based loosely on Weak and Identity HM.
+ */
+ static class HashMapSpliterator<K,V> {
+ final HashMap<K,V> map;
+ HashMap.Entry<K,V> current; // current node
+ int index; // current index, modified on advance/split
+ int fence; // one past last index
+ int est; // size estimate
+ int expectedModCount; // for comodification checks
+
+ HashMapSpliterator(HashMap<K,V> m, int origin,
+ int fence, int est,
+ int expectedModCount) {
+ this.map = m;
+ this.index = origin;
+ this.fence = fence;
+ this.est = est;
+ this.expectedModCount = expectedModCount;
+ }
+
+ final int getFence() { // initialize fence and size on first use
+ int hi;
+ if ((hi = fence) < 0) {
+ HashMap<K,V> m = map;
+ est = m.size;
+ expectedModCount = m.modCount;
+ hi = fence = m.table.length;
+ }
+ return hi;
+ }
+
+ public final long estimateSize() {
+ getFence(); // force init
+ return (long) est;
+ }
+ }
+
+ static final class KeySpliterator<K,V>
+ extends HashMapSpliterator<K,V>
+ implements Spliterator<K> {
+ KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
+ int expectedModCount) {
+ super(m, origin, fence, est, expectedModCount);
+ }
+
+ public KeySpliterator<K,V> trySplit() {
+ int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
+ return (lo >= mid || current != null) ? null :
+ new KeySpliterator<K,V>(map, lo, index = mid, est >>>= 1,
+ expectedModCount);
+ }
+
+ @SuppressWarnings("unchecked")
+ public void forEachRemaining(Consumer<? super K> action) {
+ int i, hi, mc;
+ if (action == null)
+ throw new NullPointerException();
+ HashMap<K,V> m = map;
+ HashMap.Entry<K,V>[] tab = (HashMap.Entry<K,V>[])m.table;
+ if ((hi = fence) < 0) {
+ mc = expectedModCount = m.modCount;
+ hi = fence = tab.length;
+ }
+ else
+ mc = expectedModCount;
+ if (tab.length >= hi && (i = index) >= 0 && i < (index = hi)) {
+ HashMap.Entry<K,V> p = current;
+ do {
+ if (p == null)
+ p = tab[i++];
+ else {
+ action.accept(p.getKey());
+ p = p.next;
+ }
+ } while (p != null || i < hi);
+ if (m.modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public boolean tryAdvance(Consumer<? super K> action) {
+ int hi;
+ if (action == null)
+ throw new NullPointerException();
+ HashMap.Entry<K,V>[] tab = (HashMap.Entry<K,V>[])map.table;
+ if (tab.length >= (hi = getFence()) && index >= 0) {
+ while (current != null || index < hi) {
+ if (current == null)
+ current = tab[index++];
+ else {
+ K k = current.getKey();
+ current = current.next;
+ action.accept(k);
+ if (map.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ public int characteristics() {
+ return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
+ Spliterator.DISTINCT;
+ }
+ }
+
+ static final class ValueSpliterator<K,V>
+ extends HashMapSpliterator<K,V>
+ implements Spliterator<V> {
+ ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
+ int expectedModCount) {
+ super(m, origin, fence, est, expectedModCount);
+ }
+
+ public ValueSpliterator<K,V> trySplit() {
+ int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
+ return (lo >= mid || current != null) ? null :
+ new ValueSpliterator<K,V>(map, lo, index = mid, est >>>= 1,
+ expectedModCount);
+ }
+
+ @SuppressWarnings("unchecked")
+ public void forEachRemaining(Consumer<? super V> action) {
+ int i, hi, mc;
+ if (action == null)
+ throw new NullPointerException();
+ HashMap<K,V> m = map;
+ HashMap.Entry<K,V>[] tab = (HashMap.Entry<K,V>[])m.table;
+ if ((hi = fence) < 0) {
+ mc = expectedModCount = m.modCount;
+ hi = fence = tab.length;
+ }
+ else
+ mc = expectedModCount;
+ if (tab.length >= hi && (i = index) >= 0 && i < (index = hi)) {
+ HashMap.Entry<K,V> p = current;
+ do {
+ if (p == null)
+ p = tab[i++];
+ else {
+ action.accept(p.getValue());
+ p = p.next;
+ }
+ } while (p != null || i < hi);
+ if (m.modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public boolean tryAdvance(Consumer<? super V> action) {
+ int hi;
+ if (action == null)
+ throw new NullPointerException();
+ HashMap.Entry<K,V>[] tab = (HashMap.Entry<K,V>[])map.table;
+ if (tab.length >= (hi = getFence()) && index >= 0) {
+ while (current != null || index < hi) {
+ if (current == null)
+ current = tab[index++];
+ else {
+ V v = current.getValue();
+ current = current.next;
+ action.accept(v);
+ if (map.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ public int characteristics() {
+ return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
+ }
+ }
+
+ static final class EntrySpliterator<K,V>
+ extends HashMapSpliterator<K,V>
+ implements Spliterator<Map.Entry<K,V>> {
+ EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
+ int expectedModCount) {
+ super(m, origin, fence, est, expectedModCount);
+ }
+
+ public EntrySpliterator<K,V> trySplit() {
+ int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
+ return (lo >= mid || current != null) ? null :
+ new EntrySpliterator<K,V>(map, lo, index = mid, est >>>= 1,
+ expectedModCount);
+ }
+
+ @SuppressWarnings("unchecked")
+ public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
+ int i, hi, mc;
+ if (action == null)
+ throw new NullPointerException();
+ HashMap<K,V> m = map;
+ HashMap.Entry<K,V>[] tab = (HashMap.Entry<K,V>[])m.table;
+ if ((hi = fence) < 0) {
+ mc = expectedModCount = m.modCount;
+ hi = fence = tab.length;
+ }
+ else
+ mc = expectedModCount;
+ if (tab.length >= hi && (i = index) >= 0 && i < (index = hi)) {
+ HashMap.Entry<K,V> p = current;
+ do {
+ if (p == null)
+ p = tab[i++];
+ else {
+ action.accept(p);
+ p = p.next;
+ }
+ } while (p != null || i < hi);
+ if (m.modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
+ int hi;
+ if (action == null)
+ throw new NullPointerException();
+ HashMap.Entry<K,V>[] tab = (HashMap.Entry<K,V>[])map.table;
+ if (tab.length >= (hi = getFence()) && index >= 0) {
+ while (current != null || index < hi) {
+ if (current == null)
+ current = tab[index++];
+ else {
+ HashMap.Entry<K,V> e = current;
+ current = current.next;
+ action.accept(e);
+ if (map.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ public int characteristics() {
+ return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
+ Spliterator.DISTINCT;
+ }
+ }
}
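
A short sketch (not from the changeset) showing the view spliterators added above; SIZED is only reported while a spliterator still covers the whole map:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Spliterator;

    public class HashMapSpliteratorDemo {
        public static void main(String[] args) {
            Map<String, Integer> map = new HashMap<>();
            map.put("a", 1);
            map.put("b", 2);
            map.put("c", 3);
            map.put("d", 4);

            Spliterator<String> keys = map.keySet().spliterator();
            System.out.println("SIZED    = " + keys.hasCharacteristics(Spliterator.SIZED));    // true
            System.out.println("DISTINCT = " + keys.hasCharacteristics(Spliterator.DISTINCT)); // true

            // After a split each part covers only a slice of the table, so the
            // element count becomes an estimate and SIZED is no longer reported.
            Spliterator<String> half = keys.trySplit();
            if (half != null) {
                System.out.println("SIZED after split = "
                        + keys.hasCharacteristics(Spliterator.SIZED));                         // false
            }

            map.values().spliterator().forEachRemaining(System.out::println);
            map.entrySet().spliterator()
               .forEachRemaining(e -> System.out.println(e.getKey() + "=" + e.getValue()));
        }
    }
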
--- a/jdk/src/share/classes/java/util/HashSet.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/HashSet.java Tue Apr 23 11:13:38 2013 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -311,4 +311,8 @@
map.put(e, PRESENT);
}
}
+
+ public Spliterator<E> spliterator() {
+ return new HashMap.KeySpliterator<E,Object>(map, 0, -1, 0, 0);
+ }
}
--- a/jdk/src/share/classes/java/util/IdentityHashMap.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/IdentityHashMap.java Tue Apr 23 11:13:38 2013 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,8 +24,10 @@
*/
package java.util;
+
import java.io.*;
import java.lang.reflect.Array;
+import java.util.function.Consumer;
/**
* This class implements the <tt>Map</tt> interface with a hash table, using
@@ -162,19 +164,19 @@
/**
* The table, resized as necessary. Length MUST always be a power of two.
*/
- private transient Object[] table;
+ transient Object[] table; // non-private to simplify nested class access
/**
* The number of key-value mappings contained in this identity hash map.
*
* @serial
*/
- private int size;
+ int size;
/**
* The number of modifications, to support fast-fail iterators
*/
- private transient int modCount;
+ transient int modCount;
/**
* The next size value at which to resize (capacity * load factor).
@@ -184,7 +186,7 @@
/**
* Value representing null keys inside tables.
*/
- private static final Object NULL_KEY = new Object();
+ static final Object NULL_KEY = new Object();
/**
* Use NULL_KEY for key if it is null.
@@ -196,7 +198,7 @@
/**
* Returns internal representation of null key back to caller as null.
*/
- private static Object unmaskNull(Object key) {
+ static final Object unmaskNull(Object key) {
return (key == NULL_KEY ? null : key);
}
@@ -1012,7 +1014,7 @@
return result;
}
public Object[] toArray() {
- return toArray(new Object[size()]);
+ return toArray(new Object[0]);
}
@SuppressWarnings("unchecked")
public <T> T[] toArray(T[] a) {
@@ -1042,6 +1044,10 @@
}
return a;
}
+
+ public Spliterator<K> spliterator() {
+ return new KeySpliterator<>(IdentityHashMap.this, 0, -1, 0, 0);
+ }
}
/**
@@ -1095,7 +1101,7 @@
IdentityHashMap.this.clear();
}
public Object[] toArray() {
- return toArray(new Object[size()]);
+ return toArray(new Object[0]);
}
@SuppressWarnings("unchecked")
public <T> T[] toArray(T[] a) {
@@ -1124,6 +1130,10 @@
}
return a;
}
+
+ public Spliterator<V> spliterator() {
+ return new ValueSpliterator<>(IdentityHashMap.this, 0, -1, 0, 0);
+ }
}
/**
@@ -1211,7 +1221,7 @@
}
public Object[] toArray() {
- return toArray(new Object[size()]);
+ return toArray(new Object[0]);
}
@SuppressWarnings("unchecked")
@@ -1242,6 +1252,10 @@
}
return a;
}
+
+ public Spliterator<Map.Entry<K,V>> spliterator() {
+ return new EntrySpliterator<>(IdentityHashMap.this, 0, -1, 0, 0);
+ }
}
@@ -1322,4 +1336,223 @@
tab[i] = k;
tab[i + 1] = value;
}
+
+ /**
+ * Similar form as array-based Spliterators, but skips blank elements,
+ * and guestimates size as decreasing by half per split.
+ */
+ static class IdentityHashMapSpliterator<K,V> {
+ final IdentityHashMap<K,V> map;
+ int index; // current index, modified on advance/split
+ int fence; // -1 until first use; then one past last index
+ int est; // size estimate
+ int expectedModCount; // initialized when fence set
+
+ IdentityHashMapSpliterator(IdentityHashMap<K,V> map, int origin,
+ int fence, int est, int expectedModCount) {
+ this.map = map;
+ this.index = origin;
+ this.fence = fence;
+ this.est = est;
+ this.expectedModCount = expectedModCount;
+ }
+
+ final int getFence() { // initialize fence and size on first use
+ int hi;
+ if ((hi = fence) < 0) {
+ est = map.size;
+ expectedModCount = map.modCount;
+ hi = fence = map.table.length;
+ }
+ return hi;
+ }
+
+ public final long estimateSize() {
+ getFence(); // force init
+ return (long) est;
+ }
+ }
+
+ static final class KeySpliterator<K,V>
+ extends IdentityHashMapSpliterator<K,V>
+ implements Spliterator<K> {
+ KeySpliterator(IdentityHashMap<K,V> map, int origin, int fence, int est,
+ int expectedModCount) {
+ super(map, origin, fence, est, expectedModCount);
+ }
+
+ public KeySpliterator<K,V> trySplit() {
+ int hi = getFence(), lo = index, mid = ((lo + hi) >>> 1) & ~1;
+ return (lo >= mid) ? null :
+ new KeySpliterator<K,V>(map, lo, index = mid, est >>>= 1,
+ expectedModCount);
+ }
+
+ @SuppressWarnings("unchecked")
+ public void forEachRemaining(Consumer<? super K> action) {
+ if (action == null)
+ throw new NullPointerException();
+ int i, hi, mc; Object key;
+ IdentityHashMap<K,V> m; Object[] a;
+ if ((m = map) != null && (a = m.table) != null &&
+ (i = index) >= 0 && (index = hi = getFence()) <= a.length) {
+ for (; i < hi; i += 2) {
+ if ((key = a[i]) != null)
+ action.accept((K)unmaskNull(key));
+ }
+ if (m.modCount == expectedModCount)
+ return;
+ }
+ throw new ConcurrentModificationException();
+ }
+
+ @SuppressWarnings("unchecked")
+ public boolean tryAdvance(Consumer<? super K> action) {
+ if (action == null)
+ throw new NullPointerException();
+ Object[] a = map.table;
+ int hi = getFence();
+ while (index < hi) {
+ Object key = a[index];
+ index += 2;
+ if (key != null) {
+ action.accept((K)unmaskNull(key));
+ if (map.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public int characteristics() {
+ return (fence < 0 || est == map.size ? SIZED : 0) | Spliterator.DISTINCT;
+ }
+ }
+
+ static final class ValueSpliterator<K,V>
+ extends IdentityHashMapSpliterator<K,V>
+ implements Spliterator<V> {
+ ValueSpliterator(IdentityHashMap<K,V> m, int origin, int fence, int est,
+ int expectedModCount) {
+ super(m, origin, fence, est, expectedModCount);
+ }
+
+ public ValueSpliterator<K,V> trySplit() {
+ int hi = getFence(), lo = index, mid = ((lo + hi) >>> 1) & ~1;
+ return (lo >= mid) ? null :
+ new ValueSpliterator<K,V>(map, lo, index = mid, est >>>= 1,
+ expectedModCount);
+ }
+
+ public void forEachRemaining(Consumer<? super V> action) {
+ if (action == null)
+ throw new NullPointerException();
+ int i, hi, mc;
+ IdentityHashMap<K,V> m; Object[] a;
+ if ((m = map) != null && (a = m.table) != null &&
+ (i = index) >= 0 && (index = hi = getFence()) <= a.length) {
+ for (; i < hi; i += 2) {
+ if (a[i] != null) {
+ @SuppressWarnings("unchecked") V v = (V)a[i+1];
+ action.accept(v);
+ }
+ }
+ if (m.modCount == expectedModCount)
+ return;
+ }
+ throw new ConcurrentModificationException();
+ }
+
+ public boolean tryAdvance(Consumer<? super V> action) {
+ if (action == null)
+ throw new NullPointerException();
+ Object[] a = map.table;
+ int hi = getFence();
+ while (index < hi) {
+ Object key = a[index];
+ @SuppressWarnings("unchecked") V v = (V)a[index+1];
+ index += 2;
+ if (key != null) {
+ action.accept(v);
+ if (map.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public int characteristics() {
+ return (fence < 0 || est == map.size ? SIZED : 0);
+ }
+
+ }
+
+ static final class EntrySpliterator<K,V>
+ extends IdentityHashMapSpliterator<K,V>
+ implements Spliterator<Map.Entry<K,V>> {
+ EntrySpliterator(IdentityHashMap<K,V> m, int origin, int fence, int est,
+ int expectedModCount) {
+ super(m, origin, fence, est, expectedModCount);
+ }
+
+ public EntrySpliterator<K,V> trySplit() {
+ int hi = getFence(), lo = index, mid = ((lo + hi) >>> 1) & ~1;
+ return (lo >= mid) ? null :
+ new EntrySpliterator<K,V>(map, lo, index = mid, est >>>= 1,
+ expectedModCount);
+ }
+
+ public void forEachRemaining(Consumer<? super Map.Entry<K, V>> action) {
+ if (action == null)
+ throw new NullPointerException();
+ int i, hi, mc;
+ IdentityHashMap<K,V> m; Object[] a;
+ if ((m = map) != null && (a = m.table) != null &&
+ (i = index) >= 0 && (index = hi = getFence()) <= a.length) {
+ for (; i < hi; i += 2) {
+ Object key = a[i];
+ if (key != null) {
+ @SuppressWarnings("unchecked") K k =
+ (K)unmaskNull(key);
+ @SuppressWarnings("unchecked") V v = (V)a[i+1];
+ action.accept
+ (new AbstractMap.SimpleImmutableEntry<K,V>(k, v));
+
+ }
+ }
+ if (m.modCount == expectedModCount)
+ return;
+ }
+ throw new ConcurrentModificationException();
+ }
+
+ public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
+ if (action == null)
+ throw new NullPointerException();
+ Object[] a = map.table;
+ int hi = getFence();
+ while (index < hi) {
+ Object key = a[index];
+ @SuppressWarnings("unchecked") V v = (V)a[index+1];
+ index += 2;
+ if (key != null) {
+ @SuppressWarnings("unchecked") K k =
+ (K)unmaskNull(key);
+ action.accept
+ (new AbstractMap.SimpleImmutableEntry<K,V>(k, v));
+ if (map.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ }
+ return false;
+ }
+
+ public int characteristics() {
+ return (fence < 0 || est == map.size ? SIZED : 0) | Spliterator.DISTINCT;
+ }
+ }
+
}
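A minimal usage sketch of the new IdentityHashMap spliterators (not part of this changeset; the demo class name is made up): the key and value views now feed the Stream API directly.

    import java.util.IdentityHashMap;
    import java.util.Map;
    import java.util.Spliterator;

    public class IdentityHashMapSpliteratorDemo {
        public static void main(String[] args) {
            Map<String, Integer> m = new IdentityHashMap<>();
            m.put(new String("a"), 1);   // distinct identities on purpose
            m.put(new String("a"), 2);
            m.put(new String("b"), 3);

            Spliterator<String> s = m.keySet().spliterator();
            // SIZED is reported while the size estimate is still exact.
            System.out.println(s.hasCharacteristics(Spliterator.DISTINCT)); // true
            System.out.println(s.estimateSize());                           // 3

            int sum = m.values().parallelStream().mapToInt(Integer::intValue).sum();
            System.out.println(sum);                                        // 6
        }
    }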
--- a/jdk/src/share/classes/java/util/LinkedHashSet.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/LinkedHashSet.java Tue Apr 23 11:13:38 2013 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -168,4 +168,18 @@
super(Math.max(2*c.size(), 11), .75f, true);
addAll(c);
}
+
+ /**
+ * Creates a {@code Spliterator}, over the elements in this set, that
+ * reports {@code SIZED}, {@code DISTINCT} and {@code ORDERED}.
+ * Overriding implementations are expected to document if the
+ * {@code Spliterator} reports any additional and relevant characteristic
+ * values.
+ *
+ * @return a {@code Spliterator} over the elements in this set
+ */
+ @Override
+ public Spliterator<E> spliterator() {
+ return Spliterators.spliterator(this, Spliterator.DISTINCT | Spliterator.ORDERED);
+ }
}
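For illustration only (the class below is not in the patch): a LinkedHashSet spliterator obtained through Spliterators.spliterator(Collection, ...) also reports SIZED and SUBSIZED in addition to the documented DISTINCT and ORDERED, and streams over the set keep insertion order.

    import java.util.LinkedHashSet;
    import java.util.Set;
    import java.util.Spliterator;
    import java.util.stream.Collectors;

    public class LinkedHashSetSpliteratorDemo {
        public static void main(String[] args) {
            Set<String> s = new LinkedHashSet<>();
            s.add("first"); s.add("second"); s.add("third");

            Spliterator<String> sp = s.spliterator();
            System.out.println(sp.hasCharacteristics(Spliterator.ORDERED));  // true
            System.out.println(sp.hasCharacteristics(Spliterator.DISTINCT)); // true
            System.out.println(sp.hasCharacteristics(Spliterator.SIZED));    // true (added by the factory)

            // Encounter order follows insertion order.
            System.out.println(s.stream().collect(Collectors.joining(", "))); // first, second, third
        }
    }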
--- a/jdk/src/share/classes/java/util/LinkedList.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/LinkedList.java Tue Apr 23 11:13:38 2013 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
package java.util;
+import java.util.function.Consumer;
+
/**
* Doubly-linked list implementation of the {@code List} and {@code Deque}
* interfaces. Implements all optional list operations, and permits all
@@ -948,6 +950,16 @@
expectedModCount++;
}
+ public void forEachRemaining(Consumer<? super E> action) {
+ Objects.requireNonNull(action);
+ while (modCount == expectedModCount && nextIndex < size) {
+ action.accept(next.item);
+ next = next.next;
+ nextIndex++;
+ }
+ checkForComodification();
+ }
+
final void checkForComodification() {
if (modCount != expectedModCount)
throw new ConcurrentModificationException();
@@ -1135,4 +1147,103 @@
for (int i = 0; i < size; i++)
linkLast((E)s.readObject());
}
+
+ public Spliterator<E> spliterator() {
+ return new LLSpliterator<E>(this, -1, 0);
+ }
+
+ /** A customized variant of Spliterators.IteratorSpliterator */
+ static final class LLSpliterator<E> implements Spliterator<E> {
+ static final int BATCH_UNIT = 1 << 10; // batch array size increment
+ static final int MAX_BATCH = 1 << 25; // max batch array size;
+ final LinkedList<E> list; // null OK unless traversed
+ Node<E> current; // current node; null until initialized
+ int est; // size estimate; -1 until first needed
+ int expectedModCount; // initialized when est set
+ int batch; // batch size for splits
+
+ LLSpliterator(LinkedList<E> list, int est, int expectedModCount) {
+ this.list = list;
+ this.est = est;
+ this.expectedModCount = expectedModCount;
+ }
+
+ final int getEst() {
+ int s; // force initialization
+ final LinkedList<E> lst;
+ if ((s = est) < 0) {
+ if ((lst = list) == null)
+ s = est = 0;
+ else {
+ expectedModCount = lst.modCount;
+ current = lst.first;
+ s = est = lst.size;
+ }
+ }
+ return s;
+ }
+
+ public long estimateSize() { return (long) getEst(); }
+
+ public Spliterator<E> trySplit() {
+ Node<E> p;
+ int s = getEst();
+ if (s > 1 && (p = current) != null) {
+ int n = batch + BATCH_UNIT;
+ if (n > s)
+ n = s;
+ if (n > MAX_BATCH)
+ n = MAX_BATCH;
+ Object[] a;
+ try {
+ a = new Object[n];
+ } catch (OutOfMemoryError oome) {
+ return null;
+ }
+ int j = 0;
+ do { a[j++] = p.item; } while ((p = p.next) != null && j < n);
+ current = p;
+ batch = j;
+ est = s - j;
+ return Spliterators.spliterator(a, 0, j, Spliterator.ORDERED);
+ }
+ return null;
+ }
+
+ public void forEachRemaining(Consumer<? super E> action) {
+ Node<E> p; int n;
+ if (action == null) throw new NullPointerException();
+ if ((n = getEst()) > 0 && (p = current) != null) {
+ current = null;
+ est = 0;
+ do {
+ E e = p.item;
+ p = p.next;
+ action.accept(e);
+ } while (p != null && --n > 0);
+ }
+ if (list.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ }
+
+ public boolean tryAdvance(Consumer<? super E> action) {
+ Node<E> p;
+ if (action == null) throw new NullPointerException();
+ if (getEst() > 0 && (p = current) != null) {
+ --est;
+ E e = p.item;
+ current = p.next;
+ action.accept(e);
+ if (list.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ return false;
+ }
+
+ public int characteristics() {
+ return Spliterator.ORDERED | Spliterator.SIZED | Spliterator.SUBSIZED;
+ }
+ }
+
}
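A small sketch of how LLSpliterator splits (hypothetical demo class, not part of the patch): trySplit copies a leading batch of nodes into an array-backed spliterator, so on a short list the first split hands over everything and the remainder is empty.

    import java.util.LinkedList;
    import java.util.Spliterator;

    public class LinkedListSpliteratorDemo {
        public static void main(String[] args) {
            LinkedList<Integer> list = new LinkedList<>();
            for (int i = 0; i < 5; i++) list.add(i);

            Spliterator<Integer> rest = list.spliterator();
            Spliterator<Integer> batch = rest.trySplit(); // array-backed copy of the first batch

            System.out.print("batch:     ");
            if (batch != null) batch.forEachRemaining(e -> System.out.print(e + " "));
            System.out.print("\nremainder: ");
            rest.forEachRemaining(e -> System.out.print(e + " "));
            System.out.println();
        }
    }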
--- a/jdk/src/share/classes/java/util/List.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/List.java Tue Apr 23 11:13:38 2013 +0100
@@ -25,6 +25,8 @@
package java.util;
+import java.util.function.UnaryOperator;
+
/**
* An ordered collection (also known as a <i>sequence</i>). The user of this
* interface has precise control over where in the list each element is
@@ -375,6 +377,64 @@
boolean retainAll(Collection<?> c);
/**
+ * Replaces each element of this list with the result of applying the
+ * operator to that element. Errors or runtime exceptions thrown by
+ * the operator are relayed to the caller.
+ *
+ * @implSpec
+ * The default implementation is equivalent to, for this {@code list}:
+ * <pre>{@code
+ *     final ListIterator<E> li = list.listIterator();
+ *     while (li.hasNext()) {
+ *         li.set(operator.apply(li.next()));
+ *     }
+ * }</pre>
+ * If the list's list-iterator does not support the {@code set} operation
+ * then an {@code UnsupportedOperationException} will be thrown when
+ * replacing the first element.
+ *
+ * @param operator the operator to apply to each element
+ * @throws UnsupportedOperationException if the {@code set}
+ * operation is not supported by this list
+ * @throws NullPointerException if the specified operator is null or
+ * if the element is replaced with a null value and this list
+ * does not permit null elements
+ * (<a href="Collection.html#optional-restrictions">optional</a>)
+ * @since 1.8
+ */
+ default void replaceAll(UnaryOperator<E> operator) {
+ Objects.requireNonNull(operator);
+ final ListIterator<E> li = this.listIterator();
+ while (li.hasNext()) {
+ li.set(operator.apply(li.next()));
+ }
+ }
+
+ /**
+ * Sorts this list using the supplied {@code Comparator} to compare elements.
+ *
+ * @implSpec
+ * The default implementation is equivalent to, for this {@code list}:
+ * <pre>Collections.sort(list, c)</pre>
+ *
+ * @param c the {@code Comparator} used to compare list elements.
+ * A {@code null} value indicates that the elements'
+ * {@linkplain Comparable natural ordering} should be used
+ * @throws ClassCastException if the list contains elements that are not
+ * <i>mutually comparable</i> using the specified comparator
+ * @throws UnsupportedOperationException if the list's list-iterator does
+ * not support the {@code set} operation
+ * @throws IllegalArgumentException
+ * (<a href="Collection.html#optional-restrictions">optional</a>)
+ * if the comparator is found to violate the {@link Comparator}
+ * contract
+ * @since 1.8
+ */
+ default void sort(Comparator<? super E> c) {
+ Collections.sort(this, c);
+ }
+
+ /**
* Removes all of the elements from this list (optional operation).
* The list will be empty after this call returns.
*
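The two new List defaults are called like any other library method; a short, hypothetical example (not from the changeset):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    public class ListDefaultsDemo {
        public static void main(String[] args) {
            List<String> words = new ArrayList<>(Arrays.asList("pear", "Apple", "banana"));

            words.replaceAll(String::toUpperCase);   // operator applied to each element in place
            words.sort(Comparator.reverseOrder());   // passing null would mean natural ordering

            System.out.println(words);               // [PEAR, BANANA, APPLE]
        }
    }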
--- a/jdk/src/share/classes/java/util/PriorityQueue.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/PriorityQueue.java Tue Apr 23 11:13:38 2013 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
package java.util;
+import java.util.function.Consumer;
+
/**
* An unbounded priority {@linkplain Queue queue} based on a priority heap.
* The elements of the priority queue are ordered according to their
@@ -56,7 +58,7 @@
* the priority queue in any particular order. If you need ordered
* traversal, consider using {@code Arrays.sort(pq.toArray())}.
*
- * <p> <strong>Note that this implementation is not synchronized.</strong>
+ * <p><strong>Note that this implementation is not synchronized.</strong>
* Multiple threads should not access a {@code PriorityQueue}
* instance concurrently if any of the threads modifies the queue.
* Instead, use the thread-safe {@link
@@ -92,7 +94,7 @@
* heap and each descendant d of n, n <= d. The element with the
* lowest value is in queue[0], assuming the queue is nonempty.
*/
- private transient Object[] queue;
+ transient Object[] queue; // non-private to simplify nested class access
/**
* The number of elements in the priority queue.
@@ -109,7 +111,7 @@
* The number of times this priority queue has been
* <i>structurally modified</i>. See AbstractList for gory details.
*/
- private transient int modCount = 0;
+ transient int modCount = 0; // non-private to simplify nested class access
/**
* Creates a {@code PriorityQueue} with the default initial
@@ -332,9 +334,7 @@
@SuppressWarnings("unchecked")
public E peek() {
- if (size == 0)
- return null;
- return (E) queue[0];
+ return (size == 0) ? null : (E) queue[0];
}
private int indexOf(Object o) {
@@ -431,15 +431,14 @@
* precise control over the runtime type of the output array, and may,
* under certain circumstances, be used to save allocation costs.
*
- * <p>Suppose <tt>x</tt> is a queue known to contain only strings.
+ * <p>Suppose {@code x} is a queue known to contain only strings.
* The following code can be used to dump the queue into a newly
- * allocated array of <tt>String</tt>:
+ * allocated array of {@code String}:
*
- * <pre>
- * String[] y = x.toArray(new String[0]);</pre>
+ * <pre> {@code String[] y = x.toArray(new String[0]);}</pre>
*
- * Note that <tt>toArray(new Object[0])</tt> is identical in function to
- * <tt>toArray()</tt>.
+ * Note that {@code toArray(new Object[0])} is identical in function to
+ * {@code toArray()}.
*
* @param a the array into which the elements of the queue are to
* be stored, if it is big enough; otherwise, a new array of the
@@ -452,6 +451,7 @@
*/
@SuppressWarnings("unchecked")
public <T> T[] toArray(T[] a) {
+ final int size = this.size;
if (a.length < size)
// Make a new array of a's runtime type, but my contents:
return (T[]) Arrays.copyOf(queue, size, a.getClass());
@@ -569,15 +569,14 @@
size = 0;
}
+ @SuppressWarnings("unchecked")
public E poll() {
if (size == 0)
return null;
int s = --size;
modCount++;
- @SuppressWarnings("unchecked")
- E result = (E) queue[0];
- @SuppressWarnings("unchecked")
- E x = (E) queue[s];
+ E result = (E) queue[0];
+ E x = (E) queue[s];
queue[s] = null;
if (s != 0)
siftDown(0, x);
@@ -596,15 +595,15 @@
* position before i. This fact is used by iterator.remove so as to
* avoid missing traversing elements.
*/
+ @SuppressWarnings("unchecked")
private E removeAt(int i) {
- assert i >= 0 && i < size;
+ // assert i >= 0 && i < size;
modCount++;
int s = --size;
if (s == i) // removed last element
queue[i] = null;
else {
- @SuppressWarnings("unchecked")
- E moved = (E) queue[s];
+ E moved = (E) queue[s];
queue[s] = null;
siftDown(i, moved);
if (queue[i] == moved) {
@@ -649,12 +648,12 @@
queue[k] = key;
}
+ @SuppressWarnings("unchecked")
private void siftUpUsingComparator(int k, E x) {
while (k > 0) {
int parent = (k - 1) >>> 1;
- @SuppressWarnings("unchecked")
- E e = (E) queue[parent];
- if (comparator.compare(x, e) >= 0)
+ Object e = queue[parent];
+ if (comparator.compare(x, (E) e) >= 0)
break;
queue[k] = e;
k = parent;
@@ -738,8 +737,7 @@
}
/**
- * Saves the state of the instance to a stream (that
- * is, serializes it).
+ * Saves this queue to a stream (that is, serializes it).
*
* @serialData The length of the array backing the instance is
* emitted (int), followed by all of its elements
@@ -747,7 +745,7 @@
* @param s the stream
*/
private void writeObject(java.io.ObjectOutputStream s)
- throws java.io.IOException{
+ throws java.io.IOException {
// Write out element count, and any hidden stuff
s.defaultWriteObject();
@@ -783,4 +781,99 @@
// spec has never explained what that might be.
heapify();
}
+
+ public final Spliterator<E> spliterator() {
+ return new PriorityQueueSpliterator<E>(this, 0, -1, 0);
+ }
+
+ static final class PriorityQueueSpliterator<E> implements Spliterator<E> {
+ /*
+ * This is very similar to ArrayList Spliterator, except for
+ * extra null checks.
+ */
+ private final PriorityQueue<E> pq;
+ private int index; // current index, modified on advance/split
+ private int fence; // -1 until first use
+ private int expectedModCount; // initialized when fence set
+
+ /** Creates new spliterator covering the given range */
+ PriorityQueueSpliterator(PriorityQueue<E> pq, int origin, int fence,
+ int expectedModCount) {
+ this.pq = pq;
+ this.index = origin;
+ this.fence = fence;
+ this.expectedModCount = expectedModCount;
+ }
+
+ private int getFence() { // initialize fence to size on first use
+ int hi;
+ if ((hi = fence) < 0) {
+ expectedModCount = pq.modCount;
+ hi = fence = pq.size;
+ }
+ return hi;
+ }
+
+ public PriorityQueueSpliterator<E> trySplit() {
+ int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
+ return (lo >= mid) ? null :
+ new PriorityQueueSpliterator<E>(pq, lo, index = mid,
+ expectedModCount);
+ }
+
+ @SuppressWarnings("unchecked")
+ public void forEachRemaining(Consumer<? super E> action) {
+ int i, hi, mc; // hoist accesses and checks from loop
+ PriorityQueue<E> q; Object[] a;
+ if (action == null)
+ throw new NullPointerException();
+ if ((q = pq) != null && (a = q.queue) != null) {
+ if ((hi = fence) < 0) {
+ mc = q.modCount;
+ hi = q.size;
+ }
+ else
+ mc = expectedModCount;
+ if ((i = index) >= 0 && (index = hi) <= a.length) {
+ for (E e;; ++i) {
+ if (i < hi) {
+ if ((e = (E) a[i]) == null) // must be CME
+ break;
+ action.accept(e);
+ }
+ else if (q.modCount != mc)
+ break;
+ else
+ return;
+ }
+ }
+ }
+ throw new ConcurrentModificationException();
+ }
+
+ public boolean tryAdvance(Consumer<? super E> action) {
+ if (action == null)
+ throw new NullPointerException();
+ int hi = getFence(), lo = index;
+ if (lo >= 0 && lo < hi) {
+ index = lo + 1;
+ @SuppressWarnings("unchecked") E e = (E)pq.queue[lo];
+ if (e == null)
+ throw new ConcurrentModificationException();
+ action.accept(e);
+ if (pq.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ return false;
+ }
+
+ public long estimateSize() {
+ return (long) (getFence() - index);
+ }
+
+ public int characteristics() {
+ return Spliterator.SIZED | Spliterator.SUBSIZED | Spliterator.NONNULL;
+ }
+ }
}
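For reference (hypothetical demo, not part of the patch): the PriorityQueue spliterator reports SIZED, SUBSIZED and NONNULL, and traverses the backing heap array rather than priority order.

    import java.util.PriorityQueue;
    import java.util.Spliterator;
    import java.util.stream.Collectors;

    public class PriorityQueueSpliteratorDemo {
        public static void main(String[] args) {
            PriorityQueue<Integer> pq = new PriorityQueue<>();
            pq.add(5); pq.add(1); pq.add(3);

            Spliterator<Integer> s = pq.spliterator();
            System.out.println(s.hasCharacteristics(Spliterator.NONNULL)); // true
            System.out.println(s.hasCharacteristics(Spliterator.SIZED));   // true

            // Heap (array) order, not priority order.
            System.out.println(pq.stream().collect(Collectors.toList()));
        }
    }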
--- a/jdk/src/share/classes/java/util/TreeMap.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/TreeMap.java Tue Apr 23 11:13:38 2013 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,8 @@
package java.util;
+import java.util.function.Consumer;
+
/**
* A Red-Black tree based {@link NavigableMap} implementation.
* The map is sorted according to the {@linkplain Comparable natural
@@ -971,6 +973,10 @@
public void clear() {
TreeMap.this.clear();
}
+
+ public Spliterator<V> spliterator() {
+ return new ValueSpliterator<K,V>(TreeMap.this, null, null, 0, -1, 0);
+ }
}
class EntrySet extends AbstractSet<Map.Entry<K,V>> {
@@ -1007,6 +1013,10 @@
public void clear() {
TreeMap.this.clear();
}
+
+ public Spliterator<Map.Entry<K,V>> spliterator() {
+ return new EntrySpliterator<K,V>(TreeMap.this, null, null, 0, -1, 0);
+ }
}
/*
@@ -1090,6 +1100,10 @@
public NavigableSet<E> descendingSet() {
return new KeySet<>(m.descendingMap());
}
+
+ public Spliterator<E> spliterator() {
+ return keySpliteratorFor(m);
+ }
}
/**
@@ -1389,6 +1403,8 @@
/** Returns ascending iterator from the perspective of this submap */
abstract Iterator<K> keyIterator();
+ abstract Spliterator<K> keySpliterator();
+
/** Returns descending iterator from the perspective of this submap */
abstract Iterator<K> descendingKeyIterator();
@@ -1650,19 +1666,6 @@
}
}
- final class SubMapKeyIterator extends SubMapIterator<K> {
- SubMapKeyIterator(TreeMap.Entry<K,V> first,
- TreeMap.Entry<K,V> fence) {
- super(first, fence);
- }
- public K next() {
- return nextEntry().key;
- }
- public void remove() {
- removeAscending();
- }
- }
-
final class DescendingSubMapEntryIterator extends SubMapIterator<Map.Entry<K,V>> {
DescendingSubMapEntryIterator(TreeMap.Entry<K,V> last,
TreeMap.Entry<K,V> fence) {
@@ -1677,7 +1680,47 @@
}
}
- final class DescendingSubMapKeyIterator extends SubMapIterator<K> {
+ // Implement minimal Spliterator as KeySpliterator backup
+ final class SubMapKeyIterator extends SubMapIterator<K>
+ implements Spliterator<K> {
+ SubMapKeyIterator(TreeMap.Entry<K,V> first,
+ TreeMap.Entry<K,V> fence) {
+ super(first, fence);
+ }
+ public K next() {
+ return nextEntry().key;
+ }
+ public void remove() {
+ removeAscending();
+ }
+ public Spliterator<K> trySplit() {
+ return null;
+ }
+ public void forEachRemaining(Consumer<? super K> action) {
+ while (hasNext())
+ action.accept(next());
+ }
+ public boolean tryAdvance(Consumer<? super K> action) {
+ if (hasNext()) {
+ action.accept(next());
+ return true;
+ }
+ return false;
+ }
+ public long estimateSize() {
+ return Long.MAX_VALUE;
+ }
+ public int characteristics() {
+ return Spliterator.DISTINCT | Spliterator.ORDERED |
+ Spliterator.SORTED;
+ }
+ public final Comparator<? super K> getComparator() {
+ return NavigableSubMap.this.comparator();
+ }
+ }
+
+ final class DescendingSubMapKeyIterator extends SubMapIterator<K>
+ implements Spliterator<K> {
DescendingSubMapKeyIterator(TreeMap.Entry<K,V> last,
TreeMap.Entry<K,V> fence) {
super(last, fence);
@@ -1688,6 +1731,26 @@
public void remove() {
removeDescending();
}
+ public Spliterator<K> trySplit() {
+ return null;
+ }
+ public void forEachRemaining(Consumer<? super K> action) {
+ while (hasNext())
+ action.accept(next());
+ }
+ public boolean tryAdvance(Consumer<? super K> action) {
+ if (hasNext()) {
+ action.accept(next());
+ return true;
+ }
+ return false;
+ }
+ public long estimateSize() {
+ return Long.MAX_VALUE;
+ }
+ public int characteristics() {
+ return Spliterator.DISTINCT | Spliterator.ORDERED;
+ }
}
}
@@ -1747,6 +1810,10 @@
return new SubMapKeyIterator(absLowest(), absHighFence());
}
+ Spliterator<K> keySpliterator() {
+ return new SubMapKeyIterator(absLowest(), absHighFence());
+ }
+
Iterator<K> descendingKeyIterator() {
return new DescendingSubMapKeyIterator(absHighest(), absLowFence());
}
@@ -1828,6 +1895,10 @@
return new DescendingSubMapKeyIterator(absHighest(), absLowFence());
}
+ Spliterator<K> keySpliterator() {
+ return new DescendingSubMapKeyIterator(absHighest(), absLowFence());
+ }
+
Iterator<K> descendingKeyIterator() {
return new SubMapKeyIterator(absLowest(), absHighFence());
}
@@ -2444,4 +2515,407 @@
level++;
return level;
}
+
+ /**
+ * Currently, we support Spliterator-based versions only for the
+ * full map, in either plain or descending form, otherwise relying
+ * on defaults because size estimation for submaps would dominate
+ * costs. The type tests needed to check these for key views are
+ * not very nice but avoid disrupting existing class
+ * structures. Callers must use plain default spliterators if this
+ * returns null.
+ */
+ static <K> Spliterator<K> keySpliteratorFor(NavigableMap<K,?> m) {
+ if (m instanceof TreeMap) {
+ @SuppressWarnings("unchecked") TreeMap<K,Object> t =
+ (TreeMap<K,Object>) m;
+ return t.keySpliterator();
+ }
+ if (m instanceof DescendingSubMap) {
+ @SuppressWarnings("unchecked") DescendingSubMap<K,?> dm =
+ (DescendingSubMap<K,?>) m;
+ TreeMap<K,?> tm = dm.m;
+ if (dm == tm.descendingMap) {
+ @SuppressWarnings("unchecked") TreeMap<K,Object> t =
+ (TreeMap<K,Object>) tm;
+ return t.descendingKeySpliterator();
+ }
+ }
+ @SuppressWarnings("unchecked") NavigableSubMap<K,?> sm =
+ (NavigableSubMap<K,?>) m;
+ return sm.keySpliterator();
+ }
+
+ final Spliterator<K> keySpliterator() {
+ return new KeySpliterator<K,V>(this, null, null, 0, -1, 0);
+ }
+
+ final Spliterator<K> descendingKeySpliterator() {
+ return new DescendingKeySpliterator<K,V>(this, null, null, 0, -2, 0);
+ }
+
+ /**
+ * Base class for spliterators. Iteration starts at a given
+ * origin and continues up to but not including a given fence (or
+ * null for end). At top-level, for ascending cases, the first
+ * split uses the root as left-fence/right-origin. From there,
+ * right-hand splits replace the current fence with its left
+ * child, also serving as origin for the split-off spliterator.
+ * Left-hands are symmetric. Descending versions place the origin
+ * at the end and invert ascending split rules. This base class
+ * is non-committal about directionality, or whether the top-level
+ * spliterator covers the whole tree. This means that the actual
+ * split mechanics are located in subclasses. Some of the subclass
+ * trySplit methods are identical (except for return types), but
+ * not nicely factorable.
+ *
+ * Currently, subclass versions exist only for the full map
+ * (including descending keys via its descendingMap). Others are
+ * possible but currently not worthwhile because submaps require
+ * O(n) computations to determine size, which substantially limits
+ * potential speed-ups of using custom Spliterators versus default
+ * mechanics.
+ *
+ * To bootstrap initialization, external constructors use
+ * negative size estimates: -1 for ascend, -2 for descend.
+ */
+ static class TreeMapSpliterator<K,V> {
+ final TreeMap<K,V> tree;
+ TreeMap.Entry<K,V> current; // traverser; initially first node in range
+ TreeMap.Entry<K,V> fence; // one past last, or null
+ int side; // 0: top, -1: is a left split, +1: right
+ int est; // size estimate (exact only for top-level)
+ int expectedModCount; // for CME checks
+
+ TreeMapSpliterator(TreeMap<K,V> tree,
+ TreeMap.Entry<K,V> origin, TreeMap.Entry<K,V> fence,
+ int side, int est, int expectedModCount) {
+ this.tree = tree;
+ this.current = origin;
+ this.fence = fence;
+ this.side = side;
+ this.est = est;
+ this.expectedModCount = expectedModCount;
+ }
+
+ final int getEstimate() { // force initialization
+ int s; TreeMap<K,V> t;
+ if ((s = est) < 0) {
+ if ((t = tree) != null) {
+ current = (s == -1) ? t.getFirstEntry() : t.getLastEntry();
+ s = est = t.size;
+ expectedModCount = t.modCount;
+ }
+ else
+ s = est = 0;
+ }
+ return s;
+ }
+
+ public final long estimateSize() {
+ return (long)getEstimate();
+ }
+ }
+
+ static final class KeySpliterator<K,V>
+ extends TreeMapSpliterator<K,V>
+ implements Spliterator<K> {
+ KeySpliterator(TreeMap<K,V> tree,
+ TreeMap.Entry<K,V> origin, TreeMap.Entry<K,V> fence,
+ int side, int est, int expectedModCount) {
+ super(tree, origin, fence, side, est, expectedModCount);
+ }
+
+ public KeySpliterator<K,V> trySplit() {
+ if (est < 0)
+ getEstimate(); // force initialization
+ int d = side;
+ TreeMap.Entry<K,V> e = current, f = fence,
+ s = ((e == null || e == f) ? null : // empty
+ (d == 0) ? tree.root : // was top
+ (d > 0) ? e.right : // was right
+ (d < 0 && f != null) ? f.left : // was left
+ null);
+ if (s != null && s != e && s != f &&
+ tree.compare(e.key, s.key) < 0) { // e not already past s
+ side = 1;
+ return new KeySpliterator<>
+ (tree, e, current = s, -1, est >>>= 1, expectedModCount);
+ }
+ return null;
+ }
+
+ public void forEachRemaining(Consumer<? super K> action) {
+ if (action == null)
+ throw new NullPointerException();
+ if (est < 0)
+ getEstimate(); // force initialization
+ TreeMap.Entry<K,V> f = fence, e, p, pl;
+ if ((e = current) != null && e != f) {
+ current = f; // exhaust
+ do {
+ action.accept(e.key);
+ if ((p = e.right) != null) {
+ while ((pl = p.left) != null)
+ p = pl;
+ }
+ else {
+ while ((p = e.parent) != null && e == p.right)
+ e = p;
+ }
+ } while ((e = p) != null && e != f);
+ if (tree.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ public boolean tryAdvance(Consumer<? super K> action) {
+ TreeMap.Entry<K,V> e;
+ if (action == null)
+ throw new NullPointerException();
+ if (est < 0)
+ getEstimate(); // force initialization
+ if ((e = current) == null || e == fence)
+ return false;
+ current = successor(e);
+ action.accept(e.key);
+ if (tree.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+
+ public int characteristics() {
+ return (side == 0 ? Spliterator.SIZED : 0) |
+ Spliterator.DISTINCT | Spliterator.SORTED | Spliterator.ORDERED;
+ }
+
+ public final Comparator<? super K> getComparator() {
+ return tree.comparator;
+ }
+
+ }
+
+ static final class DescendingKeySpliterator<K,V>
+ extends TreeMapSpliterator<K,V>
+ implements Spliterator<K> {
+ DescendingKeySpliterator(TreeMap<K,V> tree,
+ TreeMap.Entry<K,V> origin, TreeMap.Entry<K,V> fence,
+ int side, int est, int expectedModCount) {
+ super(tree, origin, fence, side, est, expectedModCount);
+ }
+
+ public DescendingKeySpliterator<K,V> trySplit() {
+ if (est < 0)
+ getEstimate(); // force initialization
+ int d = side;
+ TreeMap.Entry<K,V> e = current, f = fence,
+ s = ((e == null || e == f) ? null : // empty
+ (d == 0) ? tree.root : // was top
+ (d < 0) ? e.left : // was left
+ (d > 0 && f != null) ? f.right : // was right
+ null);
+ if (s != null && s != e && s != f &&
+ tree.compare(e.key, s.key) > 0) { // e not already past s
+ side = 1;
+ return new DescendingKeySpliterator<>
+ (tree, e, current = s, -1, est >>>= 1, expectedModCount);
+ }
+ return null;
+ }
+
+ public void forEachRemaining(Consumer<? super K> action) {
+ if (action == null)
+ throw new NullPointerException();
+ if (est < 0)
+ getEstimate(); // force initialization
+ TreeMap.Entry<K,V> f = fence, e, p, pr;
+ if ((e = current) != null && e != f) {
+ current = f; // exhaust
+ do {
+ action.accept(e.key);
+ if ((p = e.left) != null) {
+ while ((pr = p.right) != null)
+ p = pr;
+ }
+ else {
+ while ((p = e.parent) != null && e == p.left)
+ e = p;
+ }
+ } while ((e = p) != null && e != f);
+ if (tree.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ public boolean tryAdvance(Consumer<? super K> action) {
+ TreeMap.Entry<K,V> e;
+ if (action == null)
+ throw new NullPointerException();
+ if (est < 0)
+ getEstimate(); // force initialization
+ if ((e = current) == null || e == fence)
+ return false;
+ current = predecessor(e);
+ action.accept(e.key);
+ if (tree.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+
+ public int characteristics() {
+ return (side == 0 ? Spliterator.SIZED : 0) |
+ Spliterator.DISTINCT | Spliterator.ORDERED;
+ }
+ }
+
+ static final class ValueSpliterator<K,V>
+ extends TreeMapSpliterator<K,V>
+ implements Spliterator<V> {
+ ValueSpliterator(TreeMap<K,V> tree,
+ TreeMap.Entry<K,V> origin, TreeMap.Entry<K,V> fence,
+ int side, int est, int expectedModCount) {
+ super(tree, origin, fence, side, est, expectedModCount);
+ }
+
+ public ValueSpliterator<K,V> trySplit() {
+ if (est < 0)
+ getEstimate(); // force initialization
+ int d = side;
+ TreeMap.Entry<K,V> e = current, f = fence,
+ s = ((e == null || e == f) ? null : // empty
+ (d == 0) ? tree.root : // was top
+ (d > 0) ? e.right : // was right
+ (d < 0 && f != null) ? f.left : // was left
+ null);
+ if (s != null && s != e && s != f &&
+ tree.compare(e.key, s.key) < 0) { // e not already past s
+ side = 1;
+ return new ValueSpliterator<>
+ (tree, e, current = s, -1, est >>>= 1, expectedModCount);
+ }
+ return null;
+ }
+
+ public void forEachRemaining(Consumer<? super V> action) {
+ if (action == null)
+ throw new NullPointerException();
+ if (est < 0)
+ getEstimate(); // force initialization
+ TreeMap.Entry<K,V> f = fence, e, p, pl;
+ if ((e = current) != null && e != f) {
+ current = f; // exhaust
+ do {
+ action.accept(e.value);
+ if ((p = e.right) != null) {
+ while ((pl = p.left) != null)
+ p = pl;
+ }
+ else {
+ while ((p = e.parent) != null && e == p.right)
+ e = p;
+ }
+ } while ((e = p) != null && e != f);
+ if (tree.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ public boolean tryAdvance(Consumer<? super V> action) {
+ TreeMap.Entry<K,V> e;
+ if (action == null)
+ throw new NullPointerException();
+ if (est < 0)
+ getEstimate(); // force initialization
+ if ((e = current) == null || e == fence)
+ return false;
+ current = successor(e);
+ action.accept(e.value);
+ if (tree.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+
+ public int characteristics() {
+ return (side == 0 ? Spliterator.SIZED : 0);
+ }
+ }
+
+ static final class EntrySpliterator<K,V>
+ extends TreeMapSpliterator<K,V>
+ implements Spliterator<Map.Entry<K,V>> {
+ EntrySpliterator(TreeMap<K,V> tree,
+ TreeMap.Entry<K,V> origin, TreeMap.Entry<K,V> fence,
+ int side, int est, int expectedModCount) {
+ super(tree, origin, fence, side, est, expectedModCount);
+ }
+
+ public EntrySpliterator<K,V> trySplit() {
+ if (est < 0)
+ getEstimate(); // force initialization
+ int d = side;
+ TreeMap.Entry<K,V> e = current, f = fence,
+ s = ((e == null || e == f) ? null : // empty
+ (d == 0) ? tree.root : // was top
+ (d > 0) ? e.right : // was right
+ (d < 0 && f != null) ? f.left : // was left
+ null);
+ if (s != null && s != e && s != f &&
+ tree.compare(e.key, s.key) < 0) { // e not already past s
+ side = 1;
+ return new EntrySpliterator<>
+ (tree, e, current = s, -1, est >>>= 1, expectedModCount);
+ }
+ return null;
+ }
+
+ public void forEachRemaining(Consumer<? super Map.Entry<K, V>> action) {
+ if (action == null)
+ throw new NullPointerException();
+ if (est < 0)
+ getEstimate(); // force initialization
+ TreeMap.Entry<K,V> f = fence, e, p, pl;
+ if ((e = current) != null && e != f) {
+ current = f; // exhaust
+ do {
+ action.accept(e);
+ if ((p = e.right) != null) {
+ while ((pl = p.left) != null)
+ p = pl;
+ }
+ else {
+ while ((p = e.parent) != null && e == p.right)
+ e = p;
+ }
+ } while ((e = p) != null && e != f);
+ if (tree.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
+ TreeMap.Entry<K,V> e;
+ if (action == null)
+ throw new NullPointerException();
+ if (est < 0)
+ getEstimate(); // force initialization
+ if ((e = current) == null || e == fence)
+ return false;
+ current = successor(e);
+ action.accept(e);
+ if (tree.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+
+ public int characteristics() {
+ return (side == 0 ? Spliterator.SIZED : 0) |
+ Spliterator.DISTINCT | Spliterator.SORTED | Spliterator.ORDERED;
+ }
+
+ @Override
+ public Comparator<? super Map.Entry<K, V>> getComparator() {
+ return tree.comparator != null ?
+ Comparators.byKey(tree.comparator) : null;
+ }
+ }
}
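To illustrate the dispatch in keySpliteratorFor (hypothetical demo class, not in the patch): the full map gets the custom SORTED, SIZED KeySpliterator, while submap key views fall back to the iterator-backed SubMapKeyIterator, whose size is unknown.

    import java.util.Spliterator;
    import java.util.TreeMap;

    public class TreeMapSpliteratorDemo {
        public static void main(String[] args) {
            TreeMap<String, Integer> map = new TreeMap<>();
            map.put("b", 2); map.put("a", 1); map.put("c", 3);

            Spliterator<String> keys = map.keySet().spliterator();
            System.out.println(keys.hasCharacteristics(Spliterator.SORTED)); // true
            System.out.println(keys.estimateSize());                         // 3 (exact)

            Spliterator<String> sub = map.headMap("c").keySet().spliterator();
            System.out.println(sub.hasCharacteristics(Spliterator.SIZED));   // false
            System.out.println(sub.estimateSize());                          // Long.MAX_VALUE
        }
    }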
--- a/jdk/src/share/classes/java/util/TreeSet.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/TreeSet.java Tue Apr 23 11:13:38 2013 +0100
@@ -533,5 +533,9 @@
tm.readTreeSet(size, s, PRESENT);
}
+ public Spliterator<E> spliterator() {
+ return TreeMap.keySpliteratorFor(m);
+ }
+
private static final long serialVersionUID = -2479143000061671589L;
}
--- a/jdk/src/share/classes/java/util/Vector.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/Vector.java Tue Apr 23 11:13:38 2013 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1994, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1994, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,10 @@
package java.util;
+import java.util.function.Consumer;
+import java.util.function.Predicate;
+import java.util.function.UnaryOperator;
+
/**
* The {@code Vector} class implements a growable array of
* objects. Like an array, it contains components that can be
@@ -1151,6 +1157,28 @@
lastRet = -1;
}
+ @Override
+ public void forEachRemaining(Consumer<? super E> action) {
+ Objects.requireNonNull(action);
+ synchronized (Vector.this) {
+ final int size = Vector.this.elementCount;
+ int i = cursor;
+ if (i >= size) {
+ return;
+ }
+ final Object[] elementData = Vector.this.elementData;
+ if (i >= elementData.length) {
+ throw new ConcurrentModificationException();
+ }
+ while (i != size && modCount == expectedModCount) {
+ action.accept((E) elementData[i++]);
+ }
+ // update once at end of iteration to reduce heap write traffic
+ lastRet = cursor = i;
+ checkForComodification();
+ }
+ }
+
final void checkForComodification() {
if (modCount != expectedModCount)
throw new ConcurrentModificationException();
@@ -1209,4 +1237,181 @@
lastRet = -1;
}
}
+
+ @Override
+ public synchronized void forEach(Consumer<? super E> action) {
+ Objects.requireNonNull(action);
+ final int expectedModCount = modCount;
+ @SuppressWarnings("unchecked")
+ final E[] elementData = (E[]) this.elementData;
+ final int elementCount = this.elementCount;
+ for (int i=0; modCount == expectedModCount && i < elementCount; i++) {
+ action.accept(elementData[i]);
+ }
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public synchronized boolean removeIf(Predicate<? super E> filter) {
+ Objects.requireNonNull(filter);
+ // figure out which elements are to be removed
+ // any exception thrown from the filter predicate at this stage
+ // will leave the collection unmodified
+ int removeCount = 0;
+ final int size = elementCount;
+ final BitSet removeSet = new BitSet(size);
+ final int expectedModCount = modCount;
+ for (int i=0; modCount == expectedModCount && i < size; i++) {
+ @SuppressWarnings("unchecked")
+ final E element = (E) elementData[i];
+ if (filter.test(element)) {
+ removeSet.set(i);
+ removeCount++;
+ }
+ }
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+
+ // shift surviving elements left over the spaces left by removed elements
+ final boolean anyToRemove = removeCount > 0;
+ if (anyToRemove) {
+ final int newSize = size - removeCount;
+ for (int i=0, j=0; (i < size) && (j < newSize); i++, j++) {
+ i = removeSet.nextClearBit(i);
+ elementData[j] = elementData[i];
+ }
+ for (int k=newSize; k < size; k++) {
+ elementData[k] = null; // Let gc do its work
+ }
+ elementCount = newSize;
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+ modCount++;
+ }
+
+ return anyToRemove;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public synchronized void replaceAll(UnaryOperator<E> operator) {
+ Objects.requireNonNull(operator);
+ final int expectedModCount = modCount;
+ final int size = elementCount;
+ for (int i=0; modCount == expectedModCount && i < size; i++) {
+ elementData[i] = operator.apply((E) elementData[i]);
+ }
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+ modCount++;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public synchronized void sort(Comparator<? super E> c) {
+ final int expectedModCount = modCount;
+ Arrays.sort((E[]) elementData, 0, elementCount, c);
+ if (modCount != expectedModCount) {
+ throw new ConcurrentModificationException();
+ }
+ modCount++;
+ }
+
+ @Override
+ public Spliterator<E> spliterator() {
+ return new VectorSpliterator<>(this, null, 0, -1, 0);
+ }
+
+ /** Similar to ArrayList Spliterator */
+ static final class VectorSpliterator<E> implements Spliterator<E> {
+ private final Vector<E> list;
+ private Object[] array;
+ private int index; // current index, modified on advance/split
+ private int fence; // -1 until used; then one past last index
+ private int expectedModCount; // initialized when fence set
+
+ /** Create new spliterator covering the given range */
+ VectorSpliterator(Vector<E> list, Object[] array, int origin, int fence,
+ int expectedModCount) {
+ this.list = list;
+ this.array = array;
+ this.index = origin;
+ this.fence = fence;
+ this.expectedModCount = expectedModCount;
+ }
+
+ private int getFence() { // initialize on first use
+ int hi;
+ if ((hi = fence) < 0) {
+ synchronized(list) {
+ array = list.elementData;
+ expectedModCount = list.modCount;
+ hi = fence = list.elementCount;
+ }
+ }
+ return hi;
+ }
+
+ public Spliterator<E> trySplit() {
+ int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
+ return (lo >= mid) ? null :
+ new VectorSpliterator<E>(list, array, lo, index = mid,
+ expectedModCount);
+ }
+
+ @SuppressWarnings("unchecked")
+ public boolean tryAdvance(Consumer<? super E> action) {
+ int i;
+ if (action == null)
+ throw new NullPointerException();
+ if (getFence() > (i = index)) {
+ index = i + 1;
+ action.accept((E)array[i]);
+ if (list.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ return false;
+ }
+
+ @SuppressWarnings("unchecked")
+ public void forEachRemaining(Consumer<? super E> action) {
+ int i, hi; // hoist accesses and checks from loop
+ Vector<E> lst; Object[] a;
+ if (action == null)
+ throw new NullPointerException();
+ if ((lst = list) != null) {
+ if ((hi = fence) < 0) {
+ synchronized(lst) {
+ expectedModCount = lst.modCount;
+ a = array = lst.elementData;
+ hi = fence = lst.elementCount;
+ }
+ }
+ else
+ a = array;
+ if (a != null && (i = index) >= 0 && (index = hi) <= a.length) {
+ while (i < hi)
+ action.accept((E) a[i++]);
+ if (lst.modCount == expectedModCount)
+ return;
+ }
+ }
+ throw new ConcurrentModificationException();
+ }
+
+ public long estimateSize() {
+ return (long) (getFence() - index);
+ }
+
+ public int characteristics() {
+ return Spliterator.ORDERED | Spliterator.SIZED | Spliterator.SUBSIZED;
+ }
+ }
}
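Usage of the new synchronized bulk operations on Vector is unchanged from the Collection and List defaults they override; a hypothetical example:

    import java.util.Arrays;
    import java.util.Vector;

    public class VectorBulkOpsDemo {
        public static void main(String[] args) {
            Vector<Integer> v = new Vector<>(Arrays.asList(1, 2, 3, 4, 5));

            // Each call holds the Vector's monitor for the whole traversal.
            v.removeIf(n -> n % 2 == 0);   // [1, 3, 5]
            v.replaceAll(n -> n * 10);     // [10, 30, 50]
            v.sort(null);                  // null comparator: natural ordering

            v.forEach(n -> System.out.print(n + " "));
            System.out.println();
        }
    }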
--- a/jdk/src/share/classes/java/util/WeakHashMap.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/WeakHashMap.java Tue Apr 23 11:13:38 2013 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,8 +24,10 @@
*/
package java.util;
+
import java.lang.ref.WeakReference;
import java.lang.ref.ReferenceQueue;
+import java.util.function.Consumer;
/**
@@ -898,6 +900,10 @@
public void clear() {
WeakHashMap.this.clear();
}
+
+ public Spliterator<K> spliterator() {
+ return new KeySpliterator<>(WeakHashMap.this, 0, -1, 0, 0);
+ }
}
/**
@@ -934,6 +940,10 @@
public void clear() {
WeakHashMap.this.clear();
}
+
+ public Spliterator<V> spliterator() {
+ return new ValueSpliterator<>(WeakHashMap.this, 0, -1, 0, 0);
+ }
}
/**
@@ -994,5 +1004,288 @@
public <T> T[] toArray(T[] a) {
return deepCopy().toArray(a);
}
+
+ public Spliterator<Map.Entry<K,V>> spliterator() {
+ return new EntrySpliterator<>(WeakHashMap.this, 0, -1, 0, 0);
+ }
}
+
+ /**
+ * Similar form as other hash Spliterators, but skips dead
+ * elements.
+ */
+ static class WeakHashMapSpliterator<K,V> {
+ final WeakHashMap<K,V> map;
+ WeakHashMap.Entry<K,V> current; // current node
+ int index; // current index, modified on advance/split
+ int fence; // -1 until first use; then one past last index
+ int est; // size estimate
+ int expectedModCount; // for comodification checks
+
+ WeakHashMapSpliterator(WeakHashMap<K,V> m, int origin,
+ int fence, int est,
+ int expectedModCount) {
+ this.map = m;
+ this.index = origin;
+ this.fence = fence;
+ this.est = est;
+ this.expectedModCount = expectedModCount;
+ }
+
+ final int getFence() { // initialize fence and size on first use
+ int hi;
+ if ((hi = fence) < 0) {
+ WeakHashMap<K,V> m = map;
+ est = m.size();
+ expectedModCount = m.modCount;
+ hi = fence = m.table.length;
+ }
+ return hi;
+ }
+
+ public final long estimateSize() {
+ getFence(); // force init
+ return (long) est;
+ }
+ }
+
+ static final class KeySpliterator<K,V>
+ extends WeakHashMapSpliterator<K,V>
+ implements Spliterator<K> {
+ KeySpliterator(WeakHashMap<K,V> m, int origin, int fence, int est,
+ int expectedModCount) {
+ super(m, origin, fence, est, expectedModCount);
+ }
+
+ public KeySpliterator<K,V> trySplit() {
+ int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
+ return (lo >= mid) ? null :
+ new KeySpliterator<K,V>(map, lo, index = mid, est >>>= 1,
+ expectedModCount);
+ }
+
+ public void forEachRemaining(Consumer<? super K> action) {
+ int i, hi, mc;
+ if (action == null)
+ throw new NullPointerException();
+ WeakHashMap<K,V> m = map;
+ WeakHashMap.Entry<K,V>[] tab = m.table;
+ if ((hi = fence) < 0) {
+ mc = expectedModCount = m.modCount;
+ hi = fence = tab.length;
+ }
+ else
+ mc = expectedModCount;
+ if (tab.length >= hi && (i = index) >= 0 && i < hi) {
+ index = hi;
+ WeakHashMap.Entry<K,V> p = current;
+ do {
+ if (p == null)
+ p = tab[i++];
+ else {
+ Object x = p.get();
+ p = p.next;
+ if (x != null) {
+ @SuppressWarnings("unchecked") K k =
+ (K) WeakHashMap.unmaskNull(x);
+ action.accept(k);
+ }
+ }
+ } while (p != null || i < hi);
+ }
+ if (m.modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+
+ public boolean tryAdvance(Consumer<? super K> action) {
+ int hi;
+ if (action == null)
+ throw new NullPointerException();
+ WeakHashMap.Entry<K,V>[] tab = map.table;
+ if (tab.length >= (hi = getFence()) && index >= 0) {
+ while (current != null || index < hi) {
+ if (current == null)
+ current = tab[index++];
+ else {
+ Object x = current.get();
+ current = current.next;
+ if (x != null) {
+ @SuppressWarnings("unchecked") K k =
+ (K) WeakHashMap.unmaskNull(x);
+ action.accept(k);
+ if (map.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+ }
+
+ public int characteristics() {
+ return Spliterator.DISTINCT;
+ }
+ }
+
+ static final class ValueSpliterator<K,V>
+ extends WeakHashMapSpliterator<K,V>
+ implements Spliterator<V> {
+ ValueSpliterator(WeakHashMap<K,V> m, int origin, int fence, int est,
+ int expectedModCount) {
+ super(m, origin, fence, est, expectedModCount);
+ }
+
+ public ValueSpliterator<K,V> trySplit() {
+ int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
+ return (lo >= mid) ? null :
+ new ValueSpliterator<K,V>(map, lo, index = mid, est >>>= 1,
+ expectedModCount);
+ }
+
+ public void forEachRemaining(Consumer<? super V> action) {
+ int i, hi, mc;
+ if (action == null)
+ throw new NullPointerException();
+ WeakHashMap<K,V> m = map;
+ WeakHashMap.Entry<K,V>[] tab = m.table;
+ if ((hi = fence) < 0) {
+ mc = expectedModCount = m.modCount;
+ hi = fence = tab.length;
+ }
+ else
+ mc = expectedModCount;
+ if (tab.length >= hi && (i = index) >= 0 && i < hi) {
+ index = hi;
+ WeakHashMap.Entry<K,V> p = current;
+ do {
+ if (p == null)
+ p = tab[i++];
+ else {
+ Object x = p.get();
+ V v = p.value;
+ p = p.next;
+ if (x != null)
+ action.accept(v);
+ }
+ } while (p != null || i < hi);
+ }
+ if (m.modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+
+ public boolean tryAdvance(Consumer<? super V> action) {
+ int hi;
+ if (action == null)
+ throw new NullPointerException();
+ WeakHashMap.Entry<K,V>[] tab = map.table;
+ if (tab.length >= (hi = getFence()) && index >= 0) {
+ while (current != null || index < hi) {
+ if (current == null)
+ current = tab[index++];
+ else {
+ Object x = current.get();
+ V v = current.value;
+ current = current.next;
+ if (x != null) {
+ action.accept(v);
+ if (map.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+ }
+
+ public int characteristics() {
+ return 0;
+ }
+ }
+
+ static final class EntrySpliterator<K,V>
+ extends WeakHashMapSpliterator<K,V>
+ implements Spliterator<Map.Entry<K,V>> {
+ EntrySpliterator(WeakHashMap<K,V> m, int origin, int fence, int est,
+ int expectedModCount) {
+ super(m, origin, fence, est, expectedModCount);
+ }
+
+ public EntrySpliterator<K,V> trySplit() {
+ int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
+ return (lo >= mid) ? null :
+ new EntrySpliterator<K,V>(map, lo, index = mid, est >>>= 1,
+ expectedModCount);
+ }
+
+
+ public void forEachRemaining(Consumer<? super Map.Entry<K, V>> action) {
+ int i, hi, mc;
+ if (action == null)
+ throw new NullPointerException();
+ WeakHashMap<K,V> m = map;
+ WeakHashMap.Entry<K,V>[] tab = m.table;
+ if ((hi = fence) < 0) {
+ mc = expectedModCount = m.modCount;
+ hi = fence = tab.length;
+ }
+ else
+ mc = expectedModCount;
+ if (tab.length >= hi && (i = index) >= 0 && i < hi) {
+ index = hi;
+ WeakHashMap.Entry<K,V> p = current;
+ do {
+ if (p == null)
+ p = tab[i++];
+ else {
+ Object x = p.get();
+ V v = p.value;
+ p = p.next;
+ if (x != null) {
+ @SuppressWarnings("unchecked") K k =
+ (K) WeakHashMap.unmaskNull(x);
+ action.accept
+ (new AbstractMap.SimpleImmutableEntry<K,V>(k, v));
+ }
+ }
+ } while (p != null || i < hi);
+ }
+ if (m.modCount != mc)
+ throw new ConcurrentModificationException();
+ }
+
+ public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
+ int hi;
+ if (action == null)
+ throw new NullPointerException();
+ WeakHashMap.Entry<K,V>[] tab = map.table;
+ if (tab.length >= (hi = getFence()) && index >= 0) {
+ while (current != null || index < hi) {
+ if (current == null)
+ current = tab[index++];
+ else {
+ Object x = current.get();
+ V v = current.value;
+ current = current.next;
+ if (x != null) {
+ @SuppressWarnings("unchecked") K k =
+ (K) WeakHashMap.unmaskNull(x);
+ action.accept
+ (new AbstractMap.SimpleImmutableEntry<K,V>(k, v));
+ if (map.modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+ }
+
+ public int characteristics() {
+ return Spliterator.DISTINCT;
+ }
+ }
+
}
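A sketch of the WeakHashMap behaviour described above (the demo class is made up): the spliterators skip entries whose keys have been cleared, so they never report SIZED and the size is only an estimate; how many values print below depends on when the collector runs.

    import java.util.Map;
    import java.util.Spliterator;
    import java.util.WeakHashMap;

    public class WeakHashMapSpliteratorDemo {
        public static void main(String[] args) {
            Map<Object, String> m = new WeakHashMap<>();
            Object strong = new Object();          // strongly reachable key
            m.put(strong, "kept");
            m.put(new Object(), "collectable");    // may vanish after a GC

            Spliterator<String> s = m.values().spliterator();
            System.out.println(s.hasCharacteristics(Spliterator.SIZED)); // false
            s.forEachRemaining(System.out::println); // prints one or two values
        }
    }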
--- a/jdk/src/share/classes/java/util/concurrent/CopyOnWriteArrayList.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/concurrent/CopyOnWriteArrayList.java Tue Apr 23 11:13:38 2013 +0100
@@ -36,6 +36,9 @@
package java.util.concurrent;
import java.util.*;
import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Consumer;
+import java.util.function.Predicate;
+import java.util.function.UnaryOperator;
/**
* A thread-safe variant of {@link java.util.ArrayList} in which all mutative
@@ -1260,9 +1263,58 @@
}
}
+ @Override
+ public void forEach(Consumer<? super E> action) {
+ @SuppressWarnings("unchecked")
+ final E[] elements = (E[]) l.getArray();
+ checkForComodification();
+ l.forEach(action, elements, offset, offset + size);
+ }
+
+ @Override
+ public void sort(Comparator<? super E> c) {
+ final ReentrantLock lock = l.lock;
+ lock.lock();
+ try {
+ checkForComodification();
+ l.sort(c, offset, offset + size);
+ expectedArray = l.getArray();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public boolean removeIf(Predicate<? super E> filter) {
+ Objects.requireNonNull(filter);
+ final ReentrantLock lock = l.lock;
+ lock.lock();
+ try {
+ checkForComodification();
+ final int removeCount =
+ l.removeIf(filter, offset, offset + size);
+ expectedArray = l.getArray();
+ size -= removeCount;
+ return removeCount > 0;
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public void replaceAll(UnaryOperator<E> operator) {
+ final ReentrantLock lock = l.lock;
+ lock.lock();
+ try {
+ checkForComodification();
+ l.replaceAll(operator, offset, offset + size);
+ expectedArray = l.getArray();
+ } finally {
+ lock.unlock();
+ }
+ }
}
-
private static class COWSubListIterator<E> implements ListIterator<E> {
private final ListIterator<E> it;
private final int offset;
@@ -1333,4 +1385,139 @@
throw new Error(e);
}
}
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public void forEach(Consumer<? super E> action) {
+ forEach(action, (E[]) getArray(), 0, size());
+ }
+
+ private void forEach(Consumer<? super E> action,
+ final E[] elements,
+ final int from, final int to) {
+ Objects.requireNonNull(action);
+ for (int i = from; i < to; i++) {
+ action.accept(elements[i]);
+ }
+ }
+
+ @Override
+ public void sort(Comparator<? super E> c) {
+ final ReentrantLock lock = this.lock;
+ lock.lock();
+ try {
+ sort(c, 0, size());
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ // must be called with this.lock held
+ @SuppressWarnings("unchecked")
+ private void sort(Comparator<? super E> c, final int from, final int to) {
+ final E[] elements = (E[]) getArray();
+ final E[] newElements = Arrays.copyOf(elements, elements.length);
+ // only elements [from, to) are sorted
+ Arrays.sort(newElements, from, to, c);
+ setArray(newElements);
+ }
+
+ @Override
+ public boolean removeIf(Predicate<? super E> filter) {
+ Objects.requireNonNull(filter);
+ final ReentrantLock lock = this.lock;
+ lock.lock();
+ try {
+ return removeIf(filter, 0, size()) > 0;
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ // must be called with this.lock held
+ private int removeIf(Predicate<? super E> filter, final int from, final int to) {
+ Objects.requireNonNull(filter);
+ final ReentrantLock lock = this.lock;
+ lock.lock();
+ try {
+ @SuppressWarnings("unchecked")
+ final E[] elements = (E[]) getArray();
+
+ // figure out which elements are to be removed
+ // any exception thrown from the filter predicate at this stage
+ // will leave the collection unmodified
+ int removeCount = 0;
+ final int range = to - from;
+ final BitSet removeSet = new BitSet(range);
+ for (int i = 0; i < range; i++) {
+ final E element = elements[from + i];
+ if (filter.test(element)) {
+ // removeSet is zero-based to keep its size small
+ removeSet.set(i);
+ removeCount++;
+ }
+ }
+
+ // copy surviving elements into a new array
+ if (removeCount > 0) {
+ final int newSize = elements.length - removeCount;
+ final int newRange = newSize - from;
+ @SuppressWarnings("unchecked")
+ final E[] newElements = (E[]) new Object[newSize];
+ // copy elements before [from, to) unmodified
+ for (int i = 0; i < from; i++) {
+ newElements[i] = elements[i];
+ }
+ // elements [from, to) are subject to removal
+ int j = 0;
+ for (int i = 0; (i < range) && (j < newRange); i++) {
+ i = removeSet.nextClearBit(i);
+ if (i >= range) {
+ break;
+ }
+ newElements[from + (j++)] = elements[from + i];
+ }
+ // copy any remaining elements beyond [from, to)
+ j += from;
+ for (int i = to; (i < elements.length) && (j < newSize); i++) {
+ newElements[j++] = elements[i];
+ }
+ setArray(newElements);
+ }
+
+ return removeCount;
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ @Override
+ public void replaceAll(UnaryOperator<E> operator) {
+ Objects.requireNonNull(operator);
+ final ReentrantLock lock = this.lock;
+ lock.lock();
+ try {
+ replaceAll(operator, 0, size());
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ // must be called with this.lock held
+ @SuppressWarnings("unchecked")
+ private void replaceAll(UnaryOperator<E> operator, final int from, final int to) {
+ final E[] elements = (E[]) getArray();
+ final E[] newElements = (E[]) new Object[elements.length];
+ for (int i = 0; i < from; i++) {
+ newElements[i] = elements[i];
+ }
+ // the operator is only applied to elements [from, to)
+ for (int i = from; i < to; i++) {
+ newElements[i] = operator.apply(elements[i]);
+ }
+ for (int i = to; i < elements.length; i++) {
+ newElements[i] = elements[i];
+ }
+ setArray(newElements);
+ }
}
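The new CopyOnWriteArrayList bulk mutators keep the copy-on-write contract: each builds a fresh array under the lock and installs it with setArray, so existing iterators keep their snapshot. A hypothetical example (not from the patch):

    import java.util.concurrent.CopyOnWriteArrayList;

    public class CowBulkOpsDemo {
        public static void main(String[] args) {
            CopyOnWriteArrayList<String> list =
                new CopyOnWriteArrayList<>(new String[] {"a", "bb", "ccc"});

            list.removeIf(s -> s.length() == 2);   // ["a", "ccc"]
            list.replaceAll(String::toUpperCase);  // ["A", "CCC"]
            list.sort(null);                       // natural ordering

            list.forEach(System.out::println);
        }
    }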
--- a/jdk/src/share/classes/java/util/logging/LogManager.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/java/util/logging/LogManager.java Tue Apr 23 11:13:38 2013 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,10 +35,8 @@
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.beans.PropertyChangeListener;
-import java.net.URL;
import sun.misc.JavaAWTAccess;
import sun.misc.SharedSecrets;
-import sun.security.action.GetPropertyAction;
/**
* There is a single global LogManager object that is used to
@@ -148,7 +146,6 @@
// The global LogManager object
private static LogManager manager;
- private final static Handler[] emptyHandlers = { };
private Properties props = new Properties();
private final static Level defaultLevel = Level.INFO;
@@ -555,14 +552,10 @@
if (name == null) {
throw new NullPointerException();
}
-
- // cleanup some Loggers that have been GC'ed
- manager.drainLoggerRefQueueBounded();
-
LoggerWeakRef ref = namedLoggers.get(name);
if (ref != null) {
if (ref.get() == null) {
- // It's possible that the Logger was GC'ed after the
+ // It's possible that the Logger was GC'ed after a
// drainLoggerRefQueueBounded() call above so allow
// a new one to be registered.
removeLogger(name);
@@ -614,6 +607,8 @@
return true;
}
+ // note: all calls to removeLogger are synchronized on LogManager's
+ // intrinsic lock
void removeLogger(String name) {
namedLoggers.remove(name);
}
@@ -896,6 +891,7 @@
if (name == null) {
throw new NullPointerException();
}
+ drainLoggerRefQueueBounded();
LoggerContext cx = getUserContext();
if (cx.addLocalLogger(logger)) {
// Do we have a per logger handler too?
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/AbstractShortCircuitTask.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Spliterator;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * Abstract class for fork-join tasks used to implement short-circuiting
+ * stream ops, which can produce a result without processing all elements of the
+ * stream.
+ *
+ * @param <P_IN> type of input elements to the pipeline
+ * @param <P_OUT> type of output elements from the pipeline
+ * @param <R> type of intermediate result, may be different from operation
+ * result type
+ * @param <K> type of child and sibling tasks
+ * @since 1.8
+ */
+abstract class AbstractShortCircuitTask<P_IN, P_OUT, R,
+ K extends AbstractShortCircuitTask<P_IN, P_OUT, R, K>>
+ extends AbstractTask<P_IN, P_OUT, R, K> {
+ /**
+ * The result for this computation; this is shared among all tasks and set
+ * exactly once
+ */
+ protected final AtomicReference<R> sharedResult;
+
+ /**
+ * Indicates whether this task has been canceled. Tasks may cancel other
+     * tasks in the computation under various conditions; for example, in a
+ * find-first operation, a task that finds a value will cancel all tasks
+ * that are later in the encounter order.
+ */
+ protected volatile boolean canceled;
+
+ /**
+ * Constructor for root tasks.
+ *
+ * @param helper the {@code PipelineHelper} describing the stream pipeline
+ * up to this operation
+ * @param spliterator the {@code Spliterator} describing the source for this
+ * pipeline
+ */
+ protected AbstractShortCircuitTask(PipelineHelper<P_OUT> helper,
+ Spliterator<P_IN> spliterator) {
+ super(helper, spliterator);
+ sharedResult = new AtomicReference<>(null);
+ }
+
+ /**
+ * Constructor for non-root nodes.
+ *
+ * @param parent parent task in the computation tree
+ * @param spliterator the {@code Spliterator} for the portion of the
+ * computation tree described by this task
+ */
+ protected AbstractShortCircuitTask(K parent,
+ Spliterator<P_IN> spliterator) {
+ super(parent, spliterator);
+ sharedResult = parent.sharedResult;
+ }
+
+ /**
+ * Returns the value indicating the computation completed with no task
+ * finding a short-circuitable result. For example, for a "find" operation,
+ * this might be null or an empty {@code Optional}.
+ *
+ * @return the result to return when no task finds a result
+ */
+ protected abstract R getEmptyResult();
+
+ @Override
+ protected boolean canCompute() {
+ // Have we already found an answer?
+ if (sharedResult.get() != null) {
+ tryComplete();
+ return false;
+ } else if (taskCanceled()) {
+ setLocalResult(getEmptyResult());
+ tryComplete();
+ return false;
+ }
+ else {
+ return true;
+ }
+ }
+
+ /**
+ * Declares that a globally valid result has been found. If another task has
+ * not already found the answer, the result is installed in
+ * {@code sharedResult}. The {@code compute()} method will check
+ * {@code sharedResult} before proceeding with computation, so this causes
+ * the computation to terminate early.
+ *
+ * @param result the result found
+ */
+ protected void shortCircuit(R result) {
+ if (result != null)
+ sharedResult.compareAndSet(null, result);
+ }
+
+ /**
+ * Sets a local result for this task. If this task is the root, set the
+ * shared result instead (if not already set).
+ *
+ * @param localResult The result to set for this task
+ */
+ @Override
+ protected void setLocalResult(R localResult) {
+ if (isRoot()) {
+ if (localResult != null)
+ sharedResult.compareAndSet(null, localResult);
+ }
+ else
+ super.setLocalResult(localResult);
+ }
+
+ /**
+ * Retrieves the local result for this task
+ */
+ @Override
+ public R getRawResult() {
+ return getLocalResult();
+ }
+
+ /**
+ * Retrieves the local result for this task. If this task is the root,
+ * retrieves the shared result instead.
+ */
+ @Override
+ public R getLocalResult() {
+ if (isRoot()) {
+ R answer = sharedResult.get();
+ return (answer == null) ? getEmptyResult() : answer;
+ }
+ else
+ return super.getLocalResult();
+ }
+
+ /**
+ * Mark this task as canceled
+ */
+ protected void cancel() {
+ canceled = true;
+ }
+
+ /**
+ * Queries whether this task is canceled. A task is considered canceled if
+ * it or any of its parents have been canceled.
+ *
+ * @return {@code true} if this task or any parent is canceled.
+ */
+ protected boolean taskCanceled() {
+ boolean cancel = canceled;
+ if (!cancel) {
+ for (K parent = getParent(); !cancel && parent != null; parent = parent.getParent())
+ cancel = parent.canceled;
+ }
+
+ return cancel;
+ }
+
+ /**
+ * Cancels all tasks which succeed this one in the encounter order. This
+     * includes canceling the current task's right sibling, as well as the
+ * later right siblings of all its parents.
+ */
+ protected void cancelLaterNodes() {
+ // Go up the tree, cancel right siblings of this node and all parents
+ for (K parent = getParent(), node = (K) this; parent != null;
+ node = parent, parent = parent.getParent()) {
+            // If node is a left child of parent, then it has a right sibling
+ if (parent.leftChild == node) {
+ K rightSibling = parent.rightChild;
+ if (!rightSibling.canceled)
+ rightSibling.cancel();
+ }
+ }
+ }
+}
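
To make the shared-result protocol above concrete outside of the stream internals, the following is a standalone CountedCompleter sketch of the same idea (all names are hypothetical, and a plain AtomicReference check stands in for canCompute()/shortCircuit()): subtasks stop early once any task has published a result.

    import java.util.concurrent.CountedCompleter;
    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.atomic.AtomicReference;
    import java.util.function.IntPredicate;

    // Hypothetical analogue of the short-circuit protocol: find any element matching a predicate.
    class FindAnyTask extends CountedCompleter<Integer> {
        private final int[] data;
        private final int lo, hi;
        private final IntPredicate match;
        private final AtomicReference<Integer> sharedResult;   // set at most once, shared by all tasks

        FindAnyTask(FindAnyTask parent, int[] data, int lo, int hi,
                    IntPredicate match, AtomicReference<Integer> sharedResult) {
            super(parent);
            this.data = data; this.lo = lo; this.hi = hi;
            this.match = match; this.sharedResult = sharedResult;
        }

        @Override
        public void compute() {
            if (sharedResult.get() != null) {      // analogue of canCompute(): answer already found
                tryComplete();
            } else if (hi - lo <= 1024) {          // leaf: scan this chunk directly
                for (int i = lo; i < hi && sharedResult.get() == null; i++) {
                    if (match.test(data[i])) {
                        sharedResult.compareAndSet(null, data[i]);   // analogue of shortCircuit()
                        break;
                    }
                }
                tryComplete();
            } else {                               // internal node: fork the left half, run the right here
                int mid = (lo + hi) >>> 1;
                setPendingCount(1);                // completion arrives via the two children
                new FindAnyTask(this, data, lo, mid, match, sharedResult).fork();
                new FindAnyTask(this, data, mid, hi, match, sharedResult).compute();
            }
        }

        @Override
        public Integer getRawResult() { return sharedResult.get(); }

        static Integer findAny(int[] data, IntPredicate match) {
            AtomicReference<Integer> result = new AtomicReference<>();
            return ForkJoinPool.commonPool().invoke(
                    new FindAnyTask(null, data, 0, data.length, match, result));
        }
    }

A call such as FindAnyTask.findAny(values, x -> x % 97 == 0) returns some matching element, or null if none is found.
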
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/AbstractTask.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,373 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Spliterator;
+import java.util.concurrent.CountedCompleter;
+import java.util.concurrent.ForkJoinPool;
+
+/**
+ * Abstract base class for most fork-join tasks used to implement stream ops.
+ * Manages splitting logic, tracking of child tasks, and intermediate results.
+ * Each task is associated with a {@link Spliterator} that describes the portion
+ * of the input associated with the subtree rooted at this task.
+ * Tasks may be leaf nodes (which will traverse the elements of
+ * the {@code Spliterator}) or internal nodes (which split the
+ * {@code Spliterator} into multiple child tasks).
+ *
+ * @implNote
+ * <p>This class is based on {@link CountedCompleter}, a form of fork-join task
+ * where each task has a semaphore-like count of uncompleted children, and the
+ * task is implicitly completed and notified when its last child completes.
+ * Internal node tasks will likely override the {@code onCompletion} method from
+ * {@code CountedCompleter} to merge the results from child tasks into the
+ * current task's result.
+ *
+ * <p>Splitting and setting up the child task links is done by {@code compute()}
+ * for internal nodes. At {@code compute()} time for leaf nodes, it is
+ * guaranteed that the parent's child-related fields (including sibling links
+ * for the parent's children) will be set up for all children.
+ *
+ * <p>For example, a task that performs a reduce would override {@code doLeaf()}
+ * to perform a reduction on that leaf node's chunk using the
+ * {@code Spliterator}, and override {@code onCompletion()} to merge the results
+ * of the child tasks for internal nodes:
+ *
+ * <pre>{@code
+ * protected S doLeaf() {
+ * spliterator.forEach(...);
+ * return localReductionResult;
+ * }
+ *
+ * public void onCompletion(CountedCompleter caller) {
+ * if (!isLeaf()) {
+ * ReduceTask<P_IN, P_OUT, T, R> child = children;
+ * R result = child.getLocalResult();
+ * child = child.nextSibling;
+ * for (; child != null; child = child.nextSibling)
+ * result = combine(result, child.getLocalResult());
+ * setLocalResult(result);
+ * }
+ * }
+ * }</pre>
+ *
+ * @param <P_IN> Type of elements input to the pipeline
+ * @param <P_OUT> Type of elements output from the pipeline
+ * @param <R> Type of intermediate result, which may be different from operation
+ * result type
+ * @param <K> Type of parent, child and sibling tasks
+ * @since 1.8
+ */
+abstract class AbstractTask<P_IN, P_OUT, R,
+ K extends AbstractTask<P_IN, P_OUT, R, K>>
+ extends CountedCompleter<R> {
+
+ /**
+ * Default target factor of leaf tasks for parallel decomposition.
+ * To allow load balancing, we over-partition, currently to approximately
+ * four tasks per processor, which enables others to help out
+ * if leaf tasks are uneven or some processors are otherwise busy.
+ */
+ static final int LEAF_TARGET = ForkJoinPool.getCommonPoolParallelism() << 2;
+
+ /** The pipeline helper, common to all tasks in a computation */
+ protected final PipelineHelper<P_OUT> helper;
+
+ /**
+ * The spliterator for the portion of the input associated with the subtree
+ * rooted at this task
+ */
+ protected Spliterator<P_IN> spliterator;
+
+ /** Target leaf size, common to all tasks in a computation */
+ protected final long targetSize;
+
+ /**
+ * The left child.
+ * null if no children
+ * if non-null rightChild is non-null
+ */
+ protected K leftChild;
+
+ /**
+ * The right child.
+ * null if no children
+ * if non-null leftChild is non-null
+ */
+ protected K rightChild;
+
+ /** The result of this node, if completed */
+ private R localResult;
+
+ /**
+ * Constructor for root nodes.
+ *
+ * @param helper The {@code PipelineHelper} describing the stream pipeline
+ * up to this operation
+ * @param spliterator The {@code Spliterator} describing the source for this
+ * pipeline
+ */
+ protected AbstractTask(PipelineHelper<P_OUT> helper,
+ Spliterator<P_IN> spliterator) {
+ super(null);
+ this.helper = helper;
+ this.spliterator = spliterator;
+ this.targetSize = suggestTargetSize(spliterator.estimateSize());
+ }
+
+ /**
+ * Constructor for non-root nodes.
+ *
+ * @param parent this node's parent task
+ * @param spliterator {@code Spliterator} describing the subtree rooted at
+ * this node, obtained by splitting the parent {@code Spliterator}
+ */
+ protected AbstractTask(K parent,
+ Spliterator<P_IN> spliterator) {
+ super(parent);
+ this.spliterator = spliterator;
+ this.helper = parent.helper;
+ this.targetSize = parent.targetSize;
+ }
+
+ /**
+ * Constructs a new node of type T whose parent is the receiver; must call
+ * the AbstractTask(T, Spliterator) constructor with the receiver and the
+ * provided Spliterator.
+ *
+ * @param spliterator {@code Spliterator} describing the subtree rooted at
+ * this node, obtained by splitting the parent {@code Spliterator}
+ * @return newly constructed child node
+ */
+ protected abstract K makeChild(Spliterator<P_IN> spliterator);
+
+ /**
+ * Computes the result associated with a leaf node. Will be called by
+     * {@code compute()} and the result passed to {@code setLocalResult()}
+ *
+ * @return the computed result of a leaf node
+ */
+ protected abstract R doLeaf();
+
+ /**
+ * Returns a suggested target leaf size based on the initial size estimate.
+ *
+ * @return suggested target leaf size
+ */
+ public static long suggestTargetSize(long sizeEstimate) {
+ long est = sizeEstimate / LEAF_TARGET;
+ return est > 0L ? est : 1L;
+ }
+
+ /**
+ * Returns a suggestion whether it is advisable to split the provided
+ * spliterator based on target size and other considerations, such as pool
+ * state.
+ *
+ * @return {@code true} if a split is advised otherwise {@code false}
+ */
+ public static boolean suggestSplit(Spliterator spliterator,
+ long targetSize) {
+ long remaining = spliterator.estimateSize();
+ return (remaining > targetSize);
+ // @@@ May additionally want to fold in pool characteristics such as surplus task count
+ }
+
+ /**
+     * Returns a suggestion whether it is advisable to split this task based on
+ * target size and other considerations.
+ *
+ * @return {@code true} if a split is advised otherwise {@code false}
+ */
+ public boolean suggestSplit() {
+ return suggestSplit(spliterator, targetSize);
+ }
+
+ /**
+ * Returns the local result, if any. Subclasses should use
+ * {@link #setLocalResult(Object)} and {@link #getLocalResult()} to manage
+ * results. This returns the local result so that calls from within the
+ * fork-join framework will return the correct result.
+ *
+ * @return local result for this node previously stored with
+ * {@link #setLocalResult}
+ */
+ @Override
+ public R getRawResult() {
+ return localResult;
+ }
+
+ /**
+ * Does nothing; instead, subclasses should use
+     * {@link #setLocalResult(Object)} to manage results.
+ *
+ * @param result must be null, or an exception is thrown (this is a safety
+ * tripwire to detect when {@code setRawResult()} is being used
+     * instead of {@code setLocalResult()})
+ */
+ @Override
+ protected void setRawResult(R result) {
+ if (result != null)
+ throw new IllegalStateException();
+ }
+
+ /**
+ * Retrieves a result previously stored with {@link #setLocalResult}
+ *
+ * @return local result for this node previously stored with
+ * {@link #setLocalResult}
+ */
+ protected R getLocalResult() {
+ return localResult;
+ }
+
+ /**
+ * Associates the result with the task, can be retrieved with
+ * {@link #getLocalResult}
+ *
+ * @param localResult local result for this node
+ */
+ protected void setLocalResult(R localResult) {
+ this.localResult = localResult;
+ }
+
+ /**
+ * Indicates whether this task is a leaf node. (Only valid after
+     * {@link #compute} has been called on this node.) If the node is not a
+     * leaf node, then {@code leftChild} and {@code rightChild} will be
+     * non-null.
+ *
+ * @return {@code true} if this task is a leaf node
+ */
+ protected boolean isLeaf() {
+ return leftChild == null;
+ }
+
+ /**
+ * Indicates whether this task is the root node
+ *
+ * @return {@code true} if this task is the root node.
+ */
+ protected boolean isRoot() {
+ return getParent() == null;
+ }
+
+ /**
+ * Returns the parent of this task, or null if this task is the root
+ *
+ * @return the parent of this task, or null if this task is the root
+ */
+ @SuppressWarnings("unchecked")
+ protected K getParent() {
+ return (K) getCompleter();
+ }
+
+ /**
+ * Decides whether or not to split a task further or compute it directly. If
+ * computing directly, call {@code doLeaf} and pass the result to
+     * {@code setLocalResult}. If splitting, set up the child-related fields,
+ * create the child tasks, fork the leftmost (prefix) child tasks, and
+ * compute the rightmost (remaining) child tasks.
+ *
+ * <p>
+ * Computing will continue for rightmost tasks while a task can be computed
+ * as determined by {@link #canCompute()} and that task should and can be
+ * split into left and right tasks.
+ *
+ * <p>
+ * The rightmost tasks are computed in a loop rather than recursively to
+ * avoid potential stack overflows when computing with a right-balanced
+ * tree, such as that produced when splitting with a {@link Spliterator}
+ * created from an {@link java.util.Iterator}.
+ */
+ @Override
+ public final void compute() {
+ @SuppressWarnings("unchecked")
+ K task = (K) this;
+ while (task.canCompute()) {
+ Spliterator<P_IN> split;
+ if (!task.suggestSplit() || (split = task.spliterator.trySplit()) == null) {
+ task.setLocalResult(task.doLeaf());
+ task.tryComplete();
+ return;
+ }
+ else {
+ K l = task.leftChild = task.makeChild(split);
+ K r = task.rightChild = task.makeChild(task.spliterator);
+ task.setPendingCount(1);
+ l.fork();
+ task = r;
+ }
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @implNote
+ * Clears spliterator and children fields. Overriders MUST call
+ * {@code super.onCompletion} as the last thing they do if they want these
+ * cleared.
+ */
+ @Override
+ public void onCompletion(CountedCompleter<?> caller) {
+ spliterator = null;
+ leftChild = rightChild = null;
+ }
+
+ /**
+ * Determines if the task can be computed.
+ *
+ * @implSpec The default always returns true
+ *
+ * @return {@code true} if this task can be computed to either calculate the
+ * leaf via {@link #doLeaf()} or split, otherwise false if this task
+ * cannot be computed, for example if this task has been canceled
+ * and/or a result for the computation has been found by another
+ * task.
+ */
+ protected boolean canCompute() {
+ return true;
+ }
+
+ /**
+ * Returns whether this node is a "leftmost" node -- whether the path from
+ * the root to this node involves only traversing leftmost child links. For
+ * a leaf node, this means it is the first leaf node in the encounter order.
+ *
+ * @return {@code true} if this node is a "leftmost" node
+ */
+ protected boolean isLeftmostNode() {
+ @SuppressWarnings("unchecked")
+ K node = (K) this;
+ while (node != null) {
+ K parent = node.getParent();
+ if (parent != null && parent.leftChild != node)
+ return false;
+ node = parent;
+ }
+ return true;
+ }
+}
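
The iterative decomposition in compute() above (fork the left child, keep looping on the right child in the same frame, and let leaves drive completion) can be sketched with a standalone CountedCompleter that sums an array. The names and the fixed threshold below are hypothetical stand-ins for the helper, target size, and child fields of the real class.

    import java.util.concurrent.CountedCompleter;
    import java.util.concurrent.ForkJoinPool;

    // Hypothetical analogue of the compute() loop: split while worthwhile, fork left, iterate on right.
    class SumTask extends CountedCompleter<Long> {
        private final long[] data;
        private final int lo, hi, threshold;
        private SumTask left, right;     // analogue of leftChild / rightChild
        private long result;             // analogue of the local result

        SumTask(SumTask parent, long[] data, int lo, int hi, int threshold) {
            super(parent);
            this.data = data; this.lo = lo; this.hi = hi; this.threshold = threshold;
        }

        @Override
        public void compute() {
            SumTask task = this;
            while (task.hi - task.lo > threshold) {
                int mid = (task.lo + task.hi) >>> 1;
                SumTask l = task.left  = new SumTask(task, data, task.lo, mid, threshold);
                SumTask r = task.right = new SumTask(task, data, mid, task.hi, threshold);
                task.setPendingCount(1);   // one child signal is absorbed before onCompletion fires
                l.fork();                  // hand the left half to another worker
                task = r;                  // keep going on the right half, no recursion
            }
            long sum = 0;                  // leaf: compute directly
            for (int i = task.lo; i < task.hi; i++)
                sum += data[i];
            task.result = sum;
            task.tryComplete();            // leaves drive completion up the tree
        }

        @Override
        public void onCompletion(CountedCompleter<?> caller) {
            if (left != null)              // internal node: merge the children's results
                result = left.result + right.result;
        }

        @Override
        public Long getRawResult() { return result; }

        static long sum(long[] data) {
            return ForkJoinPool.commonPool().invoke(new SumTask(null, data, 0, data.length, 4096));
        }
    }
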
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/BaseStream.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Iterator;
+import java.util.Spliterator;
+
+/**
+ * Base interface for stream types such as {@link Stream}, {@link IntStream},
+ * etc. Contains methods common to all stream types. Many of these methods
+ * are implemented by {@link AbstractPipeline}, even though
+ * {@code AbstractPipeline} does not directly implement {@code BaseStream}.
+ *
+ * @param <T> type of stream elements
+ * @param <S> type of stream implementing {@code BaseStream}
+ * @since 1.8
+ */
+interface BaseStream<T, S extends BaseStream<T, S>> {
+ /**
+ * Returns an iterator for the elements of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return the element iterator for this stream
+ */
+ Iterator<T> iterator();
+
+ /**
+ * Returns a spliterator for the elements of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return the element spliterator for this stream
+ */
+ Spliterator<T> spliterator();
+
+ /**
+ * Returns whether this stream, when executed, would execute in parallel
+ * (assuming no further modification of the stream, such as appending
+ * further intermediate operations or changing its parallelism). Calling
+ * this method after invoking an intermediate or terminal stream operation
+ * method may yield unpredictable results.
+ *
+ * @return {@code true} if this stream would execute in parallel if executed
+ * without further modification otherwise {@code false}
+ */
+ boolean isParallel();
+
+ /**
+ * Returns an equivalent stream that is sequential. May return
+ * itself, either because the stream was already sequential, or because
+ * the underlying stream state was modified to be sequential.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @return a sequential stream
+ */
+ S sequential();
+
+ /**
+ * Returns an equivalent stream that is parallel. May return
+ * itself, either because the stream was already parallel, or because
+ * the underlying stream state was modified to be parallel.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @return a parallel stream
+ */
+ S parallel();
+
+ /**
+ * Returns an equivalent stream that is
+ * <a href="package-summary.html#Ordering">unordered</a>. May return
+ * itself if the stream was already unordered.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @return an unordered stream
+ */
+ S unordered();
+}
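
A brief usage sketch of the mode-switching methods above, seen through the public Stream type (this assumes the Collection.stream()/parallelStream() factories from the same library):

    import java.util.Arrays;
    import java.util.List;

    public class StreamModesDemo {
        public static void main(String[] args) {
            List<String> words = Arrays.asList("a", "bb", "ccc");

            // Switch a sequential pipeline into parallel mode before the terminal operation.
            long count = words.stream()
                              .parallel()
                              .filter(w -> w.length() > 1)
                              .count();
            System.out.println(count);                                            // 2

            // isParallel() reports the current mode of the pipeline.
            System.out.println(words.stream().isParallel());                      // false
            System.out.println(words.parallelStream().sequential().isParallel()); // false
        }
    }
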
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/CloseableStream.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package java.util.stream;
+
+/**
+ * A {@code CloseableStream} is a {@code Stream} that can be closed.
+ * The close method is invoked to release resources that the object is
+ * holding (such as open files).
+ *
+ * @param <T> The type of stream elements
+ * @since 1.8
+ */
+public interface CloseableStream<T> extends Stream<T>, AutoCloseable {
+
+ /**
+ * Closes this resource, relinquishing any underlying resources.
+ * This method is invoked automatically on objects managed by the
+ * {@code try}-with-resources statement. Does nothing if called when
+ * the resource has already been closed.
+ *
+ * Unlike {@link AutoCloseable#close() AutoCloseable.close()}, this method
+ * does not allow a checked {@code Exception} to be thrown. Cases where the
+ * close operation may fail require careful attention by implementers. It
+ * is strongly advised to relinquish the underlying resources and to
+ * internally <em>mark</em> the resource as closed. The {@code close}
+ * method is unlikely to be invoked more than once and so this ensures
+ * that the resources are released in a timely manner. Furthermore it
+ * reduces problems that could arise when the resource wraps, or is
+ * wrapped, by another resource.
+ *
+ * @see AutoCloseable#close()
+ */
+ void close();
+}
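
Because CloseableStream extends both Stream and AutoCloseable, it drops straight into try-with-resources. Below is a minimal sketch pairing it with the DelegatingStream added later in this patch; the class name, file name, and the use of BufferedReader.lines()/Files.newBufferedReader(Path) are assumptions for illustration, not part of this change.

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.stream.CloseableStream;
    import java.util.stream.DelegatingStream;
    import java.util.stream.Stream;

    // Hypothetical CloseableStream over the lines of a file: Stream calls are delegated,
    // and close() releases the underlying reader without throwing a checked exception.
    class LineStream extends DelegatingStream<String> implements CloseableStream<String> {
        private final BufferedReader reader;

        private LineStream(BufferedReader reader, Stream<String> lines) {
            super(lines);
            this.reader = reader;
        }

        static LineStream open(String path) throws IOException {
            BufferedReader reader = Files.newBufferedReader(Paths.get(path));
            return new LineStream(reader, reader.lines());
        }

        @Override
        public void close() {
            try {
                reader.close();
            } catch (IOException e) {
                throw new RuntimeException(e);   // close() cannot throw checked exceptions
            }
        }
    }

    // Usage: the stream is closed automatically when the try block exits.
    // try (CloseableStream<String> lines = LineStream.open("data.txt")) {
    //     lines.filter(s -> !s.isEmpty()).forEach(System.out::println);
    // }
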
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/Collector.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Collections;
+import java.util.Set;
+import java.util.function.BiFunction;
+import java.util.function.BinaryOperator;
+import java.util.function.Supplier;
+
+/**
+ * A <a href="package-summary.html#Reduction">reduction operation</a> that
+ * supports folding input elements into a cumulative result. The result may be
+ * a value or may be a mutable result container. Examples of operations
+ * accumulating results into a mutable result container include: accumulating
+ * input elements into a {@code Collection}; concatenating strings into a
+ * {@code StringBuilder}; computing summary information about elements such as
+ * sum, min, max, or average; computing "pivot table" summaries such as "maximum
+ * valued transaction by seller", etc. Reduction operations can be performed
+ * either sequentially or in parallel.
+ *
+ * <p>The following are examples of using the predefined {@code Collector}
+ * implementations in {@link Collectors} with the {@code Stream} API to perform
+ * mutable reduction tasks:
+ * <pre>{@code
+ * // Accumulate elements into a List
+ * List<String> list = stream.collect(Collectors.toList());
+ *
+ * // Accumulate elements into a TreeSet
+ *     Set<String> set = stream.collect(Collectors.toCollection(TreeSet::new));
+ *
+ * // Convert elements to strings and concatenate them, separated by commas
+ * String joined = stream.map(Object::toString)
+ * .collect(Collectors.toStringJoiner(", "))
+ * .toString();
+ *
+ * // Find highest-paid employee
+ * Employee highestPaid = employees.stream()
+ * .collect(Collectors.maxBy(Comparators.comparing(Employee::getSalary)));
+ *
+ * // Group employees by department
+ * Map<Department, List<Employee>> byDept
+ * = employees.stream()
+ * .collect(Collectors.groupingBy(Employee::getDepartment));
+ *
+ * // Find highest-paid employee by department
+ * Map<Department, Employee> highestPaidByDept
+ * = employees.stream()
+ * .collect(Collectors.groupingBy(Employee::getDepartment,
+ * Collectors.maxBy(Comparators.comparing(Employee::getSalary))));
+ *
+ * // Partition students into passing and failing
+ * Map<Boolean, List<Student>> passingFailing =
+ * students.stream()
+ *     .collect(Collectors.partitioningBy(s -> s.getGrade() >= PASS_THRESHOLD));
+ *
+ * }</pre>
+ *
+ * <p>A {@code Collector} is specified by three functions that work together to
+ * manage a result or result container. They are: creation of an initial
+ * result, incorporating a new data element into a result, and combining two
+ * results into one. The last function -- combining two results into one -- is
+ * used during parallel operations, where subsets of the input are accumulated
+ * in parallel, and then the subresults merged into a combined result. The
+ * result may be a mutable container or a value. If the result is mutable, the
+ * accumulation and combination functions may either mutate their left argument
+ * and return that (such as adding elements to a collection), or return a new
+ * result, in which case they should not perform any mutation.
+ *
+ * <p>Collectors also have a set of characteristics, including
+ * {@link Characteristics#CONCURRENT} and
+ * {@link Characteristics#STRICTLY_MUTATIVE}. These characteristics provide
+ * hints that can be used by a reduction implementation to provide better
+ * performance.
+ *
+ * <p>Libraries that implement reduction based on {@code Collector}, such as
+ * {@link Stream#collect(Collector)}, must adhere to the following constraints:
+ * <ul>
+ * <li>The first argument passed to the accumulator function, and both
+ * arguments passed to the combiner function, must be the result of a
+ * previous invocation of {@link #resultSupplier()}, {@link #accumulator()},
+ * or {@link #combiner()}.</li>
+ * <li>The implementation should not do anything with the result of any of
+ * the result supplier, accumulator, or combiner functions other than to
+ * pass them again to the accumulator or combiner functions, or return them
+ * to the caller of the reduction operation.</li>
+ * <li>If a result is passed to the accumulator or combiner function, and
+ * the same object is not returned from that function, it is never used
+ * again.</li>
+ * <li>Once a result is passed to the combiner function, it is never passed
+ * to the accumulator function again.</li>
+ * <li>For non-concurrent collectors, any result returned from the result
+ * supplier, accumulator, or combiner functions must be serially
+ * thread-confined. This enables collection to occur in parallel without
+ * the {@code Collector} needing to implement any additional synchronization.
+ *     The reduction implementation must ensure that the input is properly
+ *     partitioned, that partitions are processed in isolation, and that
+ *     combining happens only after accumulation is complete.</li>
+ * <li>For concurrent collectors, an implementation is free to (but not
+ * required to) implement reduction concurrently. A concurrent reduction
+ * is one where the accumulator function is called concurrently from
+ * multiple threads, using the same concurrently-modifiable result container,
+ * rather than keeping the result isolated during accumulation.
+ * A concurrent reduction should only be applied if the collector has the
+ *     {@link Characteristics#UNORDERED} characteristic or if the
+ * originating data is unordered.</li>
+ * </ul>
+ *
+ * @apiNote
+ * Performing a reduction operation with a {@code Collector} should produce a
+ * result equivalent to:
+ * <pre>{@code
+ * BiFunction<R,T,R> accumulator = collector.accumulator();
+ * R result = collector.resultSupplier().get();
+ * for (T t : data)
+ * result = accumulator.apply(result, t);
+ * return result;
+ * }</pre>
+ *
+ * <p>However, the library is free to partition the input, perform the reduction
+ * on the partitions, and then use the combiner function to combine the partial
+ * results to achieve a parallel reduction. Depending on the specific reduction
+ * operation and the relative cost of the accumulator and combiner functions,
+ * this may perform better or worse than a sequential reduction.
+ *
+ * <p>An example of an operation that can be easily modeled by {@code Collector}
+ * is accumulating elements into a {@code TreeSet}. In this case, the {@code
+ * resultSupplier()} function is {@code () -> new Treeset<T>()}, the
+ * {@code accumulator} function is
+ * {@code (set, element) -> { set.add(element); return set; }}, and the combiner
+ * function is {@code (left, right) -> { left.addAll(right); return left; }}.
+ * (This behavior is implemented by
+ * {@code Collectors.toCollection(TreeSet::new)}).
+ *
+ * TODO Associativity and commutativity
+ *
+ * @see Stream#collect(Collector)
+ * @see Collectors
+ *
+ * @param <T> the type of input element to the collect operation
+ * @param <R> the result type of the collect operation
+ * @since 1.8
+ */
+public interface Collector<T, R> {
+ /**
+ * A function that creates and returns a new result that represents
+ * "no values". If the accumulator or combiner functions may mutate their
+ * arguments, this must be a new, empty result container.
+ *
+ * @return a function which, when invoked, returns a result representing
+ * "no values"
+ */
+ Supplier<R> resultSupplier();
+
+ /**
+ * A function that folds a new value into a cumulative result. The result
+ * may be a mutable result container or a value. The accumulator function
+ * may modify a mutable container and return it, or create a new result and
+ * return that, but if it returns a new result object, it must not modify
+ * any of its arguments.
+ *
+ * <p>If the collector has the {@link Characteristics#STRICTLY_MUTATIVE}
+ * characteristic, then the accumulator function <em>must</em> always return
+ * its first argument, after possibly mutating its state.
+ *
+ * @return a function which folds a new value into a cumulative result
+ */
+ BiFunction<R, T, R> accumulator();
+
+ /**
+ * A function that accepts two partial results and merges them. The
+ * combiner function may fold state from one argument into the other and
+ * return that, or may return a new result object, but if it returns
+ * a new result object, it must not modify the state of either of its
+ * arguments.
+ *
+ * <p>If the collector has the {@link Characteristics#STRICTLY_MUTATIVE}
+ * characteristic, then the combiner function <em>must</em> always return
+ * its first argument, after possibly mutating its state.
+ *
+ * @return a function which combines two partial results into a cumulative
+ * result
+ */
+ BinaryOperator<R> combiner();
+
+ /**
+ * Returns a {@code Set} of {@code Collector.Characteristics} indicating
+ * the characteristics of this Collector. This set should be immutable.
+ *
+ * @return an immutable set of collector characteristics
+ */
+ Set<Characteristics> characteristics();
+
+ /**
+ * Characteristics indicating properties of a {@code Collector}, which can
+ * be used to optimize reduction implementations.
+ */
+ enum Characteristics {
+ /**
+ * Indicates that this collector is <em>concurrent</em>, meaning that
+ * the result container can support the accumulator function being
+ * called concurrently with the same result container from multiple
+ * threads. Concurrent collectors must also always have the
+ * {@code STRICTLY_MUTATIVE} characteristic.
+ *
+ * <p>If a {@code CONCURRENT} collector is not also {@code UNORDERED},
+ * then it should only be evaluated concurrently if applied to an
+ * unordered data source.
+ */
+ CONCURRENT,
+
+ /**
+ * Indicates that the result container has no intrinsic order, such as
+ * a {@link Set}.
+ */
+ UNORDERED,
+
+ /**
+ * Indicates that this collector operates by strict mutation of its
+ * result container. This means that the {@link #accumulator()} and
+ * {@link #combiner()} functions will always modify the state of and
+ * return their first argument, rather than returning a different result
+ * container.
+ */
+ STRICTLY_MUTATIVE
+ }
+}
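
As a concrete sketch of the three functions described above, here is a hypothetical Collector that joins CharSequences into a StringBuilder, written against this file's resultSupplier/accumulator/combiner shape. Note that the accumulator and combiner always return their first argument after mutating it, which is what the STRICTLY_MUTATIVE characteristic advertises.

    import java.util.Collections;
    import java.util.EnumSet;
    import java.util.Set;
    import java.util.function.BiFunction;
    import java.util.function.BinaryOperator;
    import java.util.function.Supplier;
    import java.util.stream.Collector;

    // Hypothetical collector: accumulate CharSequence elements into a single StringBuilder.
    class JoiningCollector implements Collector<CharSequence, StringBuilder> {

        @Override
        public Supplier<StringBuilder> resultSupplier() {
            return StringBuilder::new;                    // a fresh, empty result container
        }

        @Override
        public BiFunction<StringBuilder, CharSequence, StringBuilder> accumulator() {
            return (sb, cs) -> sb.append(cs);             // mutate and return the first argument
        }

        @Override
        public BinaryOperator<StringBuilder> combiner() {
            return (left, right) -> left.append(right);   // fold the right partial result into the left
        }

        @Override
        public Set<Characteristics> characteristics() {
            return Collections.unmodifiableSet(EnumSet.of(Characteristics.STRICTLY_MUTATIVE));
        }
    }

Against any stream of CharSequence elements obtained elsewhere, usage would then look like stream.collect(new JoiningCollector()).toString().
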
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/DelegatingStream.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package java.util.stream;
+
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Spliterator;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import java.util.function.BinaryOperator;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.IntFunction;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+import java.util.function.ToDoubleFunction;
+import java.util.function.ToIntFunction;
+import java.util.function.ToLongFunction;
+
+/**
+ * A {@code Stream} implementation that delegates operations to another {@code
+ * Stream}.
+ *
+ * @param <T> type of stream elements for this stream and underlying delegate
+ * stream
+ *
+ * @since 1.8
+ */
+public class DelegatingStream<T> implements Stream<T> {
+    private final Stream<T> delegate;
+
+ /**
+ * Construct a {@code Stream} that delegates operations to another {@code
+ * Stream}.
+ *
+ * @param delegate the underlying {@link Stream} to which we delegate all
+ * {@code Stream} methods
+ * @throws NullPointerException if the delegate is null
+ */
+ public DelegatingStream(Stream<T> delegate) {
+ this.delegate = Objects.requireNonNull(delegate);
+ }
+
+ // -- BaseStream methods --
+
+ @Override
+ public Spliterator<T> spliterator() {
+ return delegate.spliterator();
+ }
+
+ @Override
+ public boolean isParallel() {
+ return delegate.isParallel();
+ }
+
+ @Override
+ public Iterator<T> iterator() {
+ return delegate.iterator();
+ }
+
+ // -- Stream methods --
+
+ @Override
+ public Stream<T> filter(Predicate<? super T> predicate) {
+ return delegate.filter(predicate);
+ }
+
+ @Override
+ public <R> Stream<R> map(Function<? super T, ? extends R> mapper) {
+ return delegate.map(mapper);
+ }
+
+ @Override
+ public IntStream mapToInt(ToIntFunction<? super T> mapper) {
+ return delegate.mapToInt(mapper);
+ }
+
+ @Override
+ public LongStream mapToLong(ToLongFunction<? super T> mapper) {
+ return delegate.mapToLong(mapper);
+ }
+
+ @Override
+ public DoubleStream mapToDouble(ToDoubleFunction<? super T> mapper) {
+ return delegate.mapToDouble(mapper);
+ }
+
+ @Override
+ public <R> Stream<R> flatMap(Function<? super T, ? extends Stream<? extends R>> mapper) {
+ return delegate.flatMap(mapper);
+ }
+
+ @Override
+ public IntStream flatMapToInt(Function<? super T, ? extends IntStream> mapper) {
+ return delegate.flatMapToInt(mapper);
+ }
+
+ @Override
+ public LongStream flatMapToLong(Function<? super T, ? extends LongStream> mapper) {
+ return delegate.flatMapToLong(mapper);
+ }
+
+ @Override
+ public DoubleStream flatMapToDouble(Function<? super T, ? extends DoubleStream> mapper) {
+ return delegate.flatMapToDouble(mapper);
+ }
+
+ @Override
+ public Stream<T> distinct() {
+ return delegate.distinct();
+ }
+
+ @Override
+ public Stream<T> sorted() {
+ return delegate.sorted();
+ }
+
+ @Override
+ public Stream<T> sorted(Comparator<? super T> comparator) {
+ return delegate.sorted(comparator);
+ }
+
+ @Override
+ public void forEach(Consumer<? super T> action) {
+ delegate.forEach(action);
+ }
+
+ @Override
+ public void forEachOrdered(Consumer<? super T> action) {
+ delegate.forEachOrdered(action);
+ }
+
+ @Override
+ public Stream<T> peek(Consumer<? super T> consumer) {
+ return delegate.peek(consumer);
+ }
+
+ @Override
+ public Stream<T> limit(long maxSize) {
+ return delegate.limit(maxSize);
+ }
+
+ @Override
+ public Stream<T> substream(long startingOffset) {
+ return delegate.substream(startingOffset);
+ }
+
+ @Override
+ public Stream<T> substream(long startingOffset, long endingOffset) {
+ return delegate.substream(startingOffset, endingOffset);
+ }
+
+ @Override
+ public <A> A[] toArray(IntFunction<A[]> generator) {
+ return delegate.toArray(generator);
+ }
+
+ @Override
+ public Object[] toArray() {
+ return delegate.toArray();
+ }
+
+ @Override
+ public T reduce(T identity, BinaryOperator<T> accumulator) {
+ return delegate.reduce(identity, accumulator);
+ }
+
+ @Override
+ public Optional<T> reduce(BinaryOperator<T> accumulator) {
+ return delegate.reduce(accumulator);
+ }
+
+ @Override
+ public <U> U reduce(U identity, BiFunction<U, ? super T, U> accumulator,
+ BinaryOperator<U> combiner) {
+ return delegate.reduce(identity, accumulator, combiner);
+ }
+
+ @Override
+ public <R> R collect(Supplier<R> resultFactory,
+ BiConsumer<R, ? super T> accumulator,
+ BiConsumer<R, R> combiner) {
+ return delegate.collect(resultFactory, accumulator, combiner);
+ }
+
+ @Override
+ public <R> R collect(Collector<? super T, R> collector) {
+ return delegate.collect(collector);
+ }
+
+ @Override
+ public Optional<T> max(Comparator<? super T> comparator) {
+ return delegate.max(comparator);
+ }
+
+ @Override
+ public Optional<T> min(Comparator<? super T> comparator) {
+ return delegate.min(comparator);
+ }
+
+ @Override
+ public long count() {
+ return delegate.count();
+ }
+
+ @Override
+ public boolean anyMatch(Predicate<? super T> predicate) {
+ return delegate.anyMatch(predicate);
+ }
+
+ @Override
+ public boolean allMatch(Predicate<? super T> predicate) {
+ return delegate.allMatch(predicate);
+ }
+
+ @Override
+ public boolean noneMatch(Predicate<? super T> predicate) {
+ return delegate.noneMatch(predicate);
+ }
+
+ @Override
+ public Optional<T> findFirst() {
+ return delegate.findFirst();
+ }
+
+ @Override
+ public Optional<T> findAny() {
+ return delegate.findAny();
+ }
+
+ @Override
+ public Stream<T> unordered() {
+ return delegate.unordered();
+ }
+
+ @Override
+ public Stream<T> sequential() {
+ return delegate.sequential();
+ }
+
+ @Override
+ public Stream<T> parallel() {
+ return delegate.parallel();
+ }
+}
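
One way to use the class above is to subclass it and override just the calls you care about; everything else falls through to the delegate. A small hypothetical sketch:

    import java.util.Arrays;
    import java.util.function.Consumer;
    import java.util.stream.DelegatingStream;
    import java.util.stream.Stream;

    // Hypothetical wrapper: trace terminal forEach calls, delegate everything else unchanged.
    class TracingStream<T> extends DelegatingStream<T> {
        TracingStream(Stream<T> delegate) {
            super(delegate);
        }

        @Override
        public void forEach(Consumer<? super T> action) {
            System.out.println("forEach starting");
            super.forEach(action);
        }

        public static void main(String[] args) {
            new TracingStream<>(Arrays.asList(1, 2, 3).stream()).forEach(System.out::println);
        }
    }

Note that intermediate operations such as filter and map return the delegate's own stream rather than the wrapper, so overrides only apply to calls made directly on the wrapper.
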
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/DoubleStream.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,652 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.DoubleSummaryStatistics;
+import java.util.OptionalDouble;
+import java.util.PrimitiveIterator;
+import java.util.Spliterator;
+import java.util.function.BiConsumer;
+import java.util.function.DoubleBinaryOperator;
+import java.util.function.DoubleConsumer;
+import java.util.function.DoubleFunction;
+import java.util.function.DoublePredicate;
+import java.util.function.DoubleToIntFunction;
+import java.util.function.DoubleToLongFunction;
+import java.util.function.DoubleUnaryOperator;
+import java.util.function.Function;
+import java.util.function.ObjDoubleConsumer;
+import java.util.function.Supplier;
+
+/**
+ * A sequence of primitive double elements supporting sequential and parallel
+ * bulk operations. Streams support lazy intermediate operations (transforming
+ * a stream to another stream) such as {@code filter} and {@code map}, and terminal
+ * operations (consuming the contents of a stream to produce a result or
+ * side-effect), such as {@code forEach}, {@code findFirst}, and {@code
+ * iterator}. Once an operation has been performed on a stream, it
+ * is considered <em>consumed</em> and no longer usable for other operations.
+ *
+ * <p>For sequential stream pipelines, all operations are performed in the
+ * <a href="package-summary.html#Ordering">encounter order</a> of the pipeline
+ * source, if the pipeline source has a defined encounter order.
+ *
+ * <p>For parallel stream pipelines, unless otherwise specified, intermediate
+ * stream operations preserve the <a href="package-summary.html#Ordering">
+ * encounter order</a> of their source, and terminal operations
+ * respect the encounter order of their source, if the source
+ * has an encounter order. Provided that the parameters to stream operations
+ * satisfy the <a href="package-summary.html#NonInterference">non-interference
+ * requirements</a>, and excepting differences arising from the absence of
+ * a defined encounter order, the result of a stream pipeline should be
+ * stable across multiple executions of the same operations on the same source.
+ * However, the timing and thread in which side-effects occur (for those
+ * operations which are allowed to produce side-effects, such as
+ * {@link #forEach(DoubleConsumer)}), are explicitly nondeterministic for parallel
+ * execution of stream pipelines.
+ *
+ * <p>Unless otherwise noted, passing a {@code null} argument to any stream
+ * method may result in a {@link NullPointerException}.
+ *
+ * @apiNote
+ * Streams are not data structures; they do not manage the storage for their
+ * elements, nor do they support access to individual elements. However,
+ * you can use the {@link #iterator()} or {@link #spliterator()} operations to
+ * perform a controlled traversal.
+ *
+ * @since 1.8
+ * @see <a href="package-summary.html">java.util.stream</a>
+ */
+public interface DoubleStream extends BaseStream<Double, DoubleStream> {
+
+ /**
+ * Returns a stream consisting of the elements of this stream that match
+ * the given predicate.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> predicate to apply to
+ * each element to determine if it should be included
+ * @return the new stream
+ */
+ DoubleStream filter(DoublePredicate predicate);
+
+ /**
+ * Returns a stream consisting of the results of applying the given
+ * function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to
+ * each element
+ * @return the new stream
+ */
+ DoubleStream map(DoubleUnaryOperator mapper);
+
+ /**
+ * Returns an object-valued {@code Stream} consisting of the results of
+ * applying the given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">
+ * intermediate operation</a>.
+ *
+ * @param <U> the element type of the new stream
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element
+ * @return the new stream
+ */
+ <U> Stream<U> mapToObj(DoubleFunction<? extends U> mapper);
+
+ /**
+ * Returns an {@code IntStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element
+ * @return the new stream
+ */
+ IntStream mapToInt(DoubleToIntFunction mapper);
+
+ /**
+ * Returns a {@code LongStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element
+ * @return the new stream
+ */
+ LongStream mapToLong(DoubleToLongFunction mapper);
+
+ /**
+ * Returns a stream consisting of the results of replacing each element of
+ * this stream with the contents of the stream produced by applying the
+ * provided mapping function to each element.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @apiNote
+ * The {@code flatMap()} operation has the effect of applying a one-to-many
+     * transformation to the elements of the stream, and then flattening the
+ * resulting elements into a new stream. For example, if {@code orders}
+ * is a stream of purchase orders, and each purchase order contains a
+ * collection of line items, then the following produces a stream of line
+ * items:
+ * <pre>{@code
+ * orderStream.flatMap(order -> order.getLineItems().stream())...
+ * }</pre>
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to
+     * each element which produces a {@code DoubleStream} of new
+ * values
+ * @return the new stream
+ * @see Stream#flatMap(Function)
+ */
+ DoubleStream flatMap(DoubleFunction<? extends DoubleStream> mapper);
+
+ /**
+ * Returns a stream consisting of the distinct elements of this stream. The
+ * elements are compared for equality according to
+ * {@link java.lang.Double#compare(double, double)}.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @return the result stream
+ */
+ DoubleStream distinct();
+
+ /**
+ * Returns a stream consisting of the elements of this stream in sorted
+     * order. The elements are compared according to
+ * {@link java.lang.Double#compare(double, double)}.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @return the result stream
+ */
+ DoubleStream sorted();
+
+ /**
+ * Returns a stream consisting of the elements of this stream, additionally
+ * performing the provided action on each element as elements are consumed
+ * from the resulting stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * <p>For parallel stream pipelines, the action may be called at
+ * whatever time and in whatever thread the element is made available by the
+ * upstream operation. If the action modifies shared state,
+ * it is responsible for providing the required synchronization.
+ *
+ * @apiNote This method exists mainly to support debugging, where you want
+ * to see the elements as they flow past a certain point in a pipeline:
+ * <pre>{@code
+ * list.stream()
+ * .filter(filteringFunction)
+     *         .peek(e -> System.out.println("Filtered value: " + e))
+     *         .map(mappingFunction)
+     *         .peek(e -> System.out.println("Mapped value: " + e))
+     *         .collect(Collectors.toDoubleSummaryStatistics());
+ * }</pre>
+ *
+ * @param consumer a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements as
+ * they are consumed from the stream
+ * @return the new stream
+ */
+ DoubleStream peek(DoubleConsumer consumer);
+
+ /**
+ * Returns a stream consisting of the elements of this stream, truncated
+ * to be no longer than {@code maxSize} in length.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * stateful intermediate operation</a>.
+ *
+ * @param maxSize the number of elements the stream should be limited to
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code maxSize} is negative
+ */
+ DoubleStream limit(long maxSize);
+
+ /**
+ * Returns a stream consisting of the remaining elements of this stream
+ * after indexing {@code startInclusive} elements into the stream. If the
+ * {@code startInclusive} index lies past the end of this stream then an
+ * empty stream will be returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @param startInclusive the number of leading elements to skip
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code startInclusive} is negative
+ */
+ DoubleStream substream(long startInclusive);
+
+ /**
+ * Returns a stream consisting of the remaining elements of this stream
+ * after indexing {@code startInclusive} elements into the stream and
+ * truncated to contain no more than {@code endExclusive - startInclusive}
+ * elements. If the {@code startInclusive} index lies past the end
+ * of this stream then an empty stream will be returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * stateful intermediate operation</a>.
+ *
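+ * @apiNote An illustrative sketch; the variable {@code stream} below denotes
+ * an arbitrary {@code DoubleStream} and is not part of this API. Under the
+ * semantics above, the following two pipelines produce streams containing the
+ * same elements, skipping the first two and keeping at most the next three:
+ * <pre>{@code
+ *     stream.substream(2, 5);
+ *     stream.substream(2).limit(3);
+ * }</pre>
+ *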
+ * @param startInclusive the starting position of the substream, inclusive
+ * @param endExclusive the ending position of the substream, exclusive
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code startInclusive} or
+ * {@code endExclusive} is negative or {@code startInclusive} is greater
+ * than {@code endExclusive}
+ */
+ DoubleStream substream(long startInclusive, long endExclusive);
+
+ /**
+ * Performs an action for each element of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * <p>For parallel stream pipelines, this operation does <em>not</em>
+ * guarantee to respect the encounter order of the stream, as doing so
+ * would sacrifice the benefit of parallelism. For any given element, the
+ * action may be performed at whatever time and in whatever thread the
+ * library chooses. If the action accesses shared state, it is
+ * responsible for providing the required synchronization.
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements
+ */
+ void forEach(DoubleConsumer action);
+
+ /**
+ * Performs an action for each element of this stream, guaranteeing that
+ * each element is processed in encounter order for streams that have a
+ * defined encounter order.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements
+ * @see #forEach(DoubleConsumer)
+ */
+ void forEachOrdered(DoubleConsumer action);
+
+ /**
+ * Returns an array containing the elements of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return an array containing the elements of this stream
+ */
+ double[] toArray();
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using the provided identity value and an
+ * <a href="package-summary.html#Associativity">associative</a>
+ * accumulation function, and returns the reduced value. This is equivalent
+ * to:
+ * <pre>{@code
+ * double result = identity;
+ * for (double element : this stream)
+ * result = accumulator.applyAsDouble(result, element)
+ * return result;
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code identity} value must be an identity for the accumulator
+ * function. This means that for all {@code x},
+ * {@code accumulator.apply(identity, x)} is equal to {@code x}.
+ * The {@code accumulator} function must be an
+ * <a href="package-summary.html#Associativity">associative</a> function.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @apiNote Sum, min, max, and average are all special cases of reduction.
+ * Summing a stream of numbers can be expressed as:
+ *
+ * <pre>{@code
+ * double sum = numbers.reduce(0, (a, b) -> a+b);
+ * }</pre>
+ *
+ * or more compactly:
+ *
+ * <pre>{@code
+ * double sum = numbers.reduce(0, Double::sum);
+ * }</pre>
+ *
+ * <p>While this may seem a more roundabout way to perform an aggregation
+ * compared to simply mutating a running total in a loop, reduction
+ * operations parallelize more gracefully, without needing additional
+ * synchronization and with greatly reduced risk of data races.
+ *
+ * @param identity the identity value for the accumulating function
+ * @param op an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for combining two values
+ * @return the result of the reduction
+ * @see #sum()
+ * @see #min()
+ * @see #max()
+ * @see #average()
+ */
+ double reduce(double identity, DoubleBinaryOperator op);
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using an
+ * <a href="package-summary.html#Associativity">associative</a> accumulation
+ * function, and returns an {@code OptionalDouble} describing the reduced
+ * value, if any. This is equivalent to:
+ * <pre>{@code
+ * boolean foundAny = false;
+ * double result = null;
+ * for (double element : this stream) {
+ * if (!foundAny) {
+ * foundAny = true;
+ * result = element;
+ * }
+ * else
+ * result = accumulator.applyAsDouble(result, element);
+ * }
+ * return foundAny ? OptionalDouble.of(result) : OptionalDouble.empty();
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code accumulator} function must be an
+ * <a href="package-summary.html#Associativity">associative</a> function.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
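+ * @apiNote For example (an illustrative sketch; {@code stream} denotes an
+ * arbitrary {@code DoubleStream} and is not part of this API), the largest
+ * element, if any, can be computed as:
+ * <pre>{@code
+ *     OptionalDouble max = stream.reduce(Double::max);
+ * }</pre>
+ * which is equivalent to calling {@link #max()}.
+ *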
+ * @param op an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for combining two values
+ * @return the result of the reduction
+ * @see #reduce(double, DoubleBinaryOperator)
+ */
+ OptionalDouble reduce(DoubleBinaryOperator op);
+
+ /**
+ * Performs a <a href="package-summary.html#MutableReduction">mutable
+ * reduction</a> operation on the elements of this stream. A mutable
+ * reduction is one in which the reduced value is a mutable value holder,
+ * such as an {@code ArrayList}, and elements are incorporated by updating
+ * the state of the result, rather than by replacing the result. This
+ * produces a result equivalent to:
+ * <pre>{@code
+ * R result = resultFactory.get();
+ * for (double element : this stream)
+ * accumulator.accept(result, element);
+ * return result;
+ * }</pre>
+ *
+ * <p>Like {@link #reduce(double, DoubleBinaryOperator)}, {@code collect}
+ * operations can be parallelized without requiring additional
+ * synchronization.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
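+ * @apiNote An illustrative sketch (the variable {@code stream} below denotes
+ * an arbitrary {@code DoubleStream} and is not part of this API): the
+ * elements of a stream can be accumulated into a list as follows:
+ * <pre>{@code
+ *     ArrayList<Double> doubles = stream.collect(ArrayList::new,
+ *                                                ArrayList::add,
+ *                                                ArrayList::addAll);
+ * }</pre>
+ *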
+ * @param <R> type of the result
+ * @param resultFactory a function that creates a new result container.
+ * For a parallel execution, this function may be
+ * called multiple times and must return a fresh value
+ * each time.
+ * @param accumulator an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for incorporating an additional
+ * element into a result
+ * @param combiner an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for combining two values, which
+ * must be compatible with the accumulator function
+ * @return the result of the reduction
+ * @see Stream#collect(Supplier, BiConsumer, BiConsumer)
+ */
+ <R> R collect(Supplier<R> resultFactory,
+ ObjDoubleConsumer<R> accumulator,
+ BiConsumer<R, R> combiner);
+
+ /**
+ * Returns the sum of elements in this stream. The sum returned can vary
+ * depending upon the order in which elements are encountered. This is due
+ * to accumulated rounding error in addition of values of differing
+ * magnitudes. Elements sorted by increasing absolute magnitude tend to
+ * yield more accurate results. If any stream element is a {@code NaN} or
+ * the sum is at any point a {@code NaN} then the sum will be {@code NaN}.
+ * This is a special case of a
+ * <a href="package-summary.html#MutableReduction">reduction</a> and is
+ * equivalent to:
+ * <pre>{@code
+ * return reduce(0, Double::sum);
+ * }</pre>
+ *
+ * @return the sum of elements in this stream
+ */
+ double sum();
+
+ /**
+ * Returns an {@code OptionalDouble} describing the minimum element of this
+ * stream, or an empty OptionalDouble if this stream is empty. The minimum
+ * element will be {@code Double.NaN} if any stream element was NaN. Unlike
+ * the numerical comparison operators, this method considers negative zero
+ * to be strictly smaller than positive zero. This is a special case of a
+ * <a href="package-summary.html#MutableReduction">reduction</a> and is
+ * equivalent to:
+ * <pre>{@code
+ * return reduce(Double::min);
+ * }</pre>
+ *
+ * @return an {@code OptionalDouble} containing the minimum element of this
+ * stream, or an empty optional if the stream is empty
+ */
+ OptionalDouble min();
+
+ /**
+ * Returns an {@code OptionalDouble} describing the maximum element of this
+ * stream, or an empty OptionalDouble if this stream is empty. The maximum
+ * element will be {@code Double.NaN} if any stream element was NaN. Unlike
+ * the numerical comparison operators, this method considers negative zero
+ * to be strictly smaller than positive zero. This is a
+ * special case of a
+ * <a href="package-summary.html#MutableReduction">reduction</a> and is
+ * equivalent to:
+ * <pre>{@code
+ * return reduce(Double::max);
+ * }</pre>
+ *
+ * @return an {@code OptionalDouble} containing the maximum element of this
+ * stream, or an empty optional if the stream is empty
+ */
+ OptionalDouble max();
+
+ /**
+ * Returns the count of elements in this stream. This is a special case of
+ * a <a href="package-summary.html#MutableReduction">reduction</a> and is
+ * equivalent to:
+ * <pre>{@code
+ * return mapToLong(e -> 1L).sum();
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
+ *
+ * @return the count of elements in this stream
+ */
+ long count();
+
+ /**
+ * Returns an {@code OptionalDouble} describing the average of elements of
+ * this stream, or an empty optional if this stream is empty. The average
+ * returned can vary depending upon the order in which elements are
+ * encountered. This is due to accumulated rounding error in addition of
+ * elements of differing magnitudes. Elements sorted by increasing absolute
+ * magnitude tend to yield more accurate results. If any recorded value is
+ * a {@code NaN} or the sum is at any point a {@code NaN} then the average
+ * will be {@code NaN}. This is a special case of a
+ * <a href="package-summary.html#MutableReduction">reduction</a>.
+ *
+ * @return an {@code OptionalDouble} containing the average element of this
+ * stream, or an empty optional if the stream is empty
+ */
+ OptionalDouble average();
+
+ /**
+ * Returns a {@code DoubleSummaryStatistics} describing various summary data
+ * about the elements of this stream. This is a special
+ * case of a <a href="package-summary.html#MutableReduction">reduction</a>.
+ *
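+ * @apiNote An illustrative sketch (the variables below are hypothetical and
+ * not part of this API): the count, sum, min, average and max can all be
+ * obtained in a single pass:
+ * <pre>{@code
+ *     DoubleSummaryStatistics stats = stream.summaryStatistics();
+ *     double average = stats.getAverage();
+ *     double max = stats.getMax();
+ * }</pre>
+ *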
+ * @return a {@code DoubleSummaryStatistics} describing various summary data
+ * about the elements of this stream
+ */
+ DoubleSummaryStatistics summaryStatistics();
+
+ /**
+ * Returns whether any elements of this stream match the provided
+ * predicate. May not evaluate the predicate on all elements if not
+ * necessary for determining the result.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> predicate to apply to elements of this
+ * stream
+ * @return {@code true} if any elements of the stream match the provided
+ * predicate, otherwise {@code false}
+ */
+ boolean anyMatch(DoublePredicate predicate);
+
+ /**
+ * Returns whether all elements of this stream match the provided predicate.
+ * May not evaluate the predicate on all elements if not necessary for
+ * determining the result.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> predicate to apply to elements of this
+ * stream
+ * @return {@code true} if all elements of the stream match the provided
+ * predicate, otherwise {@code false}
+ */
+ boolean allMatch(DoublePredicate predicate);
+
+ /**
+ * Returns whether no elements of this stream match the provided predicate.
+ * May not evaluate the predicate on all elements if not necessary for
+ * determining the result.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> predicate to apply to elements of this
+ * stream
+ * @return {@code true} if no elements of the stream match the provided
+ * predicate, otherwise {@code false}
+ */
+ boolean noneMatch(DoublePredicate predicate);
+
+ /**
+ * Returns an {@link OptionalDouble} describing the first element of this
+ * stream (in the encounter order), or an empty {@code OptionalDouble} if
+ * the stream is empty. If the stream has no encounter order, then any
+ * element may be returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @return an {@code OptionalDouble} describing the first element of this
+ * stream, or an empty {@code OptionalDouble} if the stream is empty
+ */
+ OptionalDouble findFirst();
+
+ /**
+ * Returns an {@link OptionalDouble} describing some element of the stream,
+ * or an empty {@code OptionalDouble} if the stream is empty.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * <p>The behavior of this operation is explicitly nondeterministic; it is
+ * free to select any element in the stream. This is to allow for maximal
+ * performance in parallel operations; the cost is that multiple invocations
+ * on the same source may not return the same result. (If the first element
+ * in the encounter order is desired, use {@link #findFirst()} instead.)
+ *
+ * @return an {@code OptionalDouble} describing some element of this stream,
+ * or an empty {@code OptionalDouble} if the stream is empty
+ * @see #findFirst()
+ */
+ OptionalDouble findAny();
+
+ /**
+ * Returns a {@code Stream} consisting of the elements of this stream,
+ * boxed to {@code Double}.
+ *
+ * @return a {@code Stream} consisting of the elements of this stream,
+ * each boxed to a {@code Double}
+ */
+ Stream<Double> boxed();
+
+ @Override
+ DoubleStream sequential();
+
+ @Override
+ DoubleStream parallel();
+
+ @Override
+ PrimitiveIterator.OfDouble iterator();
+
+ @Override
+ Spliterator.OfDouble spliterator();
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/FindOps.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,317 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Optional;
+import java.util.OptionalDouble;
+import java.util.OptionalInt;
+import java.util.OptionalLong;
+import java.util.Spliterator;
+import java.util.concurrent.CountedCompleter;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+
+/**
+ * Factory for instances of a short-circuiting {@code TerminalOp} that searches
+ * for an element in a stream pipeline, and terminates when it finds one.
+ * Supported variants include find-first (find the first element in the
+ * encounter order) and find-any (find any element, may not be the first in
+ * encounter order.)
+ *
+ * @since 1.8
+ */
+final class FindOps {
+
+ private FindOps() { }
+
+ /**
+ * Constructs a {@code TerminalOp} for streams of objects.
+ *
+ * @param <T> the type of elements of the stream
+ * @param mustFindFirst whether the {@code TerminalOp} must produce the
+ * first element in the encounter order
+ * @return a {@code TerminalOp} implementing the find operation
+ */
+ public static <T> TerminalOp<T, Optional<T>> makeRef(boolean mustFindFirst) {
+ return new FindOp<>(mustFindFirst, StreamShape.REFERENCE, Optional.empty(),
+ Optional::isPresent, FindSink.OfRef::new);
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} for streams of ints.
+ *
+ * @param mustFindFirst whether the {@code TerminalOp} must produce the
+ * first element in the encounter order
+ * @return a {@code TerminalOp} implementing the find operation
+ */
+ public static TerminalOp<Integer, OptionalInt> makeInt(boolean mustFindFirst) {
+ return new FindOp<>(mustFindFirst, StreamShape.INT_VALUE, OptionalInt.empty(),
+ OptionalInt::isPresent, FindSink.OfInt::new);
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} for streams of longs.
+ *
+ * @param mustFindFirst whether the {@code TerminalOp} must produce the
+ * first element in the encounter order
+ * @return a {@code TerminalOp} implementing the find operation
+ */
+ public static TerminalOp<Long, OptionalLong> makeLong(boolean mustFindFirst) {
+ return new FindOp<>(mustFindFirst, StreamShape.LONG_VALUE, OptionalLong.empty(),
+ OptionalLong::isPresent, FindSink.OfLong::new);
+ }
+
+ /**
+ * Constructs a {@code FindOp} for streams of doubles.
+ *
+ * @param mustFindFirst whether the {@code TerminalOp} must produce the
+ * first element in the encounter order
+ * @return a {@code TerminalOp} implementing the find operation
+ */
+ public static TerminalOp<Double, OptionalDouble> makeDouble(boolean mustFindFirst) {
+ return new FindOp<>(mustFindFirst, StreamShape.DOUBLE_VALUE, OptionalDouble.empty(),
+ OptionalDouble::isPresent, FindSink.OfDouble::new);
+ }
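+
+    // Illustrative note (not part of the original sources): a stream's
+    // findFirst() / findAny() terminal operations are expected to be backed by
+    // these factories, e.g. find-first on a reference pipeline corresponds to
+    // evaluating makeRef(true) and find-any to makeRef(false). The pipeline
+    // entry point that consumes the TerminalOp is assumed here, not shown in
+    // this file.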
+
+ /**
+ * A short-circuiting {@code TerminalOp} that searches for an element in a
+ * stream pipeline, and terminates when it finds one. Implements both
+ * find-first (find the first element in the encounter order) and find-any
+ * (find any element, may not be the first in encounter order.)
+ *
+ * @param <T> the output type of the stream pipeline
+ * @param <O> the result type of the find operation, typically an optional
+ * type
+ */
+ private static final class FindOp<T, O> implements TerminalOp<T, O> {
+ private final StreamShape shape;
+ final boolean mustFindFirst;
+ final O emptyValue;
+ final Predicate<O> presentPredicate;
+ final Supplier<TerminalSink<T, O>> sinkSupplier;
+
+ /**
+ * Constructs a {@code FindOp}.
+ *
+ * @param mustFindFirst if true, must find the first element in
+ * encounter order, otherwise can find any element
+ * @param shape stream shape of elements to search
+ * @param emptyValue result value corresponding to "found nothing"
+ * @param presentPredicate {@code Predicate} on result value
+ * corresponding to "found something"
+ * @param sinkSupplier supplier for a {@code TerminalSink} implementing
+ * the matching functionality
+ */
+ FindOp(boolean mustFindFirst,
+ StreamShape shape,
+ O emptyValue,
+ Predicate<O> presentPredicate,
+ Supplier<TerminalSink<T, O>> sinkSupplier) {
+ this.mustFindFirst = mustFindFirst;
+ this.shape = shape;
+ this.emptyValue = emptyValue;
+ this.presentPredicate = presentPredicate;
+ this.sinkSupplier = sinkSupplier;
+ }
+
+ @Override
+ public int getOpFlags() {
+ return StreamOpFlag.IS_SHORT_CIRCUIT | (mustFindFirst ? 0 : StreamOpFlag.NOT_ORDERED);
+ }
+
+ @Override
+ public StreamShape inputShape() {
+ return shape;
+ }
+
+ @Override
+ public <S> O evaluateSequential(PipelineHelper<T> helper,
+ Spliterator<S> spliterator) {
+ O result = helper.wrapAndCopyInto(sinkSupplier.get(), spliterator).get();
+ return result != null ? result : emptyValue;
+ }
+
+ @Override
+ public <P_IN> O evaluateParallel(PipelineHelper<T> helper,
+ Spliterator<P_IN> spliterator) {
+ return new FindTask<>(this, helper, spliterator).invoke();
+ }
+ }
+
+ /**
+ * Implementation of {@code TerminalSink} that implements the find
+ * functionality, requesting cancellation when something has been found.
+ *
+ * @param <T> The type of input element
+ * @param <O> The result type, typically an optional type
+ */
+ private static abstract class FindSink<T, O> implements TerminalSink<T, O> {
+ boolean hasValue;
+ T value;
+
+ FindSink() {} // Avoid creation of special accessor
+
+ @Override
+ public void accept(T value) {
+ if (!hasValue) {
+ hasValue = true;
+ this.value = value;
+ }
+ }
+
+ @Override
+ public boolean cancellationRequested() {
+ return hasValue;
+ }
+
+ /** Specialization of {@code FindSink} for reference streams */
+ static final class OfRef<T> extends FindSink<T, Optional<T>> {
+ @Override
+ public Optional<T> get() {
+ return hasValue ? Optional.of(value) : null;
+ }
+ }
+
+ /** Specialization of {@code FindSink} for int streams */
+ static final class OfInt extends FindSink<Integer, OptionalInt>
+ implements Sink.OfInt {
+ @Override
+ public void accept(int value) {
+ // Boxing is OK here, since few values will actually flow into the sink
+ accept((Integer) value);
+ }
+
+ @Override
+ public OptionalInt get() {
+ return hasValue ? OptionalInt.of(value) : null;
+ }
+ }
+
+ /** Specialization of {@code FindSink} for long streams */
+ static final class OfLong extends FindSink<Long, OptionalLong>
+ implements Sink.OfLong {
+ @Override
+ public void accept(long value) {
+ // Boxing is OK here, since few values will actually flow into the sink
+ accept((Long) value);
+ }
+
+ @Override
+ public OptionalLong get() {
+ return hasValue ? OptionalLong.of(value) : null;
+ }
+ }
+
+ /** Specialization of {@code FindSink} for double streams */
+ static final class OfDouble extends FindSink<Double, OptionalDouble>
+ implements Sink.OfDouble {
+ @Override
+ public void accept(double value) {
+ // Boxing is OK here, since few values will actually flow into the sink
+ accept((Double) value);
+ }
+
+ @Override
+ public OptionalDouble get() {
+ return hasValue ? OptionalDouble.of(value) : null;
+ }
+ }
+ }
+
+ /**
+ * {@code ForkJoinTask} implementing parallel short-circuiting search
+ * @param <P_IN> Input element type to the stream pipeline
+ * @param <P_OUT> Output element type from the stream pipeline
+ * @param <O> Result type from the find operation
+ */
+ private static final class FindTask<P_IN, P_OUT, O>
+ extends AbstractShortCircuitTask<P_IN, P_OUT, O, FindTask<P_IN, P_OUT, O>> {
+ private final FindOp<P_OUT, O> op;
+
+ FindTask(FindOp<P_OUT, O> op,
+ PipelineHelper<P_OUT> helper,
+ Spliterator<P_IN> spliterator) {
+ super(helper, spliterator);
+ this.op = op;
+ }
+
+ FindTask(FindTask<P_IN, P_OUT, O> parent, Spliterator<P_IN> spliterator) {
+ super(parent, spliterator);
+ this.op = parent.op;
+ }
+
+ @Override
+ protected FindTask<P_IN, P_OUT, O> makeChild(Spliterator<P_IN> spliterator) {
+ return new FindTask<>(this, spliterator);
+ }
+
+ @Override
+ protected O getEmptyResult() {
+ return op.emptyValue;
+ }
+
+ private void foundResult(O answer) {
+ if (isLeftmostNode())
+ shortCircuit(answer);
+ else
+ cancelLaterNodes();
+ }
+
+ @Override
+ protected O doLeaf() {
+ O result = helper.wrapAndCopyInto(op.sinkSupplier.get(), spliterator).get();
+ if (!op.mustFindFirst) {
+ if (result != null)
+ shortCircuit(result);
+ return null;
+ }
+ else {
+ if (result != null) {
+ foundResult(result);
+ return result;
+ }
+ else
+ return null;
+ }
+ }
+
+ @Override
+ public void onCompletion(CountedCompleter<?> caller) {
+ if (op.mustFindFirst) {
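+                // Descriptive note: this loop visits the left child first and
+                // then the right child (it stops once child == p, i.e. after
+                // rightChild has been seen). The first child, in encounter
+                // order, whose local result is present becomes this task's
+                // result.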
+ for (FindTask<P_IN, P_OUT, O> child = leftChild, p = null; child != p;
+ p = child, child = rightChild) {
+ O result = child.getLocalResult();
+ if (result != null && op.presentPredicate.test(result)) {
+ setLocalResult(result);
+ foundResult(result);
+ break;
+ }
+ }
+ }
+ super.onCompletion(caller);
+ }
+ }
+}
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/ForEachOps.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,396 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Objects;
+import java.util.Spliterator;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountedCompleter;
+import java.util.function.Consumer;
+import java.util.function.DoubleConsumer;
+import java.util.function.IntConsumer;
+import java.util.function.LongConsumer;
+
+/**
+ * Factory for creating instances of {@code TerminalOp} that perform an
+ * action for every element of a stream. Supported variants include unordered
+ * traversal (elements are provided to the {@code Consumer} as soon as they are
+ * available), and ordered traversal (elements are provided to the
+ * {@code Consumer} in encounter order.)
+ *
+ * <p>Elements are provided to the {@code Consumer} on whatever thread and
+ * in whatever order they become available. For ordered traversals, it is
+ * guaranteed that processing an element <em>happens-before</em> processing
+ * subsequent elements in the encounter order.
+ *
+ * <p>Exceptions occurring as a result of sending an element to the
+ * {@code Consumer} will be relayed to the caller and traversal will be
+ * prematurely terminated.
+ *
+ * @since 1.8
+ */
+final class ForEachOps {
+
+ private ForEachOps() { }
+
+ /**
+ * Constructs a {@code TerminalOp} that performs an action for every element
+ * of a stream.
+ *
+ * @param action the {@code Consumer} that receives all elements of a
+ * stream
+ * @param ordered whether an ordered traversal is requested
+ * @param <T> the type of the stream elements
+ * @return the {@code TerminalOp} instance
+ */
+ public static <T> TerminalOp<T, Void> makeRef(Consumer<? super T> action,
+ boolean ordered) {
+ Objects.requireNonNull(action);
+ return new ForEachOp.OfRef<>(action, ordered);
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that performs an action for every element
+ * of an {@code IntStream}.
+ *
+ * @param action the {@code IntConsumer} that receives all elements of a
+ * stream
+ * @param ordered whether an ordered traversal is requested
+ * @return the {@code TerminalOp} instance
+ */
+ public static TerminalOp<Integer, Void> makeInt(IntConsumer action,
+ boolean ordered) {
+ Objects.requireNonNull(action);
+ return new ForEachOp.OfInt(action, ordered);
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that performs an action for every element
+ * of a {@code LongStream}.
+ *
+ * @param action the {@code LongConsumer} that receives all elements of a
+ * stream
+ * @param ordered whether an ordered traversal is requested
+ * @return the {@code TerminalOp} instance
+ */
+ public static TerminalOp<Long, Void> makeLong(LongConsumer action,
+ boolean ordered) {
+ Objects.requireNonNull(action);
+ return new ForEachOp.OfLong(action, ordered);
+ }
+
+ /**
+ * Constructs a {@code TerminalOp} that performs an action for every element
+ * of a {@code DoubleStream}.
+ *
+ * @param action the {@code DoubleConsumer} that receives all elements of
+ * a stream
+ * @param ordered whether an ordered traversal is requested
+ * @return the {@code TerminalOp} instance
+ */
+ public static TerminalOp<Double, Void> makeDouble(DoubleConsumer action,
+ boolean ordered) {
+ Objects.requireNonNull(action);
+ return new ForEachOp.OfDouble(action, ordered);
+ }
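+
+    // Illustrative note (not part of the original sources): a pipeline's
+    // forEach(action) terminal operation is expected to evaluate one of these
+    // factories with ordered == false, while forEachOrdered(action) uses
+    // ordered == true; the pipeline entry point that consumes the TerminalOp
+    // is assumed here, not shown in this file.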
+
+ /**
+ * A {@code TerminalOp} that evaluates a stream pipeline and sends the
+ * output to itself as a {@code TerminalSink}. Elements will be sent in
+ * whatever thread they become available. If the traversal is unordered,
+ * they will be sent independent of the stream's encounter order.
+ *
+ * <p>This terminal operation is stateless. For parallel evaluation, each
+ * leaf instance of a {@code ForEachTask} will send elements to the same
+ * {@code TerminalSink} reference that is an instance of this class.
+ *
+ * @param <T> the output type of the stream pipeline
+ */
+ private static abstract class ForEachOp<T>
+ implements TerminalOp<T, Void>, TerminalSink<T, Void> {
+ private final boolean ordered;
+
+ protected ForEachOp(boolean ordered) {
+ this.ordered = ordered;
+ }
+
+ // TerminalOp
+
+ @Override
+ public int getOpFlags() {
+ return ordered ? 0 : StreamOpFlag.NOT_ORDERED;
+ }
+
+ @Override
+ public <S> Void evaluateSequential(PipelineHelper<T> helper,
+ Spliterator<S> spliterator) {
+ return helper.wrapAndCopyInto(this, spliterator).get();
+ }
+
+ @Override
+ public <S> Void evaluateParallel(PipelineHelper<T> helper,
+ Spliterator<S> spliterator) {
+ if (ordered)
+ new ForEachOrderedTask<>(helper, spliterator, this).invoke();
+ else
+ new ForEachTask<>(helper, spliterator, helper.wrapSink(this)).invoke();
+ return null;
+ }
+
+ // TerminalSink
+
+ @Override
+ public Void get() {
+ return null;
+ }
+
+ // Implementations
+
+ /** Implementation class for reference streams */
+ private static class OfRef<T> extends ForEachOp<T> {
+ final Consumer<? super T> consumer;
+
+ OfRef(Consumer<? super T> consumer, boolean ordered) {
+ super(ordered);
+ this.consumer = consumer;
+ }
+
+ @Override
+ public void accept(T t) {
+ consumer.accept(t);
+ }
+ }
+
+ /** Implementation class for {@code IntStream} */
+ private static class OfInt extends ForEachOp<Integer>
+ implements Sink.OfInt {
+ final IntConsumer consumer;
+
+ OfInt(IntConsumer consumer, boolean ordered) {
+ super(ordered);
+ this.consumer = consumer;
+ }
+
+ @Override
+ public StreamShape inputShape() {
+ return StreamShape.INT_VALUE;
+ }
+
+ @Override
+ public void accept(int t) {
+ consumer.accept(t);
+ }
+ }
+
+ /** Implementation class for {@code LongStream} */
+ private static class OfLong extends ForEachOp<Long>
+ implements Sink.OfLong {
+ final LongConsumer consumer;
+
+ OfLong(LongConsumer consumer, boolean ordered) {
+ super(ordered);
+ this.consumer = consumer;
+ }
+
+ @Override
+ public StreamShape inputShape() {
+ return StreamShape.LONG_VALUE;
+ }
+
+ @Override
+ public void accept(long t) {
+ consumer.accept(t);
+ }
+ }
+
+ /** Implementation class for {@code DoubleStream} */
+ private static class OfDouble extends ForEachOp<Double>
+ implements Sink.OfDouble {
+ final DoubleConsumer consumer;
+
+ OfDouble(DoubleConsumer consumer, boolean ordered) {
+ super(ordered);
+ this.consumer = consumer;
+ }
+
+ @Override
+ public StreamShape inputShape() {
+ return StreamShape.DOUBLE_VALUE;
+ }
+
+ @Override
+ public void accept(double t) {
+ consumer.accept(t);
+ }
+ }
+ }
+
+ /** A {@code ForkJoinTask} for performing a parallel for-each operation */
+ private static class ForEachTask<S, T> extends CountedCompleter<Void> {
+ private Spliterator<S> spliterator;
+ private final Sink<S> sink;
+ private final PipelineHelper<T> helper;
+ private final long targetSize;
+
+ ForEachTask(PipelineHelper<T> helper,
+ Spliterator<S> spliterator,
+ Sink<S> sink) {
+ super(null);
+ this.spliterator = spliterator;
+ this.sink = sink;
+ this.targetSize = AbstractTask.suggestTargetSize(spliterator.estimateSize());
+ this.helper = helper;
+ }
+
+ ForEachTask(ForEachTask<S, T> parent, Spliterator<S> spliterator) {
+ super(parent);
+ this.spliterator = spliterator;
+ this.sink = parent.sink;
+ this.targetSize = parent.targetSize;
+ this.helper = parent.helper;
+ }
+
+ public void compute() {
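+            // Descriptive note: repeatedly split off a prefix of the remaining
+            // spliterator and fork it as a child task, looping on the suffix,
+            // until the chunk is small enough (or cannot be split), at which
+            // point the remaining elements are pushed into the shared sink.
+            // For short-circuiting pipelines, cancellation is checked before
+            // each iteration.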
+ boolean isShortCircuit = StreamOpFlag.SHORT_CIRCUIT.isKnown(helper.getStreamAndOpFlags());
+ while (true) {
+ if (isShortCircuit && sink.cancellationRequested()) {
+ propagateCompletion();
+ spliterator = null;
+ return;
+ }
+
+ Spliterator<S> split;
+ if (!AbstractTask.suggestSplit(spliterator, targetSize)
+ || (split = spliterator.trySplit()) == null) {
+ helper.copyInto(sink, spliterator);
+ propagateCompletion();
+ spliterator = null;
+ return;
+ }
+ else {
+ addToPendingCount(1);
+ new ForEachTask<>(this, split).fork();
+ }
+ }
+ }
+ }
+
+ /**
+ * A {@code ForkJoinTask} for performing a parallel for-each operation
+ * which visits the elements in encounter order
+ */
+ private static class ForEachOrderedTask<S, T> extends CountedCompleter<Void> {
+ private final PipelineHelper<T> helper;
+ private Spliterator<S> spliterator;
+ private final long targetSize;
+ private final ConcurrentHashMap<ForEachOrderedTask<S, T>, ForEachOrderedTask<S, T>> completionMap;
+ private final Sink<T> action;
+ private final Object lock;
+ private final ForEachOrderedTask<S, T> leftPredecessor;
+ private Node<T> node;
+
+ protected ForEachOrderedTask(PipelineHelper<T> helper,
+ Spliterator<S> spliterator,
+ Sink<T> action) {
+ super(null);
+ this.helper = helper;
+ this.spliterator = spliterator;
+ this.targetSize = AbstractTask.suggestTargetSize(spliterator.estimateSize());
+ this.completionMap = new ConcurrentHashMap<>();
+ this.action = action;
+ this.lock = new Object();
+ this.leftPredecessor = null;
+ }
+
+ ForEachOrderedTask(ForEachOrderedTask<S, T> parent,
+ Spliterator<S> spliterator,
+ ForEachOrderedTask<S, T> leftPredecessor) {
+ super(parent);
+ this.helper = parent.helper;
+ this.spliterator = spliterator;
+ this.targetSize = parent.targetSize;
+ this.completionMap = parent.completionMap;
+ this.action = parent.action;
+ this.lock = parent.lock;
+ this.leftPredecessor = leftPredecessor;
+ }
+
+ @Override
+ public final void compute() {
+ doCompute(this);
+ }
+
+        private static <S, T> void doCompute(ForEachOrderedTask<S, T> task) {
+ while (true) {
+ Spliterator<S> split;
+ if (!AbstractTask.suggestSplit(task.spliterator, task.targetSize)
+ || (split = task.spliterator.trySplit()) == null) {
+ if (task.getPendingCount() == 0) {
+ task.helper.wrapAndCopyInto(task.action, task.spliterator);
+ }
+ else {
+ Node.Builder<T> nb = task.helper.makeNodeBuilder(
+ task.helper.exactOutputSizeIfKnown(task.spliterator),
+ size -> (T[]) new Object[size]);
+ task.node = task.helper.wrapAndCopyInto(nb, task.spliterator).build();
+ }
+ task.tryComplete();
+ return;
+ }
+ else {
+ ForEachOrderedTask<S, T> leftChild = new ForEachOrderedTask<>(task, split, task.leftPredecessor);
+ ForEachOrderedTask<S, T> rightChild = new ForEachOrderedTask<>(task, task.spliterator, leftChild);
+ task.completionMap.put(leftChild, rightChild);
+ task.addToPendingCount(1); // forking
+ rightChild.addToPendingCount(1); // right pending on left child
+ if (task.leftPredecessor != null) {
+ leftChild.addToPendingCount(1); // left pending on previous subtree, except left spine
+ if (task.completionMap.replace(task.leftPredecessor, task, leftChild))
+ task.addToPendingCount(-1); // transfer my "right child" count to my left child
+ else
+ leftChild.addToPendingCount(-1); // left child is ready to go when ready
+ }
+ leftChild.fork();
+ task = rightChild;
+ }
+ }
+ }
+
+ @Override
+ public void onCompletion(CountedCompleter<?> caller) {
+ spliterator = null;
+ if (node != null) {
+ // Dump any data from this leaf into the sink
+ synchronized (lock) {
+ node.forEach(action);
+ }
+ node = null;
+ }
+ ForEachOrderedTask<S, T> victim = completionMap.remove(this);
+ if (victim != null)
+ victim.tryComplete();
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/IntStream.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,655 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.IntSummaryStatistics;
+import java.util.OptionalDouble;
+import java.util.OptionalInt;
+import java.util.PrimitiveIterator;
+import java.util.Spliterator;
+import java.util.function.BiConsumer;
+import java.util.function.Function;
+import java.util.function.IntBinaryOperator;
+import java.util.function.IntConsumer;
+import java.util.function.IntFunction;
+import java.util.function.IntPredicate;
+import java.util.function.IntToDoubleFunction;
+import java.util.function.IntToLongFunction;
+import java.util.function.IntUnaryOperator;
+import java.util.function.ObjIntConsumer;
+import java.util.function.Supplier;
+
+/**
+ * A sequence of primitive integer elements supporting sequential and parallel
+ * bulk operations. Streams support lazy intermediate operations (transforming
+ * a stream to another stream) such as {@code filter} and {@code map}, and terminal
+ * operations (consuming the contents of a stream to produce a result or
+ * side-effect), such as {@code forEach}, {@code findFirst}, and {@code
+ * iterator}. Once an operation has been performed on a stream, it
+ * is considered <em>consumed</em> and no longer usable for other operations.
+ *
+ * <p>For sequential stream pipelines, all operations are performed in the
+ * <a href="package-summary.html#Ordering">encounter order</a> of the pipeline
+ * source, if the pipeline source has a defined encounter order.
+ *
+ * <p>For parallel stream pipelines, unless otherwise specified, intermediate
+ * stream operations preserve the <a href="package-summary.html#Ordering">
+ * encounter order</a> of their source, and terminal operations
+ * respect the encounter order of their source, if the source
+ * has an encounter order. Provided that the parameters to stream operations
+ * satisfy the <a href="package-summary.html#NonInterference">non-interference
+ * requirements</a>, and excepting differences arising from the absence of
+ * a defined encounter order, the result of a stream pipeline should be
+ * stable across multiple executions of the same operations on the same source.
+ * However, the timing and thread in which side-effects occur (for those
+ * operations which are allowed to produce side-effects, such as
+ * {@link #forEach(IntConsumer)}), are explicitly nondeterministic for parallel
+ * execution of stream pipelines.
+ *
+ * <p>Unless otherwise noted, passing a {@code null} argument to any stream
+ * method may result in a {@link NullPointerException}.
+ *
+ * @apiNote
+ * Streams are not data structures; they do not manage the storage for their
+ * elements, nor do they support access to individual elements. However,
+ * you can use the {@link #iterator()} or {@link #spliterator()} operations to
+ * perform a controlled traversal.
+ *
+ * @since 1.8
+ * @see <a href="package-summary.html">java.util.stream</a>
+ */
+public interface IntStream extends BaseStream<Integer, IntStream> {
+
+ /**
+ * Returns a stream consisting of the elements of this stream that match
+ * the given predicate.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> predicate to apply to
+ * each element to determine if it should be included
+ * @return the new stream
+ */
+ IntStream filter(IntPredicate predicate);
+
+ /**
+ * Returns a stream consisting of the results of applying the given
+ * function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element
+ * @return the new stream
+ */
+ IntStream map(IntUnaryOperator mapper);
+
+ /**
+ * Returns an object-valued {@code Stream} consisting of the results of
+ * applying the given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">
+ * intermediate operation</a>.
+ *
+ * @param <U> the element type of the new stream
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element
+ * @return the new stream
+ */
+ <U> Stream<U> mapToObj(IntFunction<? extends U> mapper);
+
+ /**
+ * Returns a {@code LongStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element
+ * @return the new stream
+ */
+ LongStream mapToLong(IntToLongFunction mapper);
+
+ /**
+ * Returns a {@code DoubleStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element
+ * @return the new stream
+ */
+ DoubleStream mapToDouble(IntToDoubleFunction mapper);
+
+ /**
+ * Returns a stream consisting of the results of replacing each element of
+ * this stream with the contents of the stream produced by applying the
+ * provided mapping function to each element.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @apiNote
+ * The {@code flatMap()} operation has the effect of applying a one-to-many
+ * transformation to the elements of the stream, and then flattening the
+ * resulting elements into a new stream. For example, if {@code orders}
+ * is a stream of purchase orders, and each purchase order contains a
+ * collection of line items, then the following produces a stream of line
+ * items:
+ * <pre>{@code
+ * orders.flatMap(order -> order.getLineItems().stream())...
+ * }</pre>
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to
+ * each element which produces an {@code IntStream} of new
+ * values
+ * @return the new stream
+ * @see Stream#flatMap(Function)
+ */
+ IntStream flatMap(IntFunction<? extends IntStream> mapper);
+
+ /**
+ * Returns a stream consisting of the distinct elements of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @return the new stream
+ */
+ IntStream distinct();
+
+ /**
+ * Returns a stream consisting of the elements of this stream in sorted
+ * order.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @return the new stream
+ */
+ IntStream sorted();
+
+ /**
+ * Returns a stream consisting of the elements of this stream, additionally
+ * performing the provided action on each element as elements are consumed
+ * from the resulting stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * <p>For parallel stream pipelines, the action may be called at
+ * whatever time and in whatever thread the element is made available by the
+ * upstream operation. If the action modifies shared state,
+ * it is responsible for providing the required synchronization.
+ *
+ * @apiNote This method exists mainly to support debugging, where you want
+ * to see the elements as they flow past a certain point in a pipeline:
+ * <pre>{@code
+ *     list.stream()
+ *         .filter(filteringFunction)
+ *         .peek(e -> System.out.println("Filtered value: " + e))
+ *         .map(mappingFunction)
+ *         .peek(e -> System.out.println("Mapped value: " + e))
+ *         .collect(Collectors.toIntSummaryStatistics());
+ * }</pre>
+ *
+ * @param consumer a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements as
+ * they are consumed from the stream
+ * @return the new stream
+ */
+ IntStream peek(IntConsumer consumer);
+
+ /**
+ * Returns a stream consisting of the elements of this stream, truncated
+ * to be no longer than {@code maxSize} in length.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * stateful intermediate operation</a>.
+ *
+ * @param maxSize the number of elements the stream should be limited to
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code maxSize} is negative
+ */
+ IntStream limit(long maxSize);
+
+ /**
+ * Returns a stream consisting of the remaining elements of this stream
+ * after indexing {@code startInclusive} elements into the stream. If the
+ * {@code startInclusive} index lies past the end of this stream then an
+ * empty stream will be returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @param startInclusive the number of leading elements to skip
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code startInclusive} is negative
+ */
+ IntStream substream(long startInclusive);
+
+ /**
+ * Returns a stream consisting of the remaining elements of this stream
+ * after indexing {@code startInclusive} elements into the stream and
+ * truncated to contain no more than {@code endExclusive - startInclusive}
+ * elements. If the {@code startInclusive} index lies past the end
+ * of this stream then an empty stream will be returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * stateful intermediate operation</a>.
+ *
+ * @param startInclusive the starting position of the substream, inclusive
+ * @param endExclusive the ending position of the substream, exclusive
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code startInclusive} or
+ * {@code endExclusive} is negative or {@code startInclusive} is greater
+ * than {@code endExclusive}
+ */
+ IntStream substream(long startInclusive, long endExclusive);
+
+ /**
+ * Performs an action for each element of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * <p>For parallel stream pipelines, this operation does <em>not</em>
+ * guarantee to respect the encounter order of the stream, as doing so
+ * would sacrifice the benefit of parallelism. For any given element, the
+ * action may be performed at whatever time and in whatever thread the
+ * library chooses. If the action accesses shared state, it is
+ * responsible for providing the required synchronization.
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements
+ */
+ void forEach(IntConsumer action);
+
+ /**
+ * Performs an action for each element of this stream, guaranteeing that
+ * each element is processed in encounter order for streams that have a
+ * defined encounter order.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements
+ * @see #forEach(IntConsumer)
+ */
+ void forEachOrdered(IntConsumer action);
+
+ /**
+ * Returns an array containing the elements of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return an array containing the elements of this stream
+ */
+ int[] toArray();
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using the provided identity value and an
+ * <a href="package-summary.html#Associativity">associative</a>
+ * accumulation function, and returns the reduced value. This is equivalent
+ * to:
+ * <pre>{@code
+ * int result = identity;
+ * for (int element : this stream)
+ * result = accumulator.applyAsInt(result, element)
+ * return result;
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code identity} value must be an identity for the accumulator
+ * function. This means that for all {@code x},
+ * {@code accumulator.apply(identity, x)} is equal to {@code x}.
+ * The {@code accumulator} function must be an
+ * <a href="package-summary.html#Associativity">associative</a> function.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @apiNote Sum, min, max, and average are all special cases of reduction.
+ * Summing a stream of numbers can be expressed as:
+ *
+ * <pre>{@code
+ * int sum = integers.reduce(0, (a, b) -> a+b);
+ * }</pre>
+ *
+ * or more compactly:
+ *
+ * <pre>{@code
+ * int sum = integers.reduce(0, Integer::sum);
+ * }</pre>
+ *
+ * <p>While this may seem a more roundabout way to perform an aggregation
+ * compared to simply mutating a running total in a loop, reduction
+ * operations parallelize more gracefully, without needing additional
+ * synchronization and with greatly reduced risk of data races.
+ *
+ * @param identity the identity value for the accumulating function
+ * @param op an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for combining two values
+ * @return the result of the reduction
+ * @see #sum()
+ * @see #min()
+ * @see #max()
+ * @see #average()
+ */
+ int reduce(int identity, IntBinaryOperator op);
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using an
+ * <a href="package-summary.html#Associativity">associative</a> accumulation
+ * function, and returns an {@code OptionalInt} describing the reduced value,
+ * if any. This is equivalent to:
+ * <pre>{@code
+ * boolean foundAny = false;
+ * int result = null;
+ * for (int element : this stream) {
+ * if (!foundAny) {
+ * foundAny = true;
+ * result = element;
+ * }
+ * else
+ * result = accumulator.applyAsInt(result, element);
+ * }
+ * return foundAny ? OptionalInt.of(result) : OptionalInt.empty();
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code accumulator} function must be an
+ * <a href="package-summary.html#Associativity">associative</a> function.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param op an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for combining two values
+ * @return the result of the reduction
+ * @see #reduce(int, IntBinaryOperator)
+ */
+ OptionalInt reduce(IntBinaryOperator op);
+
+ /**
+ * Performs a <a href="package-summary.html#MutableReduction">mutable
+ * reduction</a> operation on the elements of this stream. A mutable
+ * reduction is one in which the reduced value is a mutable value holder,
+ * such as an {@code ArrayList}, and elements are incorporated by updating
+ * the state of the result, rather than by replacing the result. This
+ * produces a result equivalent to:
+ * <pre>{@code
+ * R result = resultFactory.get();
+ * for (int element : this stream)
+ * accumulator.accept(result, element);
+ * return result;
+ * }</pre>
+ *
+ * <p>Like {@link #reduce(int, IntBinaryOperator)}, {@code collect} operations
+ * can be parallelized without requiring additional synchronization.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
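+ * @apiNote An illustrative sketch (the variable {@code stream} below denotes
+ * an arbitrary {@code IntStream} and is not part of this API): the elements
+ * can be accumulated into a {@code StringBuilder} as follows:
+ * <pre>{@code
+ *     StringBuilder sb = stream.collect(StringBuilder::new,
+ *                                       (b, i) -> b.append(i).append(' '),
+ *                                       StringBuilder::append);
+ * }</pre>
+ *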
+ * @param <R> type of the result
+ * @param resultFactory a function that creates a new result container.
+ * For a parallel execution, this function may be
+ * called multiple times and must return a fresh value
+ * each time.
+ * @param accumulator an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for incorporating an additional
+ * element into a result
+ * @param combiner an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for combining two values, which
+ * must be compatible with the accumulator function
+ * @return the result of the reduction
+ * @see Stream#collect(Supplier, BiConsumer, BiConsumer)
+ */
+ <R> R collect(Supplier<R> resultFactory,
+ ObjIntConsumer<R> accumulator,
+ BiConsumer<R, R> combiner);
+
+ /**
+ * Returns the sum of elements in this stream. This is a special case
+ * of a <a href="package-summary.html#MutableReduction">reduction</a>
+ * and is equivalent to:
+ * <pre>{@code
+ * return reduce(0, Integer::sum);
+ * }</pre>
+ *
+ * @return the sum of elements in this stream
+ */
+ int sum();
+
+ /**
+ * Returns an {@code OptionalInt} describing the minimum element of this
+ * stream, or an empty optional if this stream is empty. This is a special
+ * case of a <a href="package-summary.html#MutableReduction">reduction</a>
+ * and is equivalent to:
+ * <pre>{@code
+ * return reduce(Integer::min);
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
+ *
+ * @return an {@code OptionalInt} containing the minimum element of this
+ * stream, or an empty {@code OptionalInt} if the stream is empty
+ */
+ OptionalInt min();
+
+ /**
+ * Returns an {@code OptionalInt} describing the maximum element of this
+ * stream, or an empty optional if this stream is empty. This is a special
+ * case of a <a href="package-summary.html#MutableReduction">reduction</a>
+ * and is equivalent to:
+ * <pre>{@code
+ * return reduce(Integer::max);
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return an {@code OptionalInt} containing the maximum element of this
+ * stream, or an empty {@code OptionalInt} if the stream is empty
+ */
+ OptionalInt max();
+
+ /**
+ * Returns the count of elements in this stream. This is a special case of
+ * a <a href="package-summary.html#MutableReduction">reduction</a> and is
+ * equivalent to:
+ * <pre>{@code
+ * return mapToLong(e -> 1L).sum();
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
+ *
+ * @return the count of elements in this stream
+ */
+ long count();
+
+ /**
+ * Returns an {@code OptionalDouble} describing the average of elements of
+ * this stream, or an empty optional if this stream is empty. This is a
+ * special case of a
+ * <a href="package-summary.html#MutableReduction">reduction</a>.
+ *
+ * @return an {@code OptionalDouble} containing the average of the elements of
+ * this stream, or an empty optional if the stream is empty
+ */
+ OptionalDouble average();
+
+ /**
+ * Returns an {@code IntSummaryStatistics} describing various
+ * summary data about the elements of this stream. This is a special
+ * case of a <a href="package-summary.html#MutableReduction">reduction</a>.
+ *
+ * @return an {@code IntSummaryStatistics} describing various summary data
+ * about the elements of this stream
+ */
+ IntSummaryStatistics summaryStatistics();
+
+ /**
+ * Returns whether any elements of this stream match the provided
+ * predicate. May not evaluate the predicate on all elements if not
+ * necessary for determining the result.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> predicate to apply to elements of this
+ * stream
+ * @return {@code true} if any elements of the stream match the provided
+ * predicate, otherwise {@code false}
+ */
+ boolean anyMatch(IntPredicate predicate);
+
+ /**
+ * Returns whether all elements of this stream match the provided predicate.
+ * May not evaluate the predicate on all elements if not necessary for
+ * determining the result.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> predicate to apply to elements of this
+ * stream
+ * @return {@code true} if all elements of the stream match the provided
+ * predicate, otherwise {@code false}
+ */
+ boolean allMatch(IntPredicate predicate);
+
+ /**
+ * Returns whether no elements of this stream match the provided predicate.
+ * May not evaluate the predicate on all elements if not necessary for
+ * determining the result.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> predicate to apply to elements of this
+ * stream
+ * @return {@code true} if no elements of the stream match the provided
+ * predicate, otherwise {@code false}
+ */
+ boolean noneMatch(IntPredicate predicate);
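
Because the match sink starts out holding !shortCircuitResult (see the MatchOps class later in this patch), the three match operations have well-defined answers on an empty stream: allMatch and noneMatch are vacuously true and anyMatch is false. A minimal sketch, assuming an IntStream.empty() factory exists alongside this interface:

    import java.util.stream.IntStream;

    public class EmptyStreamMatchSketch {
        public static void main(String[] args) {
            // The predicate is never evaluated on an empty stream.
            System.out.println(IntStream.empty().allMatch(i -> i > 0));   // true (vacuously)
            System.out.println(IntStream.empty().anyMatch(i -> i > 0));   // false
            System.out.println(IntStream.empty().noneMatch(i -> i > 0));  // true
        }
    }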
+
+ /**
+ * Returns an {@link OptionalInt} describing the first element of this
+ * stream (in the encounter order), or an empty {@code OptionalInt} if the
+ * stream is empty. If the stream has no encounter order, then any element
+ * may be returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @return an {@code OptionalInt} describing the first element of this stream,
+ * or an empty {@code OptionalInt} if the stream is empty
+ */
+ OptionalInt findFirst();
+
+ /**
+ * Returns an {@link OptionalInt} describing some element of the stream, or
+ * an empty {@code OptionalInt} if the stream is empty.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * <p>The behavior of this operation is explicitly nondeterministic; it is
+ * free to select any element in the stream. This is to allow for maximal
+ * performance in parallel operations; the cost is that multiple invocations
+ * on the same source may not return the same result. (If the first element
+ * in the encounter order is desired, use {@link #findFirst()} instead.)
+ *
+ * @return an {@code OptionalInt} describing some element of this stream, or
+ * an empty {@code OptionalInt} if the stream is empty
+ * @see #findFirst()
+ */
+ OptionalInt findAny();
+
+ /**
+ * Returns a {@code LongStream} consisting of the elements of this stream,
+ * converted to {@code long}.
+ *
+ * @return a {@code LongStream} consisting of the elements of this stream,
+ * converted to {@code long}
+ */
+ LongStream longs();
+
+ /**
+ * Returns a {@code DoubleStream} consisting of the elements of this stream,
+ * converted to {@code double}.
+ *
+ * @return a {@code DoubleStream} consisting of the elements of this stream,
+ * converted to {@code double}
+ */
+ DoubleStream doubles();
+
+ /**
+ * Returns a {@code Stream} consisting of the elements of this stream,
+ * each boxed to an {@code Integer}.
+ *
+ * @return a {@code Stream} consisting of the elements of this stream,
+ * each boxed to an {@code Integer}
+ */
+ Stream<Integer> boxed();
+
+ @Override
+ IntStream sequential();
+
+ @Override
+ IntStream parallel();
+
+ @Override
+ PrimitiveIterator.OfInt iterator();
+
+ @Override
+ Spliterator.OfInt spliterator();
+}
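
Taken together, the aggregate terminal operations above cover most simple numeric summaries. The sketch below exercises several of them, creating a fresh stream for each call since a stream cannot be reused once consumed; the IntStream.rangeClosed factory is assumed and is not declared in this file.

    import java.util.IntSummaryStatistics;
    import java.util.OptionalDouble;
    import java.util.stream.IntStream;

    public class IntTerminalOpsSketch {
        public static void main(String[] args) {
            int sum = IntStream.rangeClosed(1, 10).sum();                  // 55
            OptionalDouble avg = IntStream.rangeClosed(1, 10).average();   // 5.5
            boolean anyEven = IntStream.rangeClosed(1, 10)
                                       .anyMatch(i -> i % 2 == 0);         // true

            // summaryStatistics() produces count, sum, min, max and average
            // in a single pass over the elements.
            IntSummaryStatistics stats =
                    IntStream.rangeClosed(1, 10).summaryStatistics();

            System.out.println(sum + " " + avg.getAsDouble() + " "
                    + anyEven + " " + stats.getMax());                     // 55 5.5 true 10
        }
    }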
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/LongStream.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,646 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.LongSummaryStatistics;
+import java.util.OptionalDouble;
+import java.util.OptionalLong;
+import java.util.PrimitiveIterator;
+import java.util.Spliterator;
+import java.util.function.BiConsumer;
+import java.util.function.Function;
+import java.util.function.LongBinaryOperator;
+import java.util.function.LongConsumer;
+import java.util.function.LongFunction;
+import java.util.function.LongPredicate;
+import java.util.function.LongToDoubleFunction;
+import java.util.function.LongToIntFunction;
+import java.util.function.LongUnaryOperator;
+import java.util.function.ObjLongConsumer;
+import java.util.function.Supplier;
+
+/**
+ * A sequence of primitive long elements supporting sequential and parallel
+ * bulk operations. Streams support lazy intermediate operations (transforming
+ * a stream to another stream) such as {@code filter} and {@code map}, and terminal
+ * operations (consuming the contents of a stream to produce a result or
+ * side-effect), such as {@code forEach}, {@code findFirst}, and {@code
+ * iterator}. Once an operation has been performed on a stream, it
+ * is considered <em>consumed</em> and no longer usable for other operations.
+ *
+ * <p>For sequential stream pipelines, all operations are performed in the
+ * <a href="package-summary.html#Ordering">encounter order</a> of the pipeline
+ * source, if the pipeline source has a defined encounter order.
+ *
+ * <p>For parallel stream pipelines, unless otherwise specified, intermediate
+ * stream operations preserve the <a href="package-summary.html#Ordering">
+ * encounter order</a> of their source, and terminal operations
+ * respect the encounter order of their source, if the source
+ * has an encounter order. Provided that the parameters to stream operations
+ * satisfy the <a href="package-summary.html#NonInterference">non-interference
+ * requirements</a>, and excepting differences arising from the absence of
+ * a defined encounter order, the result of a stream pipeline should be
+ * stable across multiple executions of the same operations on the same source.
+ * However, the timing and thread in which side-effects occur (for those
+ * operations which are allowed to produce side-effects, such as
+ * {@link #forEach(LongConsumer)}), are explicitly nondeterministic for parallel
+ * execution of stream pipelines.
+ *
+ * <p>Unless otherwise noted, passing a {@code null} argument to any stream
+ * method may result in a {@link NullPointerException}.
+ *
+ * @apiNote
+ * Streams are not data structures; they do not manage the storage for their
+ * elements, nor do they support access to individual elements. However,
+ * you can use the {@link #iterator()} or {@link #spliterator()} operations to
+ * perform a controlled traversal.
+ *
+ * @since 1.8
+ * @see <a href="package-summary.html">java.util.stream</a>
+ */
+public interface LongStream extends BaseStream<Long, LongStream> {
+
+ /**
+ * Returns a stream consisting of the elements of this stream that match
+ * the given predicate.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> predicate to apply to
+ * each element to determine if it should be included
+ * @return the new stream
+ */
+ LongStream filter(LongPredicate predicate);
+
+ /**
+ * Returns a stream consisting of the results of applying the given
+ * function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element
+ * @return the new stream
+ */
+ LongStream map(LongUnaryOperator mapper);
+
+ /**
+ * Returns an object-valued {@code Stream} consisting of the results of
+ * applying the given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">
+ * intermediate operation</a>.
+ *
+ * @param <U> the element type of the new stream
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element
+ * @return the new stream
+ */
+ <U> Stream<U> mapToObj(LongFunction<? extends U> mapper);
+
+ /**
+ * Returns an {@code IntStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element
+ * @return the new stream
+ */
+ IntStream mapToInt(LongToIntFunction mapper);
+
+ /**
+ * Returns a {@code DoubleStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element
+ * @return the new stream
+ */
+ DoubleStream mapToDouble(LongToDoubleFunction mapper);
+
+ /**
+ * Returns a stream consisting of the results of replacing each element of
+ * this stream with the contents of the stream produced by applying the
+ * provided mapping function to each element.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @apiNote
+ * The {@code flatMap()} operation has the effect of applying a one-to-many
+ * transformation to the elements of the stream, and then flattening the
+ * resulting elements into a new stream. For example, if {@code orders}
+ * is a stream of purchase orders, and each purchase order contains a
+ * collection of line items, then the following produces a stream of line
+ * items:
+ * <pre>{@code
+ * orderStream.flatMap(order -> order.getLineItems().stream())...
+ * }</pre>
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to
+ * each element which produces a {@code LongStream} of new
+ * values
+ * @return the new stream
+ * @see Stream#flatMap(Function)
+ */
+ LongStream flatMap(LongFunction<? extends LongStream> mapper);
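
The apiNote above uses an object stream of orders; the same idea applied directly to a LongStream looks like the sketch below, where each element is replaced by a two-element stream and the results are flattened. LongStream.of is an assumed source factory; toArray() is declared further down in this interface.

    import java.util.Arrays;
    import java.util.stream.LongStream;

    public class LongFlatMapSketch {
        public static void main(String[] args) {
            // Each x becomes the pair (x, -x); flatMap flattens the pairs
            // into a single stream: 1, -1, 2, -2, 3, -3.
            long[] signed = LongStream.of(1, 2, 3)             // assumed factory
                    .flatMap(x -> LongStream.of(x, -x))
                    .toArray();
            System.out.println(Arrays.toString(signed));       // [1, -1, 2, -2, 3, -3]
        }
    }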
+
+ /**
+ * Returns a stream consisting of the distinct elements of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @return the new stream
+ */
+ LongStream distinct();
+
+ /**
+ * Returns a stream consisting of the elements of this stream in sorted
+ * order.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @return the new stream
+ */
+ LongStream sorted();
+
+ /**
+ * Returns a stream consisting of the elements of this stream, additionally
+ * performing the provided action on each element as elements are consumed
+ * from the resulting stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * <p>For parallel stream pipelines, the action may be called at
+ * whatever time and in whatever thread the element is made available by the
+ * upstream operation. If the action modifies shared state,
+ * it is responsible for providing the required synchronization.
+ *
+ * @apiNote This method exists mainly to support debugging, where you want
+ * to see the elements as they flow past a certain point in a pipeline:
+ * <pre>{@code
+ * list.stream()
+ * .filter(filteringFunction)
+ * .peek(e -> { System.out.println("Filtered value: " + e); })
+ * .map(mappingFunction)
+ * .peek(e -> { System.out.println("Mapped value: " + e); })
+ * .collect(Collectors.toLongSummaryStatistics());
+ * }</pre>
+ *
+ * @param consumer a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements as
+ * they are consumed from the stream
+ * @return the new stream
+ */
+ LongStream peek(LongConsumer consumer);
+
+ /**
+ * Returns a stream consisting of the elements of this stream, truncated
+ * to be no longer than {@code maxSize} in length.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * stateful intermediate operation</a>.
+ *
+ * @param maxSize the number of elements the stream should be limited to
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code maxSize} is negative
+ */
+ LongStream limit(long maxSize);
+
+ /**
+ * Returns a stream consisting of the remaining elements of this stream
+ * after discarding the first {@code startInclusive} elements. If this
+ * stream contains fewer than {@code startInclusive} elements, an empty
+ * stream is returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @param startInclusive the number of leading elements to skip
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code startInclusive} is negative
+ */
+ LongStream substream(long startInclusive);
+
+ /**
+ * Returns a stream consisting of the remaining elements of this stream
+ * after discarding the first {@code startInclusive} elements, truncated
+ * to contain no more than {@code endExclusive - startInclusive}
+ * elements. If this stream contains fewer than {@code startInclusive}
+ * elements, an empty stream is returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * stateful intermediate operation</a>.
+ *
+ * @param startInclusive the starting position of the substream, inclusive
+ * @param endExclusive the ending position of the substream, exclusive
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code startInclusive} or
+ * {@code endExclusive} is negative or {@code startInclusive} is greater
+ * than {@code endExclusive}
+ */
+ LongStream substream(long startInclusive, long endExclusive);
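
For intuition, the two substream forms behave like an index-based slice of the encounter order. The substream name was later replaced in the final java.util.stream API by skip (together with limit), so the sketch below is written against those shipped names; treat the mapping — substream(n) as skip(n), substream(n, m) as skip(n).limit(m - n) — as an illustrative assumption rather than part of this patch.

    import java.util.Arrays;
    import java.util.stream.LongStream;

    public class SubstreamSketch {
        public static void main(String[] args) {
            // substream(2): drop the first two elements.
            long[] tail = LongStream.of(10, 20, 30, 40, 50).skip(2).toArray();
            System.out.println(Arrays.toString(tail));       // [30, 40, 50]

            // substream(1, 4): drop one element, keep at most 4 - 1 = 3.
            long[] slice = LongStream.of(10, 20, 30, 40, 50).skip(1).limit(3).toArray();
            System.out.println(Arrays.toString(slice));      // [20, 30, 40]
        }
    }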
+
+ /**
+ * Performs an action for each element of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * <p>For parallel stream pipelines, this operation does <em>not</em>
+ * guarantee to respect the encounter order of the stream, as doing so
+ * would sacrifice the benefit of parallelism. For any given element, the
+ * action may be performed at whatever time and in whatever thread the
+ * library chooses. If the action accesses shared state, it is
+ * responsible for providing the required synchronization.
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements
+ */
+ void forEach(LongConsumer action);
+
+ /**
+ * Performs an action for each element of this stream, guaranteeing that
+ * each element is processed in encounter order for streams that have a
+ * defined encounter order.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements
+ * @see #forEach(LongConsumer)
+ */
+ void forEachOrdered(LongConsumer action);
+
+ /**
+ * Returns an array containing the elements of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return an array containing the elements of this stream
+ */
+ long[] toArray();
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using the provided identity value and an
+ * <a href="package-summary.html#Associativity">associative</a>
+ * accumulation function, and returns the reduced value. This is equivalent
+ * to:
+ * <pre>{@code
+ * long result = identity;
+ * for (long element : this stream)
+ * result = accumulator.applyAsLong(result, element);
+ * return result;
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code identity} value must be an identity for the accumulator
+ * function. This means that for all {@code x},
+ * {@code accumulator.apply(identity, x)} is equal to {@code x}.
+ * The {@code accumulator} function must be an
+ * <a href="package-summary.html#Associativity">associative</a> function.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @apiNote Sum, min, max, and average are all special cases of reduction.
+ * Summing a stream of numbers can be expressed as:
+ *
+ * <pre>{@code
+ * long sum = integers.reduce(0, (a, b) -> a+b);
+ * }</pre>
+ *
+ * or more compactly:
+ *
+ * <pre>{@code
+ * long sum = integers.reduce(0, Long::sum);
+ * }</pre>
+ *
+ * <p>While this may seem a more roundabout way to perform an aggregation
+ * compared to simply mutating a running total in a loop, reduction
+ * operations parallelize more gracefully, without needing additional
+ * synchronization and with greatly reduced risk of data races.
+ *
+ * @param identity the identity value for the accumulating function
+ * @param op an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for combining two values
+ * @return the result of the reduction
+ * @see #sum()
+ * @see #min()
+ * @see #max()
+ * @see #average()
+ */
+ long reduce(long identity, LongBinaryOperator op);
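
The apiNote shows summation; any associative, non-interfering operation works with the same pattern. The sketch below computes 10! as a reduction, using 1 as the identity of multiplication; the LongStream.rangeClosed factory is assumed.

    import java.util.stream.LongStream;

    public class LongReduceSketch {
        public static void main(String[] args) {
            // (a, b) -> a * b is associative and 1 is its identity, so this
            // reduction may safely be run in parallel.
            long factorial = LongStream.rangeClosed(1, 10)   // assumed factory
                    .reduce(1L, (a, b) -> a * b);
            System.out.println(factorial);                   // 3628800
        }
    }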
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using an
+ * <a href="package-summary.html#Associativity">associative</a> accumulation
+ * function, and returns an {@code OptionalLong} describing the reduced value,
+ * if any. This is equivalent to:
+ * <pre>{@code
+ * boolean foundAny = false;
+ * long result = 0;
+ * for (long element : this stream) {
+ * if (!foundAny) {
+ * foundAny = true;
+ * result = element;
+ * }
+ * else
+ * result = accumulator.applyAsLong(result, element);
+ * }
+ * return foundAny ? OptionalLong.of(result) : OptionalLong.empty();
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code accumulator} function must be an
+ * <a href="package-summary.html#Associativity">associative</a> function.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param op an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for combining two values
+ * @return the result of the reduction
+ * @see #reduce(long, LongBinaryOperator)
+ */
+ OptionalLong reduce(LongBinaryOperator op);
+
+ /**
+ * Performs a <a href="package-summary.html#MutableReduction">mutable
+ * reduction</a> operation on the elements of this stream. A mutable
+ * reduction is one in which the reduced value is a mutable value holder,
+ * such as an {@code ArrayList}, and elements are incorporated by updating
+ * the state of the result, rather than by replacing the result. This
+ * produces a result equivalent to:
+ * <pre>{@code
+ * R result = resultFactory.get();
+ * for (long element : this stream)
+ * accumulator.accept(result, element);
+ * return result;
+ * }</pre>
+ *
+ * <p>Like {@link #reduce(long, LongBinaryOperator)}, {@code collect} operations
+ * can be parallelized without requiring additional synchronization.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param <R> type of the result
+ * @param resultFactory a function that creates a new result container.
+ * For a parallel execution, this function may be
+ * called multiple times and must return a fresh value
+ * each time.
+ * @param accumulator an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for incorporating an additional
+ * element into a result
+ * @param combiner an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for combining two values, which
+ * must be compatible with the accumulator function
+ * @return the result of the reduction
+ * @see Stream#collect(Supplier, BiConsumer, BiConsumer)
+ */
+ <R> R collect(Supplier<R> resultFactory,
+ ObjLongConsumer<R> accumulator,
+ BiConsumer<R, R> combiner);
+
+ /**
+ * Returns the sum of elements in this stream. This is a special case
+ * of a <a href="package-summary.html#MutableReduction">reduction</a>
+ * and is equivalent to:
+ * <pre>{@code
+ * return reduce(0, Long::sum);
+ * }</pre>
+ *
+ * @return the sum of elements in this stream
+ */
+ long sum();
+
+ /**
+ * Returns an {@code OptionalLong} describing the minimum element of this
+ * stream, or an empty optional if this stream is empty. This is a special
+ * case of a <a href="package-summary.html#MutableReduction">reduction</a>
+ * and is equivalent to:
+ * <pre>{@code
+ * return reduce(Long::min);
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
+ *
+ * @return an {@code OptionalLong} containing the minimum element of this
+ * stream, or an empty {@code OptionalLong} if the stream is empty
+ */
+ OptionalLong min();
+
+ /**
+ * Returns an {@code OptionalLong} describing the maximum element of this
+ * stream, or an empty optional if this stream is empty. This is a special
+ * case of a <a href="package-summary.html#MutableReduction">reduction</a>
+ * and is equivalent to:
+ * <pre>{@code
+ * return reduce(Long::max);
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return an {@code OptionalLong} containing the maximum element of this
+ * stream, or an empty {@code OptionalLong} if the stream is empty
+ */
+ OptionalLong max();
+
+ /**
+ * Returns the count of elements in this stream. This is a special case of
+ * a <a href="package-summary.html#MutableReduction">reduction</a> and is
+ * equivalent to:
+ * <pre>{@code
+ * return map(e -> 1L).sum();
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
+ *
+ * @return the count of elements in this stream
+ */
+ long count();
+
+ /**
+ * Returns an {@code OptionalDouble} describing the average of elements of
+ * this stream, or an empty optional if this stream is empty. This is a
+ * special case of a
+ * <a href="package-summary.html#MutableReduction">reduction</a>.
+ *
+ * @return an {@code OptionalDouble} containing the average of the elements of
+ * this stream, or an empty optional if the stream is empty
+ */
+ OptionalDouble average();
+
+ /**
+ * Returns a {@code LongSummaryStatistics} describing various summary data
+ * about the elements of this stream. This is a special case of a
+ * <a href="package-summary.html#MutableReduction">reduction</a>.
+ *
+ * @return a {@code LongSummaryStatistics} describing various summary data
+ * about the elements of this stream
+ */
+ LongSummaryStatistics summaryStatistics();
+
+ /**
+ * Returns whether any elements of this stream match the provided
+ * predicate. May not evaluate the predicate on all elements if not
+ * necessary for determining the result.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> predicate to apply to elements of this
+ * stream
+ * @return {@code true} if any elements of the stream match the provided
+ * predicate, otherwise {@code false}
+ */
+ boolean anyMatch(LongPredicate predicate);
+
+ /**
+ * Returns whether all elements of this stream match the provided predicate.
+ * May not evaluate the predicate on all elements if not necessary for
+ * determining the result.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> predicate to apply to elements of this
+ * stream
+ * @return {@code true} if all elements of the stream match the provided
+ * predicate, otherwise {@code false}
+ */
+ boolean allMatch(LongPredicate predicate);
+
+ /**
+ * Returns whether no elements of this stream match the provided predicate.
+ * May not evaluate the predicate on all elements if not necessary for
+ * determining the result.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> predicate to apply to elements of this
+ * stream
+ * @return {@code true} if no elements of the stream match the provided
+ * predicate, otherwise {@code false}
+ */
+ boolean noneMatch(LongPredicate predicate);
+
+ /**
+ * Returns an {@link OptionalLong} describing the first element of this
+ * stream (in the encounter order), or an empty {@code OptionalLong} if the
+ * stream is empty. If the stream has no encounter order, then any element
+ * may be returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @return an {@code OptionalLong} describing the first element of this
+ * stream, or an empty {@code OptionalLong} if the stream is empty
+ */
+ OptionalLong findFirst();
+
+ /**
+ * Returns an {@link OptionalLong} describing some element of the stream, or
+ * an empty {@code OptionalLong} if the stream is empty.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * <p>The behavior of this operation is explicitly nondeterministic; it is
+ * free to select any element in the stream. This is to allow for maximal
+ * performance in parallel operations; the cost is that multiple invocations
+ * on the same source may not return the same result. (If the first element
+ * in the encounter order is desired, use {@link #findFirst()} instead.)
+ *
+ * @return an {@code OptionalLong} describing some element of this stream,
+ * or an empty {@code OptionalLong} if the stream is empty
+ * @see #findFirst()
+ */
+ OptionalLong findAny();
+
+ /**
+ * Returns a {@code DoubleStream} consisting of the elements of this stream,
+ * converted to {@code double}.
+ *
+ * @return a {@code DoubleStream} consisting of the elements of this stream,
+ * converted to {@code double}
+ */
+ DoubleStream doubles();
+
+ /**
+ * Returns a {@code Stream} consisting of the elements of this stream,
+ * each boxed to a {@code Long}.
+ *
+ * @return a {@code Stream} consisting of the elements of this stream,
+ * each boxed to a {@code Long}
+ */
+ Stream<Long> boxed();
+
+ @Override
+ LongStream sequential();
+
+ @Override
+ LongStream parallel();
+
+ @Override
+ PrimitiveIterator.OfLong iterator();
+
+ @Override
+ Spliterator.OfLong spliterator();
+}
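
Since min(), max() and the one-argument reduce(op) above have no identity value to fall back on, they report absence through OptionalLong rather than a sentinel value. A short sketch of handling both the present and the empty case, with LongStream.of assumed as the source factory:

    import java.util.OptionalLong;
    import java.util.stream.LongStream;

    public class OptionalLongSketch {
        public static void main(String[] args) {
            OptionalLong max = LongStream.of(3, 1, 4, 1, 5).max();
            System.out.println(max.getAsLong());              // 5

            // An empty stream yields an empty OptionalLong.
            OptionalLong none = LongStream.of().reduce(Long::sum);
            System.out.println(none.isPresent());             // false
            System.out.println(none.orElse(0L));              // 0
        }
    }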
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/MatchOps.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Objects;
+import java.util.Spliterator;
+import java.util.function.DoublePredicate;
+import java.util.function.IntPredicate;
+import java.util.function.LongPredicate;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+
+/**
+ * Factory for instances of a short-circuiting {@code TerminalOp} that implement
+ * quantified predicate matching on the elements of a stream. Supported variants
+ * include match-all, match-any, and match-none.
+ *
+ * @since 1.8
+ */
+final class MatchOps {
+
+ private MatchOps() { }
+
+ /**
+ * Enum describing quantified match options -- all match, any match, none
+ * match.
+ */
+ enum MatchKind {
+ /** Do any elements match the predicate? */
+ ANY(true, true),
+
+ /** Do all elements match the predicate? */
+ ALL(false, false),
+
+ /** Do no elements match the predicate? */
+ NONE(true, false);
+
+ private final boolean stopOnPredicateMatches;
+ private final boolean shortCircuitResult;
+
+ private MatchKind(boolean stopOnPredicateMatches,
+ boolean shortCircuitResult) {
+ this.stopOnPredicateMatches = stopOnPredicateMatches;
+ this.shortCircuitResult = shortCircuitResult;
+ }
+ }
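
The two flags fully determine the quantifier: the sink stops as soon as predicate(t) == stopOnPredicateMatches and then reports shortCircuitResult; if every element is consumed without stopping, the answer is !shortCircuitResult. The standalone sketch below reproduces that decision logic outside the package-private types in this file, purely as an illustration of the encoding.

    import java.util.function.IntPredicate;

    public class MatchKindSketch {
        // Flag pairs as in the enum above:
        //   ANY  = (true,  true)   stop on the first match,    answer true
        //   ALL  = (false, false)  stop on the first mismatch, answer false
        //   NONE = (true,  false)  stop on the first match,    answer false
        static boolean match(int[] values, IntPredicate p,
                             boolean stopOnPredicateMatches, boolean shortCircuitResult) {
            for (int v : values) {
                if (p.test(v) == stopOnPredicateMatches) {
                    return shortCircuitResult;    // short-circuit
                }
            }
            return !shortCircuitResult;           // ran to completion
        }

        public static void main(String[] args) {
            int[] data = {2, 4, 5};
            IntPredicate even = i -> i % 2 == 0;
            System.out.println(match(data, even, true, true));    // anyMatch  -> true
            System.out.println(match(data, even, false, false));  // allMatch  -> false
            System.out.println(match(data, even, true, false));   // noneMatch -> false
        }
    }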
+
+ /**
+ * Constructs a quantified predicate matcher for a Stream.
+ *
+ * @param <T> the type of stream elements
+ * @param predicate the {@code Predicate} to apply to stream elements
+ * @param matchKind the kind of quantified match (all, any, none)
+ * @return a {@code TerminalOp} implementing the desired quantified match
+ * criteria
+ */
+ public static <T> TerminalOp<T, Boolean> makeRef(Predicate<? super T> predicate,
+ MatchKind matchKind) {
+ Objects.requireNonNull(predicate);
+ Objects.requireNonNull(matchKind);
+ class MatchSink extends BooleanTerminalSink<T> {
+ MatchSink() {
+ super(matchKind);
+ }
+
+ @Override
+ public void accept(T t) {
+ if (!stop && predicate.test(t) == matchKind.stopOnPredicateMatches) {
+ stop = true;
+ value = matchKind.shortCircuitResult;
+ }
+ }
+ }
+
+ // @@@ Workaround for JDK-8011591 -- when fixed, replace s with constructor ref
+ Supplier<BooleanTerminalSink<T>> s = new Supplier<BooleanTerminalSink<T>>() {
+ @Override
+ public BooleanTerminalSink<T> get() {return new MatchSink();}
+ };
+ return new MatchOp<>(StreamShape.REFERENCE, matchKind, s);
+ }
+
+ /**
+ * Constructs a quantified predicate matcher for an {@code IntStream}.
+ *
+ * @param predicate the {@code Predicate} to apply to stream elements
+ * @param matchKind the kind of quantified match (all, any, none)
+ * @return a {@code TerminalOp} implementing the desired quantified match
+ * criteria
+ */
+ public static TerminalOp<Integer, Boolean> makeInt(IntPredicate predicate,
+ MatchKind matchKind) {
+ Objects.requireNonNull(predicate);
+ Objects.requireNonNull(matchKind);
+ class MatchSink extends BooleanTerminalSink<Integer> implements Sink.OfInt {
+ MatchSink() {
+ super(matchKind);
+ }
+
+ @Override
+ public void accept(int t) {
+ if (!stop && predicate.test(t) == matchKind.stopOnPredicateMatches) {
+ stop = true;
+ value = matchKind.shortCircuitResult;
+ }
+ }
+ }
+
+ // @@@ Workaround for JDK-8011591 -- when fixed, replace s with constructor ref
+ Supplier<BooleanTerminalSink<Integer>> s = new Supplier<BooleanTerminalSink<Integer>>() {
+ @Override
+ public BooleanTerminalSink<Integer> get() {return new MatchSink();}
+ };
+ return new MatchOp<>(StreamShape.INT_VALUE, matchKind, s);
+ }
+
+ /**
+ * Constructs a quantified predicate matcher for a {@code LongStream}.
+ *
+ * @param predicate the {@code Predicate} to apply to stream elements
+ * @param matchKind the kind of quantified match (all, any, none)
+ * @return a {@code TerminalOp} implementing the desired quantified match
+ * criteria
+ */
+ public static TerminalOp<Long, Boolean> makeLong(LongPredicate predicate,
+ MatchKind matchKind) {
+ Objects.requireNonNull(predicate);
+ Objects.requireNonNull(matchKind);
+ class MatchSink extends BooleanTerminalSink<Long> implements Sink.OfLong {
+
+ MatchSink() {
+ super(matchKind);
+ }
+
+ @Override
+ public void accept(long t) {
+ if (!stop && predicate.test(t) == matchKind.stopOnPredicateMatches) {
+ stop = true;
+ value = matchKind.shortCircuitResult;
+ }
+ }
+ }
+
+ // @@@ Workaround for JDK-8011591 -- when fixed, replace s with constructor ref
+ Supplier<BooleanTerminalSink<Long>> s = new Supplier<BooleanTerminalSink<Long>>() {
+ @Override
+ public BooleanTerminalSink<Long> get() {return new MatchSink();}
+ };
+ return new MatchOp<>(StreamShape.LONG_VALUE, matchKind, s);
+ }
+
+ /**
+ * Constructs a quantified predicate matcher for a {@code DoubleStream}.
+ *
+ * @param predicate the {@code Predicate} to apply to stream elements
+ * @param matchKind the kind of quantified match (all, any, none)
+ * @return a {@code TerminalOp} implementing the desired quantified match
+ * criteria
+ */
+ public static TerminalOp<Double, Boolean> makeDouble(DoublePredicate predicate,
+ MatchKind matchKind) {
+ Objects.requireNonNull(predicate);
+ Objects.requireNonNull(matchKind);
+ class MatchSink extends BooleanTerminalSink<Double> implements Sink.OfDouble {
+
+ MatchSink() {
+ super(matchKind);
+ }
+
+ @Override
+ public void accept(double t) {
+ if (!stop && predicate.test(t) == matchKind.stopOnPredicateMatches) {
+ stop = true;
+ value = matchKind.shortCircuitResult;
+ }
+ }
+ }
+
+ // @@@ Workaround for JDK-8011591 -- when fixed, replace s with constructor ref
+ Supplier<BooleanTerminalSink<Double>> s = new Supplier<BooleanTerminalSink<Double>>() {
+ @Override
+ public BooleanTerminalSink<Double> get() {return new MatchSink();}
+ };
+ return new MatchOp<>(StreamShape.DOUBLE_VALUE, matchKind, s);
+ }
+
+ /**
+ * A short-circuiting {@code TerminalOp} that evaluates a predicate on the
+ * elements of a stream and determines whether all, any or none of those
+ * elements match the predicate.
+ *
+ * @param <T> the output type of the stream pipeline
+ */
+ private static final class MatchOp<T> implements TerminalOp<T, Boolean> {
+ private final StreamShape inputShape;
+ final MatchKind matchKind;
+ final Supplier<BooleanTerminalSink<T>> sinkSupplier;
+
+ /**
+ * Constructs a {@code MatchOp}.
+ *
+ * @param shape the output shape of the stream pipeline
+ * @param matchKind the kind of quantified match (all, any, none)
+ * @param sinkSupplier {@code Supplier} for a {@code Sink} of the
+ * appropriate shape which implements the matching operation
+ */
+ MatchOp(StreamShape shape,
+ MatchKind matchKind,
+ Supplier<BooleanTerminalSink<T>> sinkSupplier) {
+ this.inputShape = shape;
+ this.matchKind = matchKind;
+ this.sinkSupplier = sinkSupplier;
+ }
+
+ @Override
+ public int getOpFlags() {
+ return StreamOpFlag.IS_SHORT_CIRCUIT | StreamOpFlag.NOT_ORDERED;
+ }
+
+ @Override
+ public StreamShape inputShape() {
+ return inputShape;
+ }
+
+ @Override
+ public <S> Boolean evaluateSequential(PipelineHelper<T> helper,
+ Spliterator<S> spliterator) {
+ return helper.wrapAndCopyInto(sinkSupplier.get(), spliterator).getAndClearState();
+ }
+
+ @Override
+ public <S> Boolean evaluateParallel(PipelineHelper<T> helper,
+ Spliterator<S> spliterator) {
+ // Approach for parallel implementation:
+ // - Decompose as per usual
+ // - run match on leaf chunks, call result "b"
+ // - if b == matchKind.shortCircuitResult, complete early and return b
+ // - else if we complete normally, return !shortCircuitResult
+
+ return new MatchTask<>(this, helper, spliterator).invoke();
+ }
+ }
+
+ /**
+ * Boolean-specific terminal sink to avoid the boxing costs when returning
+ * results. Subclasses implement the shape-specific functionality.
+ *
+ * @param <T> The output type of the stream pipeline
+ */
+ private static abstract class BooleanTerminalSink<T> implements Sink<T> {
+ boolean stop;
+ boolean value;
+
+ BooleanTerminalSink(MatchKind matchKind) {
+ value = !matchKind.shortCircuitResult;
+ }
+
+ public boolean getAndClearState() {
+ return value;
+ }
+
+ @Override
+ public boolean cancellationRequested() {
+ return stop;
+ }
+ }
+
+ /**
+ * ForkJoinTask implementation of a parallel short-circuiting
+ * quantified match
+ *
+ * @param <P_IN> the type of source elements for the pipeline
+ * @param <P_OUT> the type of output elements for the pipeline
+ */
+ private static final class MatchTask<P_IN, P_OUT>
+ extends AbstractShortCircuitTask<P_IN, P_OUT, Boolean, MatchTask<P_IN, P_OUT>> {
+ private final MatchOp<P_OUT> op;
+
+ /**
+ * Constructor for root node
+ */
+ MatchTask(MatchOp<P_OUT> op, PipelineHelper<P_OUT> helper,
+ Spliterator<P_IN> spliterator) {
+ super(helper, spliterator);
+ this.op = op;
+ }
+
+ /**
+ * Constructor for non-root node
+ */
+ MatchTask(MatchTask<P_IN, P_OUT> parent, Spliterator<P_IN> spliterator) {
+ super(parent, spliterator);
+ this.op = parent.op;
+ }
+
+ @Override
+ protected MatchTask<P_IN, P_OUT> makeChild(Spliterator<P_IN> spliterator) {
+ return new MatchTask<>(this, spliterator);
+ }
+
+ @Override
+ protected Boolean doLeaf() {
+ boolean b = helper.wrapAndCopyInto(op.sinkSupplier.get(), spliterator).getAndClearState();
+ if (b == op.matchKind.shortCircuitResult)
+ shortCircuit(b);
+ return null;
+ }
+
+ @Override
+ protected Boolean getEmptyResult() {
+ return !op.matchKind.shortCircuitResult;
+ }
+ }
+}
+
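
At the public API these ops back anyMatch, allMatch and noneMatch, and the IS_SHORT_CIRCUIT flag means a pipeline may stop consuming elements once the answer is known. The sketch below makes that visible with peek; in a sequential run the implementation typically stops at the first even element, though exactly which elements get examined is not pinned down by the specification. IntStream.of and peek are assumed to be available as declared elsewhere in this patch.

    import java.util.stream.IntStream;

    public class ShortCircuitSketch {
        public static void main(String[] args) {
            boolean found = IntStream.of(1, 3, 4, 7, 8)            // assumed factory
                    .peek(i -> System.out.println("examined " + i))
                    .anyMatch(i -> i % 2 == 0);
            // Typically prints "examined 1", "examined 3", "examined 4", then:
            System.out.println(found);                             // true
        }
    }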
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/Node.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,557 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Spliterator;
+import java.util.function.Consumer;
+import java.util.function.DoubleConsumer;
+import java.util.function.IntConsumer;
+import java.util.function.IntFunction;
+import java.util.function.LongConsumer;
+
+/**
+ * An immutable container for describing an ordered sequence of elements of some
+ * type {@code T}.
+ *
+ * <p>A {@code Node} contains a fixed number of elements, which can be accessed
+ * via the {@link #count}, {@link #spliterator}, {@link #forEach},
+ * {@link #asArray}, or {@link #copyInto} methods. A {@code Node} may have zero
+ * or more child {@code Node}s; if it has no children (accessed via
+ * {@link #getChildCount} and {@link #getChild(int)}), it is considered <em>flat
+ * </em> or a <em>leaf</em>; if it has children, it is considered an
+ * <em>internal</em> node. The size of an internal node is the sum of sizes of
+ * its children.
+ *
+ * @apiNote
+ * <p>A {@code Node} typically does not store the elements directly, but instead
+ * mediates access to one or more existing (effectively immutable) data
+ * structures such as a {@code Collection}, array, or a set of other
+ * {@code Node}s. Commonly {@code Node}s are formed into a tree whose shape
+ * corresponds to the computation tree that produced the elements that are
+ * contained in the leaf nodes. The use of {@code Node} within the stream
+ * framework is largely to avoid copying data unnecessarily during parallel
+ * operations.
+ *
+ * @param <T> the type of elements.
+ * @since 1.8
+ */
+interface Node<T> {
+
+ /**
+ * Returns a {@link Spliterator} describing the elements contained in this
+ * {@code Node}.
+ *
+ * @return a {@code Spliterator} describing the elements contained in this
+ * {@code Node}
+ */
+ Spliterator<T> spliterator();
+
+ /**
+ * Traverses the elements of this node, and invokes the provided
+ * {@code Consumer} with each element. Elements are provided in encounter
+ * order if the source for the {@code Node} has a defined encounter order.
+ *
+ * @param consumer a {@code Consumer} that is to be invoked with each
+ * element in this {@code Node}
+ */
+ void forEach(Consumer<? super T> consumer);
+
+ /**
+ * Returns the number of child nodes of this node.
+ *
+ * @implSpec The default implementation returns zero.
+ *
+ * @return the number of child nodes
+ */
+ default int getChildCount() {
+ return 0;
+ }
+
+ /**
+ * Retrieves the child {@code Node} at a given index.
+ *
+ * @implSpec The default implementation always throws
+ * {@code IndexOutOfBoundsException}.
+ *
+ * @param i the index to the child node
+ * @return the child node
+ * @throws IndexOutOfBoundsException if the index is less than 0 or greater
+ * than or equal to the number of child nodes
+ */
+ default Node<T> getChild(int i) {
+ throw new IndexOutOfBoundsException();
+ }
+
+ /**
+ * Provides an array view of the contents of this node.
+ *
+ * <p>Depending on the underlying implementation, this may return a
+ * reference to an internal array rather than a copy. Since the returned
+ * array may be shared, it should not be modified. The
+ * {@code generator} function may be consulted to create the array if a new
+ * array needs to be created.
+ *
+ * @param generator a factory function which takes an integer parameter and
+ * returns a new, empty array of that size and of the appropriate
+ * array type
+ * @return an array containing the contents of this {@code Node}
+ */
+ T[] asArray(IntFunction<T[]> generator);
+
+ /**
+ * Copies the content of this {@code Node} into an array, starting at a
+ * given offset into the array. It is the caller's responsibility to ensure
+ * there is sufficient room in the array.
+ *
+ * @param array the array into which to copy the contents of this
+ * {@code Node}
+ * @param offset the starting offset within the array
+ * @throws IndexOutOfBoundsException if copying would cause access of data
+ * outside array bounds
+ * @throws NullPointerException if {@code array} is {@code null}
+ */
+ void copyInto(T[] array, int offset);
+
+ /**
+ * Gets the {@code StreamShape} associated with this {@code Node}.
+ *
+ * @implSpec The default in {@code Node} returns
+ * {@code StreamShape.REFERENCE}
+ *
+ * @return the stream shape associated with this node
+ */
+ default StreamShape getShape() {
+ return StreamShape.REFERENCE;
+ }
+
+ /**
+ * Returns the number of elements contained in this node.
+ *
+ * @return the number of elements contained in this node
+ */
+ long count();
+
+ /**
+ * A mutable builder for a {@code Node} that implements {@link Sink}, which
+ * builds a flat node containing the elements that have been pushed to it.
+ */
+ interface Builder<T> extends Sink<T> {
+
+ /**
+ * Builds the node. Should be called after all elements have been
+ * pushed and signalled with an invocation of {@link Sink#end()}.
+ *
+ * @return the resulting {@code Node}
+ */
+ Node<T> build();
+
+ /**
+ * Specialized {@code Node.Builder} for int elements
+ */
+ interface OfInt extends Node.Builder<Integer>, Sink.OfInt {
+ @Override
+ Node.OfInt build();
+ }
+
+ /**
+ * Specialized {@code Node.Builder} for long elements
+ */
+ interface OfLong extends Node.Builder<Long>, Sink.OfLong {
+ @Override
+ Node.OfLong build();
+ }
+
+ /**
+ * Specialized {@code Node.Builder} for double elements
+ */
+ interface OfDouble extends Node.Builder<Double>, Sink.OfDouble {
+ @Override
+ Node.OfDouble build();
+ }
+ }
+
+ /**
+ * Specialized {@code Node} for int elements
+ */
+ interface OfInt extends Node<Integer> {
+
+ /**
+ * {@inheritDoc}
+ *
+ * @return a {@link Spliterator.OfInt} describing the elements of this
+ * node
+ */
+ @Override
+ Spliterator.OfInt spliterator();
+
+ /**
+ * {@inheritDoc}
+ *
+ * @param consumer a {@code Consumer} that is to be invoked with each
+ * element in this {@code Node}. If this is an
+ * {@code IntConsumer}, it is cast to {@code IntConsumer} so the
+ * elements may be processed without boxing.
+ */
+ @Override
+ default void forEach(Consumer<? super Integer> consumer) {
+ if (consumer instanceof IntConsumer) {
+ forEach((IntConsumer) consumer);
+ }
+ else {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Node.OfInt.forEachRemaining(Consumer)");
+ spliterator().forEachRemaining(consumer);
+ }
+ }
+
+ /**
+ * Traverses the elements of this node, and invokes the provided
+ * {@code IntConsumer} with each element.
+ *
+ * @param consumer an {@code IntConsumer} that is to be invoked with each
+ * element in this {@code Node}
+ */
+ void forEach(IntConsumer consumer);
+
+ /**
+ * {@inheritDoc}
+ *
+ * @implSpec the default implementation invokes the generator to create
+ * an instance of an Integer[] array with a length of {@link #count()}
+ * and then invokes {@link #copyInto(Integer[], int)} with that
+ * Integer[] array at an offset of 0. This is not efficient and it is
+ * recommended to invoke {@link #asIntArray()}.
+ */
+ @Override
+ default Integer[] asArray(IntFunction<Integer[]> generator) {
+ Integer[] boxed = generator.apply((int) count());
+ copyInto(boxed, 0);
+ return boxed;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @implSpec the default implementation invokes {@link #asIntArray()} to
+ * obtain an int[] array and then copies the elements from that int[]
+ * array into the boxed Integer[] array. This is not efficient and it
+ * is recommended to invoke {@link #copyInto(int[], int)}.
+ */
+ @Override
+ default void copyInto(Integer[] boxed, int offset) {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Node.OfInt.copyInto(Integer[], int)");
+
+ int[] array = asIntArray();
+ for (int i = 0; i < array.length; i++) {
+ boxed[offset + i] = array[i];
+ }
+ }
+
+ @Override
+ default Node.OfInt getChild(int i) {
+ throw new IndexOutOfBoundsException();
+ }
+
+ /**
+ * Views this node as an int[] array.
+ *
+ * <p>Depending on the underlying implementation this may return a
+ * reference to an internal array rather than a copy. It is the caller's
+ * responsibility to decide if either this node or the array is utilized
+ * as the primary reference for the data.
+ *
+ * @return an array containing the contents of this {@code Node}
+ */
+ int[] asIntArray();
+
+ /**
+ * Copies the content of this {@code Node} into an int[] array, starting
+ * at a given offset into the array. It is the caller's responsibility
+ * to ensure there is sufficient room in the array.
+ *
+ * @param array the array into which to copy the contents of this
+ * {@code Node}
+ * @param offset the starting offset within the array
+ * @throws IndexOutOfBoundsException if copying would cause access of
+ * data outside array bounds
+ * @throws NullPointerException if {@code array} is {@code null}
+ */
+ void copyInto(int[] array, int offset);
+
+ /**
+ * {@inheritDoc}
+ * @implSpec The default in {@code Node.OfInt} returns
+ * {@code StreamShape.INT_VALUE}
+ */
+ default StreamShape getShape() {
+ return StreamShape.INT_VALUE;
+ }
+
+ }
+
+ /**
+ * Specialized {@code Node} for long elements
+ */
+ interface OfLong extends Node<Long> {
+
+ /**
+ * {@inheritDoc}
+ *
+ * @return a {@link Spliterator.OfLong} describing the elements of this
+ * node
+ */
+ @Override
+ Spliterator.OfLong spliterator();
+
+ /**
+ * {@inheritDoc}
+ *
+ * @param consumer a {@code Consumer} that is to be invoked with each
+ * element in this {@code Node}. If this is a
+ * {@code LongConsumer}, it is cast to {@code LongConsumer} so
+ * the elements may be processed without boxing.
+ */
+ @Override
+ default void forEach(Consumer<? super Long> consumer) {
+ if (consumer instanceof LongConsumer) {
+ forEach((LongConsumer) consumer);
+ }
+ else {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Node.OfLong.forEachRemaining(Consumer)");
+ spliterator().forEachRemaining(consumer);
+ }
+ }
+
+ /**
+ * Traverses the elements of this node, and invokes the provided
+ * {@code LongConsumer} with each element.
+ *
+ * @param consumer a {@code LongConsumer} that is to be invoked with
+ * each element in this {@code Node}
+ */
+ void forEach(LongConsumer consumer);
+
+ /**
+ * {@inheritDoc}
+ *
+ * @implSpec the default implementation invokes the generator to create
+ * an instance of a Long[] array with a length of {@link #count()} and
+ * then invokes {@link #copyInto(Long[], int)} with that Long[] array at
+ * an offset of 0. This is not efficient and it is recommended to
+ * invoke {@link #asLongArray()}.
+ */
+ @Override
+ default Long[] asArray(IntFunction<Long[]> generator) {
+ Long[] boxed = generator.apply((int) count());
+ copyInto(boxed, 0);
+ return boxed;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @implSpec the default implementation invokes {@link #asLongArray()}
+ * to obtain a long[] array and then copies the elements from that
+ * long[] array into the boxed Long[] array. This is not efficient and
+ * it is recommended to invoke {@link #copyInto(long[], int)}.
+ */
+ @Override
+ default void copyInto(Long[] boxed, int offset) {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Node.OfLong.copyInto(Long[], int)");
+
+ long[] array = asLongArray();
+ for (int i = 0; i < array.length; i++) {
+ boxed[offset + i] = array[i];
+ }
+ }
+
+ @Override
+ default Node.OfLong getChild(int i) {
+ throw new IndexOutOfBoundsException();
+ }
+
+ /**
+ * Views this node as a long[] array.
+ *
+ * <p>Depending on the underlying implementation this may return a
+ * reference to an internal array rather than a copy. It is the caller's
+ * responsibility to decide if either this node or the array is utilized
+ * as the primary reference for the data.
+ *
+ * @return an array containing the contents of this {@code Node}
+ */
+ long[] asLongArray();
+
+ /**
+ * Copies the content of this {@code Node} into a long[] array, starting
+ * at a given offset into the array. It is the caller's responsibility
+ * to ensure there is sufficient room in the array.
+ *
+ * @param array the array into which to copy the contents of this
+ * {@code Node}
+ * @param offset the starting offset within the array
+ * @throws IndexOutOfBoundsException if copying would cause access of
+ * data outside array bounds
+ * @throws NullPointerException if {@code array} is {@code null}
+ */
+ void copyInto(long[] array, int offset);
+
+ /**
+ * {@inheritDoc}
+ * @implSpec The default in {@code Node.OfLong} returns
+ * {@code StreamShape.LONG_VALUE}
+ */
+ default StreamShape getShape() {
+ return StreamShape.LONG_VALUE;
+ }
+
+
+ }
+
+ /**
+ * Specialized {@code Node} for double elements
+ */
+ interface OfDouble extends Node<Double> {
+
+ /**
+ * {@inheritDoc}
+ *
+ * @return a {@link Spliterator.OfDouble} describing the elements of
+ * this node
+ */
+ @Override
+ Spliterator.OfDouble spliterator();
+
+ /**
+ * {@inheritDoc}
+ *
+ * @param consumer a {@code Consumer} that is to be invoked with each
+ * element in this {@code Node}. If this is a
+ * {@code DoubleConsumer}, it is cast to {@code DoubleConsumer}
+ * so the elements may be processed without boxing.
+ */
+ @Override
+ default void forEach(Consumer<? super Double> consumer) {
+ if (consumer instanceof DoubleConsumer) {
+ forEach((DoubleConsumer) consumer);
+ }
+ else {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Node.OfDouble.forEachRemaining(Consumer)");
+ spliterator().forEachRemaining(consumer);
+ }
+ }
+
+ /**
+ * Traverses the elements of this node, and invokes the provided
+ * {@code DoubleConsumer} with each element.
+ *
+ * @param consumer a {@code DoubleConsumer} that is to be invoked with
+ * each element in this {@code Node}
+ */
+ void forEach(DoubleConsumer consumer);
+
+
+ /**
+ * {@inheritDoc}
+ *
+ * @implSpec the default implementation invokes the generator to create
+ * an instance of a Double[] array with a length of {@link #count()} and
+ * then invokes {@link #copyInto(Double[], int)} with that Double[]
+ * array at an offset of 0. This is not efficient and it is recommended
+ * to invoke {@link #asDoubleArray()}.
+ */
+ @Override
+ default Double[] asArray(IntFunction<Double[]> generator) {
+ Double[] boxed = generator.apply((int) count());
+ copyInto(boxed, 0);
+ return boxed;
+ }
+
+ /**
+ * {@inheritDoc}
+ *
+ * @implSpec the default implementation invokes {@link #asDoubleArray()}
+ * to obtain a double[] array and then copies the elements from that
+ * double[] array into the boxed Double[] array. This is not efficient
+ * and it is recommended to invoke {@link #copyInto(double[], int)}.
+ */
+ @Override
+ default void copyInto(Double[] boxed, int offset) {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Node.OfDouble.copyInto(Double[], int)");
+
+ double[] array = asDoubleArray();
+ for (int i = 0; i < array.length; i++) {
+ boxed[offset + i] = array[i];
+ }
+ }
+
+ @Override
+ default Node.OfDouble getChild(int i) {
+ throw new IndexOutOfBoundsException();
+ }
+
+ /**
+ * Views this node as a double[] array.
+ *
+ * <p>Depending on the underlying implementation this may return a
+ * reference to an internal array rather than a copy. It is the caller's
+ * responsibility to decide if either this node or the array is utilized
+ * as the primary reference for the data.
+ *
+ * @return an array containing the contents of this {@code Node}
+ */
+ double[] asDoubleArray();
+
+ /**
+ * Copies the content of this {@code Node} into a double[] array, starting
+ * at a given offset into the array. It is the caller's responsibility
+ * to ensure there is sufficient room in the array.
+ *
+ * @param array the array into which to copy the contents of this
+ * {@code Node}
+ * @param offset the starting offset within the array
+ * @throws IndexOutOfBoundsException if copying would cause access of
+ * data outside array bounds
+ * @throws NullPointerException if {@code array} is {@code null}
+ */
+ void copyInto(double[] array, int offset);
+
+ /**
+ * {@inheritDoc}
+ *
+ * @implSpec The default in {@code Node.OfDouble} returns
+ * {@code StreamShape.DOUBLE_VALUE}
+ */
+ default StreamShape getShape() {
+ return StreamShape.DOUBLE_VALUE;
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/PipelineHelper.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Spliterator;
+import java.util.function.IntFunction;
+
+/**
+ * Helper class for executing <a href="package-summary.html#StreamPipelines">
+ * stream pipelines</a>, capturing all of the information about a stream
+ * pipeline (output shape, intermediate operations, stream flags, parallelism,
+ * etc) in one place.
+ *
+ * <p>
+ * A {@code PipelineHelper} describes the initial segment of a stream pipeline,
+ * including its source, intermediate operations, and may additionally
+ * incorporate information about the terminal (or stateful) operation which
+ * follows the last intermediate operation described by this
+ * {@code PipelineHelper}. The {@code PipelineHelper} is passed to the
+ * {@link TerminalOp#evaluateParallel(PipelineHelper, java.util.Spliterator)},
+ * {@link TerminalOp#evaluateSequential(PipelineHelper, java.util.Spliterator)},
+ * and {@link AbstractPipeline#opEvaluateParallel(PipelineHelper, java.util.Spliterator,
+ * java.util.function.IntFunction)}, methods, which can use the
+ * {@code PipelineHelper} to access information about the pipeline such as
+ * input shape, output shape, stream flags, and size, and use the helper methods
+ * such as {@link #wrapAndCopyInto(Sink, Spliterator)},
+ * {@link #copyInto(Sink, Spliterator)}, and {@link #wrapSink(Sink)} to execute
+ * pipeline operations.
+ *
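+ * <p>For example (an illustrative sketch only; {@code terminalSink} is a
+ * hypothetical {@code Sink} implementing a terminal operation), a sequential
+ * terminal evaluation typically reduces to a single call:
+ * <pre>{@code
+ * // wrap the terminal sink in the intermediate stages, then push the source through it
+ * helper.wrapAndCopyInto(terminalSink, spliterator);
+ * }</pre>
+ *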
+ * @param <P_OUT> type of output elements from the pipeline
+ * @since 1.8
+ */
+abstract class PipelineHelper<P_OUT> {
+
+ /**
+ * Gets the combined stream and operation flags for the output of the described
+ * pipeline. This will incorporate stream flags from the stream source, all
+ * the intermediate operations and the terminal operation.
+ *
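+ * <p>For example (an illustrative sketch using the {@code IS_}-prefixed bit
+ * constants described in {@link StreamOpFlag}), a caller could test whether
+ * the output is known to be sorted:
+ * <pre>{@code
+ * boolean sortedOutput =
+ * (helper.getStreamAndOpFlags() & StreamOpFlag.IS_SORTED) != 0;
+ * }</pre>
+ *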
+ * @return the combined stream and operation flags
+ * @see StreamOpFlag
+ */
+ abstract int getStreamAndOpFlags();
+
+ /**
+ * Returns the exact output size of the portion of the output resulting from
+ * applying the pipeline stages described by this {@code PipelineHelper} to
+ * the portion of the input described by the provided
+ * {@code Spliterator}, if known. If not known, or known to be infinite,
+ * returns {@code -1}.
+ *
+ * @apiNote
+ * The exact output size is known if the {@code Spliterator} has the
+ * {@code SIZED} characteristic, and the operation flag
+ * {@link StreamOpFlag#SIZED} is known on the combined stream and operation
+ * flags.
+ *
+ * @param spliterator the spliterator describing the relevant portion of the
+ * source data
+ * @return the exact size if known, or -1 if infinite or unknown
+ */
+ abstract<P_IN> long exactOutputSizeIfKnown(Spliterator<P_IN> spliterator);
+
+ /**
+ * Applies the pipeline stages described by this {@code PipelineHelper} to
+ * the provided {@code Spliterator} and sends the results to the provided
+ * {@code Sink}.
+ *
+ * @implSpec
+ * The implementation behaves as if:
+ * <pre>{@code
+ * copyInto(wrapSink(sink), spliterator);
+ * }</pre>
+ *
+ * @param sink the {@code Sink} to receive the results
+ * @param spliterator the spliterator describing the source input to process
+ * @return the provided {@code Sink}
+ */
+ abstract<P_IN, S extends Sink<P_OUT>> S wrapAndCopyInto(S sink, Spliterator<P_IN> spliterator);
+
+ /**
+ * Pushes elements obtained from the {@code Spliterator} into the provided
+ * {@code Sink}. If the stream pipeline is known to have short-circuiting
+ * stages in it (see {@link StreamOpFlag#SHORT_CIRCUIT}), then
+ * {@link Sink#cancellationRequested()} is checked after each
+ * element, stopping if cancellation is requested.
+ *
+ * @implSpec
+ * This method conforms to the {@code Sink} protocol of calling
+ * {@code Sink.begin} before pushing elements, via {@code Sink.accept}, and
+ * calling {@code Sink.end} after all elements have been pushed.
+ *
+ * @param wrappedSink the destination {@code Sink}
+ * @param spliterator the source {@code Spliterator}
+ */
+ abstract<P_IN> void copyInto(Sink<P_IN> wrappedSink, Spliterator<P_IN> spliterator);
+
+ /**
+ * Pushes elements obtained from the {@code Spliterator} into the provided
+ * {@code Sink}, checking {@link Sink#cancellationRequested()} after each
+ * element, and stopping if cancellation is requested.
+ *
+ * @implSpec
+ * This method conforms to the {@code Sink} protocol of calling
+ * {@code Sink.begin} before pushing elements, via {@code Sink.accept}, and
+ * calling {@code Sink.end} after all elements have been pushed or if
+ * cancellation is requested.
+ *
+ * @param wrappedSink the destination {@code Sink}
+ * @param spliterator the source {@code Spliterator}
+ */
+ abstract <P_IN> void copyIntoWithCancel(Sink<P_IN> wrappedSink, Spliterator<P_IN> spliterator);
+
+ /**
+ * Takes a {@code Sink} that accepts elements of the output type of the
+ * {@code PipelineHelper}, and wraps it with a {@code Sink} that accepts
+ * elements of the input type and implements all the intermediate operations
+ * described by this {@code PipelineHelper}, delivering the result into the
+ * provided {@code Sink}.
+ *
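+ * <p>A minimal usage sketch (assuming {@code resultSink} accepts elements of
+ * the output type {@code P_OUT}); this is the combination performed by
+ * {@link #wrapAndCopyInto(Sink, Spliterator)}:
+ * <pre>{@code
+ * Sink<P_IN> wrappedSink = helper.wrapSink(resultSink);
+ * helper.copyInto(wrappedSink, spliterator);
+ * }</pre>
+ *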
+ * @param sink the {@code Sink} to receive the results
+ * @return a {@code Sink} that implements the pipeline stages and sends
+ * results to the provided {@code Sink}
+ */
+ abstract<P_IN> Sink<P_IN> wrapSink(Sink<P_OUT> sink);
+
+ /**
+ * Constructs a {@link Node.Builder} compatible with the output shape of
+ * this {@code PipelineHelper}.
+ *
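+ * <p>For example (an illustrative sketch, assuming {@code Node.Builder} can
+ * be used as the terminal {@code Sink} and exposes a {@code build()} method),
+ * collecting the pipeline output into a {@code Node}:
+ * <pre>{@code
+ * Node.Builder<P_OUT> nb = helper.makeNodeBuilder(
+ * helper.exactOutputSizeIfKnown(spliterator), generator);
+ * Node<P_OUT> node = helper.wrapAndCopyInto(nb, spliterator).build();
+ * }</pre>
+ *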
+ * @param exactSizeIfKnown if non-negative, a builder will be created that
+ * has a fixed capacity of exactly {@code exactSizeIfKnown} elements; if
+ * negative, the builder has variable capacity. A fixed-capacity builder
+ * will fail if an element is added after the builder has reached capacity.
+ * @param generator a factory function for array instances
+ * @return a {@code Node.Builder} compatible with the output shape of this
+ * {@code PipelineHelper}
+ */
+ abstract Node.Builder<P_OUT> makeNodeBuilder(long exactSizeIfKnown,
+ IntFunction<P_OUT[]> generator);
+
+ /**
+ * Collects all output elements resulting from applying the pipeline stages
+ * to the source {@code Spliterator} into a {@code Node}.
+ *
+ * @implNote
+ * If the pipeline has no intermediate operations and the source is backed
+ * by a {@code Node} then that {@code Node} will be returned (or flattened
+ * and then returned). This reduces copying for a pipeline consisting of a
+ * stateful operation followed by a terminal operation that returns an
+ * array, such as:
+ * <pre>{@code
+ * stream.sorted().toArray();
+ * }</pre>
+ *
+ * @param spliterator the source {@code Spliterator}
+ * @param flatten if true and the pipeline is a parallel pipeline, then the
+ * {@code Node} returned will contain no children, otherwise the
+ * {@code Node} may be the root of a tree that mirrors the shape of
+ * the computation.
+ * @param generator a factory function for array instances
+ * @return the {@code Node} containing all output elements
+ */
+ abstract<P_IN> Node<P_OUT> evaluate(Spliterator<P_IN> spliterator,
+ boolean flatten,
+ IntFunction<P_OUT[]> generator);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/Sink.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Objects;
+import java.util.function.Consumer;
+import java.util.function.DoubleConsumer;
+import java.util.function.IntConsumer;
+import java.util.function.LongConsumer;
+
+/**
+ * An extension of {@link Consumer} used to conduct values through the stages of
+ * a stream pipeline, with additional methods to manage size information,
+ * control flow, etc. Before calling the {@code accept()} method on a
+ * {@code Sink} for the first time, you must first call the {@code begin()}
+ * method to inform it that data is coming (optionally informing the sink how
+ * much data is coming), and after all data has been sent, you must call the
+ * {@code end()} method. After calling {@code end()}, you should not call
+ * {@code accept()} without again calling {@code begin()}. {@code Sink} also
+ * offers a mechanism by which the sink can cooperatively signal that it does
+ * not wish to receive any more data (the {@code cancellationRequested()}
+ * method), which a source can poll before sending more data to the
+ * {@code Sink}.
+ *
+ * <p>A sink may be in one of two states: an initial state and an active state.
+ * It starts out in the initial state; the {@code begin()} method transitions
+ * it to the active state, and the {@code end()} method transitions it back into
+ * the initial state, where it can be re-used. Data-accepting methods (such as
+ * {@code accept()}) are only valid in the active state.
+ *
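+ * <p>For example (an illustrative sketch of the calling protocol), a source
+ * pushing three elements into a sink would issue:
+ * <pre>{@code
+ * sink.begin(3); // size hint; -1 if unknown or infinite
+ * sink.accept(t1);
+ * sink.accept(t2);
+ * sink.accept(t3);
+ * sink.end();
+ * }</pre>
+ *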
+ * @apiNote
+ * A stream pipeline consists of a source, zero or more intermediate stages
+ * (such as filtering or mapping), and a terminal stage, such as reduction or
+ * for-each. For concreteness, consider the pipeline:
+ *
+ * <pre>{@code
+ * int longestStringLengthStartingWithA
+ * = strings.stream()
+ * .filter(s -> s.startsWith("A"))
+ * .mapToInt(String::length)
+ * .max().getAsInt();
+ * }</pre>
+ *
+ * <p>Here, we have three stages, filtering, mapping, and reducing. The
+ * filtering stage consumes strings and emits a subset of those strings; the
+ * mapping stage consumes strings and emits ints; the reduction stage consumes
+ * those ints and computes the maximal value.
+ *
+ * <p>A {@code Sink} instance is used to represent each stage of this pipeline,
+ * whether the stage accepts objects, ints, longs, or doubles. Sink has entry
+ * points for {@code accept(Object)}, {@code accept(int)}, etc, so that we do
+ * not need a specialized interface for each primitive specialization. (It
+ * might be called a "kitchen sink" for this omnivorous tendency.) The entry
+ * point to the pipeline is the {@code Sink} for the filtering stage, which
+ * sends some elements "downstream" -- into the {@code Sink} for the mapping
+ * stage, which in turn sends integral values downstream into the {@code Sink}
+ * for the reduction stage. The {@code Sink} implementation associated with a
+ * given stage is expected to know the data type for the next stage, and to call
+ * the correct {@code accept} method on its downstream {@code Sink}. Similarly,
+ * each stage must implement the correct {@code accept} method corresponding to
+ * the data type it accepts.
+ *
+ * <p>The specialized subtypes such as {@link Sink.OfInt} override
+ * {@code accept(Object)} to call the appropriate primitive specialization of
+ * {@code accept}, implement the appropriate primitive specialization of
+ * {@code Consumer}, and re-abstract the appropriate primitive specialization of
+ * {@code accept}.
+ *
+ * <p>The chaining subtypes such as {@link ChainedInt} not only implement
+ * {@code Sink.OfInt}, but also maintain a {@code downstream} field which
+ * represents the downstream {@code Sink}, and implement the methods
+ * {@code begin()}, {@code end()}, and {@code cancellationRequested()} to
+ * delegate to the downstream {@code Sink}. Most implementations of
+ * intermediate operations will use these chaining wrappers. For example, the
+ * mapping stage in the above example would look like:
+ *
+ * <pre>{@code
+ * Sink<U> wrappedSink = new Sink.ChainedReference<U>(sink) {
+ * public void accept(U u) {
+ * downstream.accept(mapper.applyAsInt(u));
+ * }
+ * };
+ * }</pre>
+ *
+ * <p>Here, we extend {@code Sink.ChainedReference<U>}, meaning that we expect
+ * to receive elements of type {@code U} as input, and pass the downstream sink
+ * to the constructor. Because the next stage expects to receive integers, we
+ * must call the {@code accept(int)} method when emitting values to the downstream.
+ * The {@code accept()} method applies the mapping function from {@code U} to
+ * {@code int} and passes the resulting value to the downstream {@code Sink}.
+ *
+ * @param <T> type of elements for value streams
+ * @since 1.8
+ */
+interface Sink<T> extends Consumer<T> {
+ /**
+ * Resets the sink state to receive a fresh data set. This must be called
+ * before sending any data to the sink. After calling {@link #end()},
+ * you may call this method to reset the sink for another calculation.
+ *
+ * <p>Prior to this call, the sink must be in the initial state, and after
+ * this call it is in the active state.
+ *
+ * @param size the exact size of the data to be pushed downstream, if
+ * known, or {@code -1} if unknown or infinite
+ */
+ default void begin(long size) {}
+
+ /**
+ * Indicates that all elements have been pushed. If the {@code Sink} is
+ * stateful, it should send any stored state downstream at this time, and
+ * should clear any accumulated state (and associated resources).
+ *
+ * <p>Prior to this call, the sink must be in the active state, and after
+ * this call it is returned to the initial state.
+ */
+ default void end() {}
+
+ /**
+ * Indicates that this {@code Sink} does not wish to receive any more data.
+ *
+ * @implSpec The default implementation always returns false.
+ *
+ * @return true if cancellation is requested
+ */
+ default boolean cancellationRequested() {
+ return false;
+ }
+
+ /**
+ * Accepts an int value.
+ *
+ * @implSpec The default implementation throws IllegalStateException.
+ *
+ * @throws IllegalStateException if this sink does not accept int values
+ */
+ default void accept(int value) {
+ throw new IllegalStateException("called wrong accept method");
+ }
+
+ /**
+ * Accepts a long value.
+ *
+ * @implSpec The default implementation throws IllegalStateException.
+ *
+ * @throws IllegalStateException if this sink does not accept long values
+ */
+ default void accept(long value) {
+ throw new IllegalStateException("called wrong accept method");
+ }
+
+ /**
+ * Accepts a double value.
+ *
+ * @implSpec The default implementation throws IllegalStateException.
+ *
+ * @throws IllegalStateException if this sink does not accept double values
+ */
+ default void accept(double value) {
+ throw new IllegalStateException("called wrong accept method");
+ }
+
+ /**
+ * {@code Sink} that implements {@code Sink<Integer>}, re-abstracts
+ * {@code accept(int)}, and wires {@code accept(Integer)} to bridge to
+ * {@code accept(int)}.
+ */
+ interface OfInt extends Sink<Integer>, IntConsumer {
+ @Override
+ void accept(int value);
+
+ @Override
+ default void accept(Integer i) {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Sink.OfInt.accept(Integer)");
+ accept(i.intValue());
+ }
+ }
+
+ /**
+ * {@code Sink} that implements {@code Sink<Long>}, re-abstracts
+ * {@code accept(long)}, and wires {@code accept(Long)} to bridge to
+ * {@code accept(long)}.
+ */
+ interface OfLong extends Sink<Long>, LongConsumer {
+ @Override
+ void accept(long value);
+
+ @Override
+ default void accept(Long i) {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Sink.OfLong.accept(Long)");
+ accept(i.longValue());
+ }
+ }
+
+ /**
+ * {@code Sink} that implements {@code Sink<Double>}, re-abstracts
+ * {@code accept(double)}, and wires {@code accept(Double)} to bridge to
+ * {@code accept(double)}.
+ */
+ interface OfDouble extends Sink<Double>, DoubleConsumer {
+ @Override
+ void accept(double value);
+
+ @Override
+ default void accept(Double i) {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} calling Sink.OfDouble.accept(Double)");
+ accept(i.doubleValue());
+ }
+ }
+
+ /**
+ * Abstract {@code Sink} implementation for creating chains of
+ * sinks. The {@code begin}, {@code end}, and
+ * {@code cancellationRequested} methods are wired to chain to the
+ * downstream {@code Sink}. This implementation takes a downstream
+ * {@code Sink} of unknown input shape and produces a {@code Sink<T>}. The
+ * implementation of the {@code accept()} method must call the correct
+ * {@code accept()} method on the downstream {@code Sink}.
+ */
+ static abstract class ChainedReference<T> implements Sink<T> {
+ protected final Sink downstream;
+
+ public ChainedReference(Sink downstream) {
+ this.downstream = Objects.requireNonNull(downstream);
+ }
+
+ @Override
+ public void begin(long size) {
+ downstream.begin(size);
+ }
+
+ @Override
+ public void end() {
+ downstream.end();
+ }
+
+ @Override
+ public boolean cancellationRequested() {
+ return downstream.cancellationRequested();
+ }
+ }
+
+ /**
+ * Abstract {@code Sink} implementation designed for creating chains of
+ * sinks. The {@code begin}, {@code end}, and
+ * {@code cancellationRequested} methods are wired to chain to the
+ * downstream {@code Sink}. This implementation takes a downstream
+ * {@code Sink} of unknown input shape and produces a {@code Sink.OfInt}.
+ * The implementation of the {@code accept()} method must call the correct
+ * {@code accept()} method on the downstream {@code Sink}.
+ */
+ static abstract class ChainedInt implements Sink.OfInt {
+ protected final Sink downstream;
+
+ public ChainedInt(Sink downstream) {
+ this.downstream = Objects.requireNonNull(downstream);
+ }
+
+ @Override
+ public void begin(long size) {
+ downstream.begin(size);
+ }
+
+ @Override
+ public void end() {
+ downstream.end();
+ }
+
+ @Override
+ public boolean cancellationRequested() {
+ return downstream.cancellationRequested();
+ }
+ }
+
+ /**
+ * Abstract {@code Sink} implementation designed for creating chains of
+ * sinks. The {@code begin}, {@code end}, and
+ * {@code cancellationRequested} methods are wired to chain to the
+ * downstream {@code Sink}. This implementation takes a downstream
+ * {@code Sink} of unknown input shape and produces a {@code Sink.OfLong}.
+ * The implementation of the {@code accept()} method must call the correct
+ * {@code accept()} method on the downstream {@code Sink}.
+ */
+ static abstract class ChainedLong implements Sink.OfLong {
+ protected final Sink downstream;
+
+ public ChainedLong(Sink downstream) {
+ this.downstream = Objects.requireNonNull(downstream);
+ }
+
+ @Override
+ public void begin(long size) {
+ downstream.begin(size);
+ }
+
+ @Override
+ public void end() {
+ downstream.end();
+ }
+
+ @Override
+ public boolean cancellationRequested() {
+ return downstream.cancellationRequested();
+ }
+ }
+
+ /**
+ * Abstract {@code Sink} implementation designed for creating chains of
+ * sinks. The {@code begin}, {@code end}, and
+ * {@code cancellationRequested} methods are wired to chain to the
+ * downstream {@code Sink}. This implementation takes a downstream
+ * {@code Sink} of unknown input shape and produces a {@code Sink.OfDouble}.
+ * The implementation of the {@code accept()} method must call the correct
+ * {@code accept()} method on the downstream {@code Sink}.
+ */
+ static abstract class ChainedDouble implements Sink.OfDouble {
+ protected final Sink downstream;
+
+ public ChainedDouble(Sink downstream) {
+ this.downstream = Objects.requireNonNull(downstream);
+ }
+
+ @Override
+ public void begin(long size) {
+ downstream.begin(size);
+ }
+
+ @Override
+ public void end() {
+ downstream.end();
+ }
+
+ @Override
+ public boolean cancellationRequested() {
+ return downstream.cancellationRequested();
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/Stream.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,782 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Comparator;
+import java.util.Optional;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import java.util.function.BinaryOperator;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.IntFunction;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+import java.util.function.ToDoubleFunction;
+import java.util.function.ToIntFunction;
+import java.util.function.ToLongFunction;
+
+// @@@ Specification to-do list @@@
+// - Describe the difference between sequential and parallel streams
+// - More general information about reduce, better definitions for associativity, more description of
+// how reduce employs parallelism, more examples
+// - Role of stream flags in various operations, specifically ordering
+// - Whether each op preserves encounter order
+// @@@ Specification to-do list @@@
+
+/**
+ * A sequence of elements supporting sequential and parallel bulk operations.
+ * Streams support lazy intermediate operations (transforming a stream to
+ * another stream) such as {@code filter} and {@code map}, and terminal
+ * operations (consuming the contents of a stream to produce a result or
+ * side-effect), such as {@code forEach}, {@code findFirst}, and {@code
+ * iterator}. Once an operation has been performed on a stream, it
+ * is considered <em>consumed</em> and no longer usable for other operations.
+ *
+ * <p>For sequential stream pipelines, all operations are performed in the
+ * <a href="package-summary.html#Ordering">encounter order</a> of the pipeline
+ * source, if the pipeline source has a defined encounter order.
+ *
+ * <p>For parallel stream pipelines, unless otherwise specified, intermediate
+ * stream operations preserve the <a href="package-summary.html#Ordering">
+ * encounter order</a> of their source, and terminal operations
+ * respect the encounter order of their source, if the source
+ * has an encounter order. Provided that the parameters to stream operations
+ * satisfy the <a href="package-summary.html#NonInterference">non-interference
+ * requirements</a>, and excepting differences arising from the absence of
+ * a defined encounter order, the result of a stream pipeline should be
+ * stable across multiple executions of the same operations on the same source.
+ * However, the timing and thread in which side-effects occur (for those
+ * operations which are allowed to produce side-effects, such as
+ * {@link #forEach(Consumer)}), are explicitly nondeterministic for parallel
+ * execution of stream pipelines.
+ *
+ * <p>Unless otherwise noted, passing a {@code null} argument to any stream
+ * method may result in a {@link NullPointerException}.
+ *
+ * @apiNote
+ * Streams are not data structures; they do not manage the storage for their
+ * elements, nor do they support access to individual elements. However,
+ * you can use the {@link #iterator()} or {@link #spliterator()} operations to
+ * perform a controlled traversal.
+ *
+ * @param <T> type of elements
+ * @since 1.8
+ * @see <a href="package-summary.html">java.util.stream</a>
+ */
+public interface Stream<T> extends BaseStream<T, Stream<T>> {
+
+ /**
+ * Returns a stream consisting of the elements of this stream that match
+ * the given predicate.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
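+ * <p>For example (an illustrative sketch; {@code words} stands for a
+ * hypothetical {@code Stream<String>}):
+ * <pre>{@code
+ * Stream<String> longWords = words.filter(w -> w.length() > 5);
+ * }</pre>
+ *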
+ * @param predicate a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> predicate to apply to
+ * each element to determine if it should be included
+ * @return the new stream
+ */
+ Stream<T> filter(Predicate<? super T> predicate);
+
+ /**
+ * Returns a stream consisting of the results of applying the given
+ * function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param <R> The element type of the new stream
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element
+ * @return the new stream
+ */
+ <R> Stream<R> map(Function<? super T, ? extends R> mapper);
+
+ /**
+ * Returns an {@code IntStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">
+ * intermediate operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element
+ * @return the new stream
+ */
+ IntStream mapToInt(ToIntFunction<? super T> mapper);
+
+ /**
+ * Returns a {@code LongStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element
+ * @return the new stream
+ */
+ LongStream mapToLong(ToLongFunction<? super T> mapper);
+
+ /**
+ * Returns a {@code DoubleStream} consisting of the results of applying the
+ * given function to the elements of this stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element
+ * @return the new stream
+ */
+ DoubleStream mapToDouble(ToDoubleFunction<? super T> mapper);
+
+ /**
+ * Returns a stream consisting of the results of replacing each element of
+ * this stream with the contents of the stream produced by applying the
+ * provided mapping function to each element. If the result of the mapping
+ * function is {@code null}, this is treated as if the result is an empty
+ * stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @apiNote
+ * The {@code flatMap()} operation has the effect of applying a one-to-many
+ * transformation to the elements of the stream, and then flattening the
+ * resulting elements into a new stream. For example, if {@code orders}
+ * is a stream of purchase orders, and each purchase order contains a
+ * collection of line items, then the following produces a stream of line
+ * items:
+ * <pre>{@code
+ * orderStream.flatMap(order -> order.getLineItems().stream())...
+ * }</pre>
+ *
+ * @param <R> The element type of the new stream
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element which produces a stream of new values
+ * @return the new stream
+ */
+ <R> Stream<R> flatMap(Function<? super T, ? extends Stream<? extends R>> mapper);
+
+ /**
+ * Returns an {@code IntStream} consisting of the results of replacing each
+ * element of this stream with the contents of the stream produced by
+ * applying the provided mapping function to each element. If the result of
+ * the mapping function is {@code null}, this is treated as if the result is
+ * an empty stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element which produces a stream of new values
+ * @return the new stream
+ */
+ IntStream flatMapToInt(Function<? super T, ? extends IntStream> mapper);
+
+ /**
+ * Returns a {@code LongStream} consisting of the results of replacing each
+ * element of this stream with the contents of the stream produced
+ * by applying the provided mapping function to each element. If the result
+ * of the mapping function is {@code null}, this is treated as if the
+ * result is an empty stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to
+ * each element which produces a stream of new values
+ * @return the new stream
+ */
+ LongStream flatMapToLong(Function<? super T, ? extends LongStream> mapper);
+
+ /**
+ * Returns a {@code DoubleStream} consisting of the results of replacing each
+ * element of this stream with the contents of the stream produced
+ * by applying the provided mapping function to each element. If the result
+ * of the mapping function is {@code null}, this is treated as if the result
+ * is an empty stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * @param mapper a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> function to apply to each
+ * element which produces a stream of new values
+ * @return the new stream
+ */
+ DoubleStream flatMapToDouble(Function<? super T, ? extends DoubleStream> mapper);
+
+ /**
+ * Returns a stream consisting of the distinct elements (according to
+ * {@link Object#equals(Object)}) of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @return the new stream
+ */
+ Stream<T> distinct();
+
+ /**
+ * Returns a stream consisting of the elements of this stream, sorted
+ * according to natural order. If the elements of this stream are not
+ * {@code Comparable}, a {@code java.lang.ClassCastException} may be thrown
+ * when the stream pipeline is executed.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @return the new stream
+ */
+ Stream<T> sorted();
+
+ /**
+ * Returns a stream consisting of the elements of this stream, sorted
+ * according to the provided {@code Comparator}.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
+ * @param comparator a <a href="package-summary.html#NonInterference">
+ * non-interfering, stateless</a> {@code Comparator} to
+ * be used to compare stream elements
+ * @return the new stream
+ */
+ Stream<T> sorted(Comparator<? super T> comparator);
+
+ /**
+ * Returns a stream consisting of the elements of this stream, additionally
+ * performing the provided action on each element as elements are consumed
+ * from the resulting stream.
+ *
+ * <p>This is an <a href="package-summary.html#StreamOps">intermediate
+ * operation</a>.
+ *
+ * <p>For parallel stream pipelines, the action may be called at
+ * whatever time and in whatever thread the element is made available by the
+ * upstream operation. If the action modifies shared state,
+ * it is responsible for providing the required synchronization.
+ *
+ * @apiNote This method exists mainly to support debugging, where you want
+ * to see the elements as they flow past a certain point in a pipeline:
+ * <pre>{@code
+ * list.stream()
+ * .filter(filteringFunction)
+ * .peek(e -> System.out.println("Filtered value: " + e))
+ * .map(mappingFunction)
+ * .peek(e -> System.out.println("Mapped value: " + e))
+ * .collect(Collectors.toList());
+ * }</pre>
+ *
+ * @param consumer a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements as
+ * they are consumed from the stream
+ * @return the new stream
+ */
+ Stream<T> peek(Consumer<? super T> consumer);
+
+ /**
+ * Returns a stream consisting of the elements of this stream, truncated
+ * to be no longer than {@code maxSize} in length.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * stateful intermediate operation</a>.
+ *
+ * @param maxSize the number of elements the stream should be limited to
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code maxSize} is negative
+ */
+ Stream<T> limit(long maxSize);
+
+ /**
+ * Returns a stream consisting of the remaining elements of this stream
+ * after discarding the first {@code startInclusive} elements. If this
+ * stream contains fewer than {@code startInclusive} elements then an
+ * empty stream will be returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">stateful
+ * intermediate operation</a>.
+ *
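+ * <p>For example (an illustrative sketch; {@code stream} stands for a
+ * hypothetical {@code Stream<Integer>} of the elements 1 through 5):
+ * <pre>{@code
+ * // yields a stream of 3, 4, 5
+ * Stream<Integer> tail = stream.substream(2);
+ * }</pre>
+ *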
+ * @param startInclusive the number of leading elements to skip
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code startInclusive} is negative
+ */
+ Stream<T> substream(long startInclusive);
+
+ /**
+ * Returns a stream consisting of the remaining elements of this stream
+ * after discarding the first {@code startInclusive} elements, truncated
+ * to contain no more than {@code endExclusive - startInclusive}
+ * elements. If this stream contains fewer than {@code startInclusive}
+ * elements then an empty stream will be returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * stateful intermediate operation</a>.
+ *
+ * @param startInclusive the starting position of the substream, inclusive
+ * @param endExclusive the ending position of the substream, exclusive
+ * @return the new stream
+ * @throws IllegalArgumentException if {@code startInclusive} or
+ * {@code endExclusive} is negative or {@code startInclusive} is greater
+ * than {@code endExclusive}
+ */
+ Stream<T> substream(long startInclusive, long endExclusive);
+
+ /**
+ * Performs an action for each element of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * <p>For parallel stream pipelines, this operation does <em>not</em>
+ * guarantee to respect the encounter order of the stream, as doing so
+ * would sacrifice the benefit of parallelism. For any given element, the
+ * action may be performed at whatever time and in whatever thread the
+ * library chooses. If the action accesses shared state, it is
+ * responsible for providing the required synchronization.
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements
+ */
+ void forEach(Consumer<? super T> action);
+
+ /**
+ * Performs an action for each element of this stream, guaranteeing that
+ * each element is processed in encounter order for streams that have a
+ * defined encounter order.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param action a <a href="package-summary.html#NonInterference">
+ * non-interfering</a> action to perform on the elements
+ * @see #forEach(Consumer)
+ */
+ void forEachOrdered(Consumer<? super T> action);
+
+ /**
+ * Returns an array containing the elements of this stream.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @return an array containing the elements of this stream
+ */
+ Object[] toArray();
+
+ /**
+ * Returns an array containing the elements of this stream, using the
+ * provided {@code generator} function to allocate the returned array.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param <A> the element type of the resulting array
+ * @param generator a function which produces a new array of the desired
+ * type and the provided length
+ * @return an array containing the elements in this stream
+ * @throws ArrayStoreException if the runtime type of the array returned
+ * from the array generator is not a supertype of the runtime type of every
+ * element in this stream
+ */
+ <A> A[] toArray(IntFunction<A[]> generator);
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using the provided identity value and an
+ * <a href="package-summary.html#Associativity">associative</a>
+ * accumulation function, and returns the reduced value. This is equivalent
+ * to:
+ * <pre>{@code
+ * T result = identity;
+ * for (T element : this stream)
+ * result = accumulator.apply(result, element)
+ * return result;
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code identity} value must be an identity for the accumulator
+ * function. This means that for all {@code t},
+ * {@code accumulator.apply(identity, t)} is equal to {@code t}.
+ * The {@code accumulator} function must be an
+ * <a href="package-summary.html#Associativity">associative</a> function.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @apiNote Sum, min, max, average, and string concatenation are all special
+ * cases of reduction. Summing a stream of numbers can be expressed as:
+ *
+ * <pre>{@code
+ * Integer sum = integers.reduce(0, (a, b) -> a+b);
+ * }</pre>
+ *
+ * or more compactly:
+ *
+ * <pre>{@code
+ * Integer sum = integers.reduce(0, Integer::sum);
+ * }</pre>
+ *
+ * <p>While this may seem a more roundabout way to perform an aggregation
+ * compared to simply mutating a running total in a loop, reduction
+ * operations parallelize more gracefully, without needing additional
+ * synchronization and with greatly reduced risk of data races.
+ *
+ * @param identity the identity value for the accumulating function
+ * @param accumulator an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for combining two values
+ * @return the result of the reduction
+ */
+ T reduce(T identity, BinaryOperator<T> accumulator);
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using an
+ * <a href="package-summary.html#Associativity">associative</a> accumulation
+ * function, and returns an {@code Optional} describing the reduced value,
+ * if any. This is equivalent to:
+ * <pre>{@code
+ * boolean foundAny = false;
+ * T result = null;
+ * for (T element : this stream) {
+ * if (!foundAny) {
+ * foundAny = true;
+ * result = element;
+ * }
+ * else
+ * result = accumulator.apply(result, element);
+ * }
+ * return foundAny ? Optional.of(result) : Optional.empty();
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code accumulator} function must be an
+ * <a href="package-summary.html#Associativity">associative</a> function.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param accumulator an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for combining two values
+ * @return the result of the reduction
+ * @see #reduce(Object, BinaryOperator)
+ * @see #min(java.util.Comparator)
+ * @see #max(java.util.Comparator)
+ */
+ Optional<T> reduce(BinaryOperator<T> accumulator);
+
+ /**
+ * Performs a <a href="package-summary.html#Reduction">reduction</a> on the
+ * elements of this stream, using the provided identity, accumulation
+ * function, and combining function. This is equivalent to:
+ * <pre>{@code
+ * U result = identity;
+ * for (T element : this stream)
+ * result = accumulator.apply(result, element)
+ * return result;
+ * }</pre>
+ *
+ * but is not constrained to execute sequentially.
+ *
+ * <p>The {@code identity} value must be an identity for the combiner
+ * function. This means that for all {@code u}, {@code combiner(identity, u)}
+ * is equal to {@code u}. Additionally, the {@code combiner} function
+ * must be compatible with the {@code accumulator} function; for all
+ * {@code u} and {@code t}, the following must hold:
+ * <pre>{@code
+ * combiner.apply(u, accumulator.apply(identity, t)) == accumulator.apply(u, t)
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @apiNote Many reductions using this form can be represented more simply
+ * by an explicit combination of {@code map} and {@code reduce} operations.
+ * The {@code accumulator} function acts as a fused mapper and accumulator,
+ * which can sometimes be more efficient than separate mapping and reduction,
+ * such as in the case where knowing the previously reduced value allows you
+ * to avoid some computation.
+ *
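+ * <p>For example (an illustrative sketch; {@code strings} stands for a
+ * hypothetical {@code Stream<String>}), summing the lengths of a stream of
+ * strings fuses the mapping and accumulation steps into one function:
+ * <pre>{@code
+ * int totalLength = strings.reduce(0,
+ * (sum, str) -> sum + str.length(),
+ * Integer::sum);
+ * }</pre>
+ *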
+ * @param <U> The type of the result
+ * @param identity the identity value for the combiner function
+ * @param accumulator an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for incorporating an additional
+ * element into a result
+ * @param combiner an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for combining two values, which
+ * must be compatible with the accumulator function
+ * @return the result of the reduction
+ * @see #reduce(BinaryOperator)
+ * @see #reduce(Object, BinaryOperator)
+ */
+ <U> U reduce(U identity,
+ BiFunction<U, ? super T, U> accumulator,
+ BinaryOperator<U> combiner);
+
+ /**
+ * Performs a <a href="package-summary.html#MutableReduction">mutable
+ * reduction</a> operation on the elements of this stream. A mutable
+ * reduction is one in which the reduced value is a mutable value holder,
+ * such as an {@code ArrayList}, and elements are incorporated by updating
+ * the state of the result, rather than by replacing the result. This
+ * produces a result equivalent to:
+ * <pre>{@code
+ * R result = resultFactory.get();
+ * for (T element : this stream)
+ * accumulator.accept(result, element);
+ * return result;
+ * }</pre>
+ *
+ * <p>Like {@link #reduce(Object, BinaryOperator)}, {@code collect} operations
+ * can be parallelized without requiring additional synchronization.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @apiNote There are many existing classes in the JDK whose signatures are
+ * a good match for use as arguments to {@code collect()}. For example,
+ * the following will accumulate strings into an ArrayList:
+ * <pre>{@code
+ * List<String> asList = stringStream.collect(ArrayList::new, ArrayList::add, ArrayList::addAll);
+ * }</pre>
+ *
+ * <p>The following will take a stream of strings and concatenate them into a
+ * single string:
+ * <pre>{@code
+ * String concat = stringStream.collect(StringBuilder::new, StringBuilder::append,
+ * StringBuilder::append)
+ * .toString();
+ * }</pre>
+ *
+ * @param <R> type of the result
+ * @param resultFactory a function that creates a new result container.
+ * For a parallel execution, this function may be
+ * called multiple times and must return a fresh value
+ * each time.
+ * @param accumulator an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for incorporating an additional
+ * element into a result
+ * @param combiner an <a href="package-summary.html#Associativity">associative</a>
+ * <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> function for combining two values, which
+ * must be compatible with the accumulator function
+ * @return the result of the reduction
+ */
+ <R> R collect(Supplier<R> resultFactory,
+ BiConsumer<R, ? super T> accumulator,
+ BiConsumer<R, R> combiner);
+
+ /**
+ * Performs a <a href="package-summary.html#MutableReduction">mutable
+ * reduction</a> operation on the elements of this stream using a
+ * {@code Collector} object to describe the reduction. A {@code Collector}
+ * encapsulates the functions used as arguments to
+ * {@link #collect(Supplier, BiConsumer, BiConsumer)}, allowing for reuse of
+ * collection strategies, and composition of collect operations such as
+ * multiple-level grouping or partitioning.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * <p>When executed in parallel, multiple intermediate results may be
+ * instantiated, populated, and merged, so as to maintain isolation of
+ * mutable data structures. Therefore, even when executed in parallel
+ * with non-thread-safe data structures (such as {@code ArrayList}), no
+ * additional synchronization is needed for a parallel reduction.
+ *
+ * @apiNote
+ * The following will accumulate strings into an ArrayList:
+ * <pre>{@code
+ * List<String> asList = stringStream.collect(Collectors.toList());
+ * }</pre>
+ *
+ * <p>The following will classify {@code Person} objects by city:
+ * <pre>{@code
+ * Map<String, Collection<Person>> peopleByCity
+ * = personStream.collect(Collectors.groupBy(Person::getCity));
+ * }</pre>
+ *
+ * <p>The following will classify {@code Person} objects by state and city,
+ * cascading two {@code Collector}s together:
+ * <pre>{@code
+ * Map<String, Map<String, Collection<Person>>> peopleByStateAndCity
+ * = personStream.collect(Collectors.groupBy(Person::getState,
+ * Collectors.groupBy(Person::getCity)));
+ * }</pre>
+ *
+ * @param <R> the type of the result
+ * @param collector the {@code Collector} describing the reduction
+ * @return the result of the reduction
+ * @see #collect(Supplier, BiConsumer, BiConsumer)
+ * @see Collectors
+ */
+ <R> R collect(Collector<? super T, R> collector);
+
+ /**
+ * Returns the minimum element of this stream according to the provided
+ * {@code Comparator}. This is a special case of a
+ * <a href="package-summary.html#MutableReduction">reduction</a>.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
+ *
+ * @param comparator a <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> {@code Comparator} to use to compare
+ * elements of this stream
+ * @return an {@code Optional} describing the minimum element of this stream,
+ * or an empty {@code Optional} if the stream is empty
+ */
+ Optional<T> min(Comparator<? super T> comparator);
+
+ /**
+ * Returns the maximum element of this stream according to the provided
+ * {@code Comparator}. This is a special case of a
+ * <a href="package-summary.html#MutableReduction">reduction</a>.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal
+ * operation</a>.
+ *
+ * @param comparator a <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> {@code Comparator} to use to compare
+ * elements of this stream
+ * @return an {@code Optional} describing the maximum element of this stream,
+ * or an empty {@code Optional} if the stream is empty
+ */
+ Optional<T> max(Comparator<? super T> comparator);
+
+ /**
+ * Returns the count of elements in this stream. This is a special case of
+ * a <a href="package-summary.html#MutableReduction">reduction</a> and is
+ * equivalent to:
+ * <pre>{@code
+ * return mapToLong(e -> 1L).sum();
+ * }</pre>
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
+ *
+ * @return the count of elements in this stream
+ */
+ long count();
+
+ /**
+ * Returns whether any elements of this stream match the provided
+ * predicate. May not evaluate the predicate on all elements if not
+ * necessary for determining the result.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> predicate to apply to elements of this
+ * stream
+ * @return {@code true} if any elements of the stream match the provided
+ * predicate otherwise {@code false}
+ */
+ boolean anyMatch(Predicate<? super T> predicate);
+
+ /**
+ * Returns whether all elements of this stream match the provided predicate.
+ * May not evaluate the predicate on all elements if not necessary for
+ * determining the result.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> predicate to apply to elements of this
+ * stream
+ * @return {@code true} if all elements of the stream match the provided
+ * predicate otherwise {@code false}
+ */
+ boolean allMatch(Predicate<? super T> predicate);
+
+ /**
+ * Returns whether no elements of this stream match the provided predicate.
+ * May not evaluate the predicate on all elements if not necessary for
+ * determining the result.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @param predicate a <a href="package-summary.html#NonInterference">non-interfering,
+ * stateless</a> predicate to apply to elements of this
+ * stream
+ * @return {@code true} if no elements of the stream match the provided
+ * predicate otherwise {@code false}
+ */
+ boolean noneMatch(Predicate<? super T> predicate);
+
+ /**
+ * Returns an {@link Optional} describing the first element of this stream
+ * (in the encounter order), or an empty {@code Optional} if the stream is
+ * empty. If the stream has no encounter order, then any element may be
+ * returned.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * @return an {@code Optional} describing the first element of this stream,
+ * or an empty {@code Optional} if the stream is empty
+ * @throws NullPointerException if the element selected is null
+ */
+ Optional<T> findFirst();
+
+ /**
+ * Returns an {@link Optional} describing some element of the stream, or an
+ * empty {@code Optional} if the stream is empty.
+ *
+ * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
+ * terminal operation</a>.
+ *
+ * <p>The behavior of this operation is explicitly nondeterministic; it is
+ * free to select any element in the stream. This is to allow for maximal
+ * performance in parallel operations; the cost is that multiple invocations
+ * on the same source may not return the same result. (If the first element
+ * in the encounter order is desired, use {@link #findFirst()} instead.)
+ *
+ * @return an {@code Optional} describing some element of this stream, or an
+ * empty {@code Optional} if the stream is empty
+ * @throws NullPointerException if the element selected is null
+ * @see #findFirst()
+ */
+ Optional<T> findAny();
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/StreamOpFlag.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,753 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.EnumMap;
+import java.util.Map;
+import java.util.Spliterator;
+
+/**
+ * Flags corresponding to characteristics of streams and operations. Flags are
+ * utilized by the stream framework to control, specialize or optimize
+ * computation.
+ *
+ * <p>
+ * Stream flags may be used to describe characteristics of several different
+ * entities associated with streams: stream sources, intermediate operations,
+ * and terminal operations. Not all stream flags are meaningful for all
+ * entities; the following table summarizes which flags are meaningful in what
+ * contexts:
+ *
+ * <div>
+ * <table>
+ * <caption>Type Characteristics</caption>
+ * <thead class="tableSubHeadingColor">
+ * <tr>
+ * <th colspan="2"> </th>
+ * <th>{@code DISTINCT}</th>
+ * <th>{@code SORTED}</th>
+ * <th>{@code ORDERED}</th>
+ * <th>{@code SIZED}</th>
+ * <th>{@code SHORT_CIRCUIT}</th>
+ * </tr>
+ * </thead>
+ * <tbody>
+ * <tr>
+ * <th colspan="2" class="tableSubHeadingColor">Stream source</th>
+ * <td>Y</td>
+ * <td>Y</td>
+ * <td>Y</td>
+ * <td>Y</td>
+ * <td>N</td>
+ * </tr>
+ * <tr>
+ * <th colspan="2" class="tableSubHeadingColor">Intermediate operation</th>
+ * <td>PCI</td>
+ * <td>PCI</td>
+ * <td>PCI</td>
+ * <td>PC</td>
+ * <td>PI</td>
+ * </tr>
+ * <tr>
+ * <th colspan="2" class="tableSubHeadingColor">Terminal operation</th>
+ * <td>N</td>
+ * <td>N</td>
+ * <td>PC</td>
+ * <td>N</td>
+ * <td>PI</td>
+ * </tr>
+ * </tbody>
+ * <tfoot>
+ * <tr>
+ * <th class="tableSubHeadingColor" colspan="2">Legend</th>
+ * <th colspan="6" rowspan="7"> </th>
+ * </tr>
+ * <tr>
+ * <th class="tableSubHeadingColor">Flag</th>
+ * <th class="tableSubHeadingColor">Meaning</th>
+ * <th colspan="6"></th>
+ * </tr>
+ * <tr><td>Y</td><td>Allowed</td></tr>
+ * <tr><td>N</td><td>Invalid</td></tr>
+ * <tr><td>P</td><td>Preserves</td></tr>
+ * <tr><td>C</td><td>Clears</td></tr>
+ * <tr><td>I</td><td>Injects</td></tr>
+ * </tfoot>
+ * </table>
+ * </div>
+ *
+ * <p>In the above table, "PCI" means "may preserve, clear, or inject"; "PC"
+ * means "may preserve or clear", "PI" means "may preserve or inject", and "N"
+ * means "not valid".
+ *
+ * <p>Stream flags are represented by unioned bit sets, so that a single word
+ * may describe all the characteristics of a given stream entity, and that, for
+ * example, the flags for a stream source can be efficiently combined with the
+ * flags for later operations on that stream.
+ *
+ * <p>The bit masks {@link #STREAM_MASK}, {@link #OP_MASK}, and
+ * {@link #TERMINAL_OP_MASK} can be ANDed with a bit set of stream flags to
+ * produce a mask containing only the valid flags for that entity type.
+ *
+ * <p>When describing a stream source, one need only describe the
+ * characteristics that stream has; when describing a stream operation, one
+ * must describe whether the operation preserves, injects, or clears that
+ * characteristic.  Accordingly, two bits are used for each flag, so as to allow
+ * representing not only the presence of a characteristic, but how an
+ * operation modifies that characteristic. There are two common forms in which
+ * flag bits are combined into an {@code int} bit set. <em>Stream flags</em>
+ * are a unioned bit set constructed by ORing the enum characteristic values of
+ * {@link #set()} (or, more commonly, ORing the corresponding static named
+ * constants prefixed with {@code IS_}). <em>Operation flags</em> are a unioned
+ * bit set constructed by ORing the enum characteristic values of {@link #set()}
+ * or {@link #clear()} (to inject, or clear, respectively, the corresponding
+ * flag), or more commonly ORing the corresponding named constants prefixed with
+ * {@code IS_} or {@code NOT_}. Flags that are not marked with {@code IS_} or
+ * {@code NOT_} are implicitly treated as preserved. Care must be taken when
+ * combining bitsets that the correct combining operations are applied in the
+ * correct order.
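+ *
+ * <p>For example (an illustrative sketch), an intermediate operation that
+ * injects {@code DISTINCT} and clears {@code SIZED} could describe its
+ * operation flags as:
+ * <pre> {@code
+ *     int opFlags = IS_DISTINCT | NOT_SIZED;
+ * }</pre>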
+ *
+ * <p>
+ * With the exception of {@link #SHORT_CIRCUIT}, stream characteristics can be
+ * derived from the equivalent {@link java.util.Spliterator} characteristics:
+ * {@link java.util.Spliterator#DISTINCT}, {@link java.util.Spliterator#SORTED},
+ * {@link java.util.Spliterator#ORDERED}, and
+ * {@link java.util.Spliterator#SIZED}. A spliterator characteristics bit set
+ * can be converted to stream flags using the method
+ * {@link #fromCharacteristics(java.util.Spliterator)} and converted back using
+ * {@link #toCharacteristics(int)}. (The bit set
+ * {@link #SPLITERATOR_CHARACTERISTICS_MASK} is used to AND with a bit set to
+ * produce a valid spliterator characteristics bit set that can be converted to
+ * stream flags.)
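+ *
+ * <p>For example (a sketch, assuming {@code spliterator} is the source
+ * spliterator of a stream):
+ * <pre> {@code
+ *     int streamFlags = fromCharacteristics(spliterator);
+ *     int characteristics = toCharacteristics(streamFlags);
+ * }</pre>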
+ *
+ * <p>
+ * The source of a stream encapsulates a spliterator. The characteristics of
+ * that source spliterator when transformed to stream flags will be a proper
+ * subset of stream flags of that stream.
+ * For example:
+ * <pre> {@code
+ * Spliterator s = ...;
+ * Stream stream = Streams.stream(s);
+ * flagsFromSplitr = fromCharacteristics(s.characteristics());
+ *     assert((flagsFromSplitr & stream.getStreamFlags()) == flagsFromSplitr);
+ * }</pre>
+ *
+ * <p>
+ * An intermediate operation, performed on an input stream to create a new
+ * output stream, may preserve, clear or inject stream or operation
+ * characteristics. Similarly, a terminal operation, performed on an input
+ * stream to produce an output result may preserve, clear or inject stream or
+ * operation characteristics. Preservation means that if that characteristic
+ * is present on the input, then it is also present on the output. Clearing
+ * means that the characteristic is not present on the output regardless of the
+ * input. Injection means that the characteristic is present on the output
+ * regardless of the input. If a characteristic is not cleared or injected then
+ * it is implicitly preserved.
+ *
+ * <p>
+ * A pipeline consists of a stream source encapsulating a spliterator, one or
+ * more intermediate operations, and finally a terminal operation that produces
+ * a result. At each stage of the pipeline, a combined stream and operation
+ * flags can be calculated, using {@link #combineOpFlags(int, int)}. Such flags
+ * ensure that preservation, clearing and injecting information is retained at
+ * each stage.
+ *
+ * The combined stream and operation flags for the source stage of the pipeline
+ * are calculated as follows:
+ * <pre> {@code
+ * int flagsForSourceStage = combineOpFlags(sourceFlags, INITIAL_OPS_VALUE);
+ * }</pre>
+ *
+ * The combined stream and operation flags of each subsequent intermediate
+ * operation stage in the pipeline are calculated as follows:
+ * <pre> {@code
+ *     int flagsForThisStage = combineOpFlags(thisOpFlags, flagsForPreviousStage);
+ * }</pre>
+ *
+ * Finally the flags output from the last intermediate operation of the pipeline
+ * are combined with the operation flags of the terminal operation to produce
+ * the flags output from the pipeline.
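+ *
+ * <p>For example (an illustrative sketch, where {@code terminalOpFlags} are the
+ * operation flags of the terminal operation and {@code flagsForLastStage} are
+ * the combined flags of the last intermediate stage):
+ * <pre> {@code
+ *     int flagsForPipeline = combineOpFlags(terminalOpFlags, flagsForLastStage);
+ * }</pre>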
+ *
+ * <p>Those flags can then be used to apply optimizations. For example, if
+ * {@code SIZED.isKnown(flags)} returns true then the stream size remains
+ * constant throughout the pipeline.  This information can be utilized to
+ * pre-allocate data structures and, combined with
+ * {@link java.util.Spliterator#SUBSIZED}, to perform concurrent in-place
+ * updates into a shared array.
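+ *
+ * <p>For example (an illustrative sketch, assuming {@code flags} holds the
+ * combined flags for the pipeline and {@code spliterator} is its source
+ * spliterator):
+ * <pre> {@code
+ *     if (SIZED.isKnown(flags)) {
+ *         // The pipeline output size is known up front, so an exactly-sized
+ *         // buffer can be allocated to receive the results.
+ *         Object[] results = new Object[(int) spliterator.getExactSizeIfKnown()];
+ *     }
+ * }</pre>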
+ *
+ * For specific details see the {@link AbstractPipeline} constructors.
+ *
+ * @since 1.8
+ */
+enum StreamOpFlag {
+
+ /*
+ * Each characteristic takes up 2 bits in a bit set to accommodate
+ * preserving, clearing and setting/injecting information.
+ *
+ * This applies to stream flags, intermediate/terminal operation flags, and
+ * combined stream and operation flags. Even though the former only requires
+     * 1 bit of information per characteristic, it is more efficient when
+ * combining flags to align set and inject bits.
+ *
+     * Characteristics belong to certain types; see the Type enum. Bit masks for
+ * the types are constructed as per the following table:
+ *
+ * DISTINCT SORTED ORDERED SIZED SHORT_CIRCUIT
+ * SPLITERATOR 01 01 01 01 00
+ * STREAM 01 01 01 01 00
+ * OP 11 11 11 10 01
+ * TERMINAL_OP 00 00 10 00 01
+ * UPSTREAM_TERMINAL_OP 00 00 10 00 00
+ *
+ * 01 = set/inject
+ * 10 = clear
+ * 11 = preserve
+ *
+ * Construction of the columns is performed using a simple builder for
+ * non-zero values.
+ */
+
+
+ // The following flags correspond to characteristics on Spliterator
+ // and the values MUST be equal.
+ //
+
+ /**
+ * Characteristic value signifying that, for each pair of
+ * encountered elements in a stream {@code x, y}, {@code !x.equals(y)}.
+ * <p>
+ * A stream may have this value or an intermediate operation can preserve,
+ * clear or inject this value.
+ */
+ // 0, 0x00000001
+ // Matches Spliterator.DISTINCT
+ DISTINCT(0,
+ set(Type.SPLITERATOR).set(Type.STREAM).setAndClear(Type.OP)),
+
+ /**
+ * Characteristic value signifying that encounter order follows a natural
+ * sort order of comparable elements.
+ * <p>
+ * A stream can have this value or an intermediate operation can preserve,
+ * clear or inject this value.
+ * <p>
+ * Note: The {@link java.util.Spliterator#SORTED} characteristic can define
+ * a sort order with an associated non-null comparator. Augmenting flag
+     * state with additional properties such that those properties can be passed
+ * to operations requires some disruptive changes for a singular use-case.
+ * Furthermore, comparing comparators for equality beyond that of identity
+ * is likely to be unreliable. Therefore the {@code SORTED} characteristic
+ * for a defined non-natural sort order is not mapped internally to the
+ * {@code SORTED} flag.
+ */
+ // 1, 0x00000004
+ // Matches Spliterator.SORTED
+ SORTED(1,
+ set(Type.SPLITERATOR).set(Type.STREAM).setAndClear(Type.OP)),
+
+ /**
+ * Characteristic value signifying that an encounter order is
+ * defined for stream elements.
+ * <p>
+ * A stream can have this value, an intermediate operation can preserve,
+ * clear or inject this value, or a terminal operation can preserve or clear
+ * this value.
+ */
+ // 2, 0x00000010
+ // Matches Spliterator.ORDERED
+ ORDERED(2,
+ set(Type.SPLITERATOR).set(Type.STREAM).setAndClear(Type.OP).clear(Type.TERMINAL_OP)
+ .clear(Type.UPSTREAM_TERMINAL_OP)),
+
+ /**
+     * Characteristic value signifying that the size of the stream
+     * is a known finite size that is equal to the known finite
+     * size of the source spliterator input to the first stream
+     * in the pipeline.
+ * <p>
+ * A stream can have this value or an intermediate operation can preserve or
+ * clear this value.
+ */
+ // 3, 0x00000040
+ // Matches Spliterator.SIZED
+ SIZED(3,
+ set(Type.SPLITERATOR).set(Type.STREAM).clear(Type.OP)),
+
+ // The following Spliterator characteristics are not currently used but a
+ // gap in the bit set is deliberately retained to enable corresponding
+    // stream flags if/when required without modification to other flag values.
+ //
+ // 4, 0x00000100 NONNULL(4, ...
+ // 5, 0x00000400 IMMUTABLE(5, ...
+ // 6, 0x00001000 CONCURRENT(6, ...
+ // 7, 0x00004000 SUBSIZED(7, ...
+
+    // The following 4 flags are currently undefined and are free for any further
+    // spliterator characteristics.
+ //
+ // 8, 0x00010000
+ // 9, 0x00040000
+ // 10, 0x00100000
+ // 11, 0x00400000
+
+ // The following flags are specific to streams and operations
+ //
+
+ /**
+ * Characteristic value signifying that an operation may short-circuit the
+ * stream.
+ * <p>
+ * An intermediate operation can preserve or inject this value,
+ * or a terminal operation can preserve or inject this value.
+ */
+ // 12, 0x01000000
+ SHORT_CIRCUIT(12,
+ set(Type.OP).set(Type.TERMINAL_OP));
+
+    // The following 3 flags are currently undefined and are free for any further
+    // stream flags if/when required
+ //
+ // 13, 0x04000000
+ // 14, 0x10000000
+ // 15, 0x40000000
+
+ /**
+ * Type of a flag
+ */
+ enum Type {
+ /**
+ * The flag is associated with spliterator characteristics.
+ */
+ SPLITERATOR,
+
+ /**
+ * The flag is associated with stream flags.
+ */
+ STREAM,
+
+ /**
+ * The flag is associated with intermediate operation flags.
+ */
+ OP,
+
+ /**
+ * The flag is associated with terminal operation flags.
+ */
+ TERMINAL_OP,
+
+ /**
+ * The flag is associated with terminal operation flags that are
+ * propagated upstream across the last stateful operation boundary
+ */
+ UPSTREAM_TERMINAL_OP
+ }
+
+ /**
+ * The bit pattern for setting/injecting a flag.
+ */
+ private static final int SET_BITS = 0b01;
+
+ /**
+ * The bit pattern for clearing a flag.
+ */
+ private static final int CLEAR_BITS = 0b10;
+
+ /**
+ * The bit pattern for preserving a flag.
+ */
+ private static final int PRESERVE_BITS = 0b11;
+
+ private static MaskBuilder set(Type t) {
+ return new MaskBuilder(new EnumMap<>(Type.class)).set(t);
+ }
+
+ private static class MaskBuilder {
+ final Map<Type, Integer> map;
+
+ MaskBuilder(Map<Type, Integer> map) {
+ this.map = map;
+ }
+
+ MaskBuilder mask(Type t, Integer i) {
+ map.put(t, i);
+ return this;
+ }
+
+ MaskBuilder set(Type t) {
+ return mask(t, SET_BITS);
+ }
+
+ MaskBuilder clear(Type t) {
+ return mask(t, CLEAR_BITS);
+ }
+
+ MaskBuilder setAndClear(Type t) {
+ return mask(t, PRESERVE_BITS);
+ }
+
+ Map<Type, Integer> build() {
+ for (Type t : Type.values()) {
+ map.putIfAbsent(t, 0b00);
+ }
+ return map;
+ }
+ }
+
+ /**
+     * The mask table for a flag; this is used to determine if a flag
+     * corresponds to a certain flag type and for creating mask constants.
+ */
+ private final Map<Type, Integer> maskTable;
+
+ /**
+ * The bit position in the bit mask.
+ */
+ private final int bitPosition;
+
+ /**
+     * The 2-bit set pattern, shifted to this flag's bit position.
+ */
+ private final int set;
+
+ /**
+     * The 2-bit clear pattern, shifted to this flag's bit position.
+ */
+ private final int clear;
+
+ /**
+ * The preserve 2 bit set offset at the bit position.
+     * The 2-bit preserve pattern, shifted to this flag's bit position.
+ private final int preserve;
+
+ private StreamOpFlag(int position, MaskBuilder maskBuilder) {
+ this.maskTable = maskBuilder.build();
+ // Two bits per flag
+ position *= 2;
+ this.bitPosition = position;
+ this.set = SET_BITS << position;
+ this.clear = CLEAR_BITS << position;
+ this.preserve = PRESERVE_BITS << position;
+ }
+
+ /**
+ * Gets the bitmap associated with setting this characteristic.
+ *
+ * @return the bitmap for setting this characteristic
+ */
+ int set() {
+ return set;
+ }
+
+ /**
+ * Gets the bitmap associated with clearing this characteristic.
+ *
+ * @return the bitmap for clearing this characteristic
+ */
+ int clear() {
+ return clear;
+ }
+
+ /**
+ * Determines if this flag is a stream-based flag.
+ *
+ * @return true if a stream-based flag, otherwise false.
+ */
+ boolean isStreamFlag() {
+ return maskTable.get(Type.STREAM) > 0;
+ }
+
+ /**
+ * Checks if this flag is set on stream flags, injected on operation flags,
+ * and injected on combined stream and operation flags.
+ *
+ * @param flags the stream flags, operation flags, or combined stream and
+ * operation flags
+ * @return true if this flag is known, otherwise false.
+ */
+ boolean isKnown(int flags) {
+ return (flags & preserve) == set;
+ }
+
+ /**
+ * Checks if this flag is cleared on operation flags or combined stream and
+ * operation flags.
+ *
+     * @param flags the operation flags or combined stream and operation flags.
+     * @return true if this flag is cleared, otherwise false.
+ */
+ boolean isCleared(int flags) {
+ return (flags & preserve) == clear;
+ }
+
+ /**
+ * Checks if this flag is preserved on combined stream and operation flags.
+ *
+     * @param flags the combined stream and operation flags.
+ * @return true if this flag is preserved, otherwise false.
+ */
+ boolean isPreserved(int flags) {
+ return (flags & preserve) == preserve;
+ }
+
+ /**
+ * Determines if this flag can be set for a flag type.
+ *
+ * @param t the flag type.
+ * @return true if this flag can be set for the flag type, otherwise false.
+ */
+ boolean canSet(Type t) {
+ return (maskTable.get(t) & SET_BITS) > 0;
+ }
+
+ /**
+ * The bit mask for spliterator characteristics
+ */
+ static final int SPLITERATOR_CHARACTERISTICS_MASK = createMask(Type.SPLITERATOR);
+
+ /**
+ * The bit mask for source stream flags.
+ */
+ static final int STREAM_MASK = createMask(Type.STREAM);
+
+ /**
+ * The bit mask for intermediate operation flags.
+ */
+ static final int OP_MASK = createMask(Type.OP);
+
+ /**
+ * The bit mask for terminal operation flags.
+ */
+ static final int TERMINAL_OP_MASK = createMask(Type.TERMINAL_OP);
+
+ /**
+ * The bit mask for upstream terminal operation flags.
+ */
+ static final int UPSTREAM_TERMINAL_OP_MASK = createMask(Type.UPSTREAM_TERMINAL_OP);
+
+ private static int createMask(Type t) {
+ int mask = 0;
+ for (StreamOpFlag flag : StreamOpFlag.values()) {
+ mask |= flag.maskTable.get(t) << flag.bitPosition;
+ }
+ return mask;
+ }
+
+ /**
+ * Complete flag mask.
+ */
+ private static final int FLAG_MASK = createFlagMask();
+
+ private static int createFlagMask() {
+ int mask = 0;
+ for (StreamOpFlag flag : StreamOpFlag.values()) {
+ mask |= flag.preserve;
+ }
+ return mask;
+ }
+
+ /**
+ * Flag mask for stream flags that are set.
+ */
+ private static final int FLAG_MASK_IS = STREAM_MASK;
+
+ /**
+ * Flag mask for stream flags that are cleared.
+ */
+ private static final int FLAG_MASK_NOT = STREAM_MASK << 1;
+
+ /**
+ * The initial value to be combined with the stream flags of the first
+ * stream in the pipeline.
+ */
+ static final int INITIAL_OPS_VALUE = FLAG_MASK_IS | FLAG_MASK_NOT;
+
+ /**
+ * The bit value to set or inject {@link #DISTINCT}.
+ */
+ static final int IS_DISTINCT = DISTINCT.set;
+
+ /**
+ * The bit value to clear {@link #DISTINCT}.
+ */
+ static final int NOT_DISTINCT = DISTINCT.clear;
+
+ /**
+ * The bit value to set or inject {@link #SORTED}.
+ */
+ static final int IS_SORTED = SORTED.set;
+
+ /**
+ * The bit value to clear {@link #SORTED}.
+ */
+ static final int NOT_SORTED = SORTED.clear;
+
+ /**
+ * The bit value to set or inject {@link #ORDERED}.
+ */
+ static final int IS_ORDERED = ORDERED.set;
+
+ /**
+ * The bit value to clear {@link #ORDERED}.
+ */
+ static final int NOT_ORDERED = ORDERED.clear;
+
+ /**
+ * The bit value to set {@link #SIZED}.
+ */
+ static final int IS_SIZED = SIZED.set;
+
+ /**
+ * The bit value to clear {@link #SIZED}.
+ */
+ static final int NOT_SIZED = SIZED.clear;
+
+ /**
+ * The bit value to inject {@link #SHORT_CIRCUIT}.
+ */
+ static final int IS_SHORT_CIRCUIT = SHORT_CIRCUIT.set;
+
+ private static int getMask(int flags) {
+ return (flags == 0)
+ ? FLAG_MASK
+ : ~(flags | ((FLAG_MASK_IS & flags) << 1) | ((FLAG_MASK_NOT & flags) >> 1));
+ }
+
+ /**
+ * Combines stream or operation flags with previously combined stream and
+ * operation flags to produce updated combined stream and operation flags.
+ * <p>
+ * A flag set on stream flags or injected on operation flags,
+     * and injected on the combined stream and operation flags,
+ * will be injected on the updated combined stream and operation flags.
+ *
+ * <p>
+ * A flag set on stream flags or injected on operation flags,
+ * and cleared on the combined stream and operation flags,
+ * will be cleared on the updated combined stream and operation flags.
+ *
+ * <p>
+ * A flag set on the stream flags or injected on operation flags,
+ * and preserved on the combined stream and operation flags,
+ * will be injected on the updated combined stream and operation flags.
+ *
+ * <p>
+ * A flag not set on the stream flags or cleared/preserved on operation
+ * flags, and injected on the combined stream and operation flags,
+ * will be injected on the updated combined stream and operation flags.
+ *
+ * <p>
+ * A flag not set on the stream flags or cleared/preserved on operation
+ * flags, and cleared on the combined stream and operation flags,
+ * will be cleared on the updated combined stream and operation flags.
+ *
+ * <p>
+ * A flag not set on the stream flags,
+ * and preserved on the combined stream and operation flags
+ * will be preserved on the updated combined stream and operation flags.
+ *
+ * <p>
+ * A flag cleared on operation flags,
+ * and preserved on the combined stream and operation flags
+ * will be cleared on the updated combined stream and operation flags.
+ *
+ * <p>
+ * A flag preserved on operation flags,
+ * and preserved on the combined stream and operation flags
+ * will be preserved on the updated combined stream and operation flags.
+ *
+ * @param newStreamOrOpFlags the stream or operation flags.
+ * @param prevCombOpFlags previously combined stream and operation flags.
+     *        The value {@link #INITIAL_OPS_VALUE} must be used as the seed value.
+ * @return the updated combined stream and operation flags.
+ */
+ static int combineOpFlags(int newStreamOrOpFlags, int prevCombOpFlags) {
+        // 0b01 or 0b10 bit pairs are transformed to 0b11
+        // 0b00 bit pairs remain unchanged
+        // Then all the bits are flipped
+        // Then the result is logically or'ed with the operation flags.
+ return (prevCombOpFlags & StreamOpFlag.getMask(newStreamOrOpFlags)) | newStreamOrOpFlags;
+ }
+
+ /**
+ * Converts combined stream and operation flags to stream flags.
+ *
+ * <p>Each flag injected on the combined stream and operation flags will be
+ * set on the stream flags.
+ *
+ * @param combOpFlags the combined stream and operation flags.
+ * @return the stream flags.
+ */
+ static int toStreamFlags(int combOpFlags) {
+        // By flipping the bit pairs, 0b11 becomes 0b00 and 0b01 becomes 0b10
+        // Shift right 1 to restore the set flags and mask off anything other than the set flags
+ return ((~combOpFlags) >> 1) & FLAG_MASK_IS & combOpFlags;
+ }
+
+ /**
+ * Converts stream flags to a spliterator characteristic bit set.
+ *
+ * @param streamFlags the stream flags.
+ * @return the spliterator characteristic bit set.
+ */
+ static int toCharacteristics(int streamFlags) {
+ return streamFlags & SPLITERATOR_CHARACTERISTICS_MASK;
+ }
+
+ /**
+ * Converts a spliterator characteristic bit set to stream flags.
+ *
+ * @implSpec
+ * If the spliterator is naturally {@code SORTED} (the associated
+ * {@code Comparator} is {@code null}) then the characteristic is converted
+ * to the {@link #SORTED} flag, otherwise the characteristic is not
+ * converted.
+ *
+ * @param spliterator the spliterator from which to obtain characteristic
+ * bit set.
+ * @return the stream flags.
+ */
+ static int fromCharacteristics(Spliterator<?> spliterator) {
+ int characteristics = spliterator.characteristics();
+ if ((characteristics & Spliterator.SORTED) != 0 && spliterator.getComparator() != null) {
+ // Do not propagate the SORTED characteristic if it does not correspond
+ // to a natural sort order
+ return characteristics & SPLITERATOR_CHARACTERISTICS_MASK & ~Spliterator.SORTED;
+ }
+ else {
+ return characteristics & SPLITERATOR_CHARACTERISTICS_MASK;
+ }
+ }
+
+ /**
+ * Converts a spliterator characteristic bit set to stream flags.
+ *
+ * @param characteristics the spliterator characteristic bit set.
+ * @return the stream flags.
+ */
+ static int fromCharacteristics(int characteristics) {
+ return characteristics & SPLITERATOR_CHARACTERISTICS_MASK;
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/StreamShape.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+/**
+ * An enum describing the known shape specializations for stream abstractions.
+ * Each will correspond to a specific subinterface of {@link BaseStream}
+ * (e.g., {@code REFERENCE} corresponds to {@code Stream}, {@code INT_VALUE}
+ * corresponds to {@code IntStream}). Each may also correspond to
+ * specializations of value-handling abstractions such as {@code Spliterator},
+ * {@code Consumer}, etc.
+ *
+ * @apiNote
+ * This enum is used by implementations to determine compatibility between
+ * streams and operations (i.e., if the output shape of a stream is compatible
+ * with the input shape of the next operation).
+ *
+ * <p>Some APIs require you to specify both a generic type and a stream shape
+ * for input or output elements, such as {@link TerminalOp}, which has a
+ * generic type parameter for its input type and a getter for its
+ * input shape.  When representing primitive streams in this way, the
+ * generic type parameter should correspond to the wrapper type for that
+ * primitive type.
+ *
+ * @since 1.8
+ */
+enum StreamShape {
+ /**
+ * The shape specialization corresponding to {@code Stream} and elements
+ * that are object references.
+ */
+ REFERENCE,
+ /**
+ * The shape specialization corresponding to {@code IntStream} and elements
+ * that are {@code int} values.
+ */
+ INT_VALUE,
+ /**
+ * The shape specialization corresponding to {@code LongStream} and elements
+ * that are {@code long} values.
+ */
+ LONG_VALUE,
+ /**
+ * The shape specialization corresponding to {@code DoubleStream} and
+ * elements that are {@code double} values.
+ */
+ DOUBLE_VALUE
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/TerminalOp.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.Spliterator;
+
+/**
+ * An operation in a stream pipeline that takes a stream as input and produces
+ * a result or side-effect. A {@code TerminalOp} has an input type and stream
+ * shape, and a result type. A {@code TerminalOp} also has a set of
+ * <em>operation flags</em> that describes how the operation processes elements
+ * of the stream (such as short-circuiting or respecting encounter order; see
+ * {@link StreamOpFlag}).
+ *
+ * <p>A {@code TerminalOp} must provide a sequential and parallel implementation
+ * of the operation relative to a given stream source and set of intermediate
+ * operations.
+ *
+ * @param <E_IN> the type of input elements
+ * @param <R> the type of the result
+ * @since 1.8
+ */
+interface TerminalOp<E_IN, R> {
+ /**
+ * Gets the shape of the input type of this operation.
+ *
+ * @implSpec The default returns {@code StreamShape.REFERENCE}.
+ *
+ * @return StreamShape of the input type of this operation
+ */
+ default StreamShape inputShape() { return StreamShape.REFERENCE; }
+
+ /**
+ * Gets the stream flags of the operation. Terminal operations may set a
+ * limited subset of the stream flags defined in {@link StreamOpFlag}, and
+ * these flags are combined with the previously combined stream and
+ * intermediate operation flags for the pipeline.
+ *
+ * @implSpec The default implementation returns zero.
+ *
+ * @return the stream flags for this operation
+ * @see StreamOpFlag
+ */
+ default int getOpFlags() { return 0; }
+
+ /**
+ * Performs a parallel evaluation of the operation using the specified
+ * {@code PipelineHelper}, which describes the upstream intermediate
+ * operations.
+ *
+ * @implSpec The default performs a sequential evaluation of the operation
+ * using the specified {@code PipelineHelper}.
+ *
+ * @param helper the pipeline helper
+ * @param spliterator the source spliterator
+ * @return the result of the evaluation
+ */
+ default <P_IN> R evaluateParallel(PipelineHelper<E_IN> helper,
+ Spliterator<P_IN> spliterator) {
+ if (Tripwire.ENABLED)
+ Tripwire.trip(getClass(), "{0} triggering TerminalOp.evaluateParallel serial default");
+ return evaluateSequential(helper, spliterator);
+ }
+
+ /**
+ * Performs a sequential evaluation of the operation using the specified
+ * {@code PipelineHelper}, which describes the upstream intermediate
+ * operations.
+ *
+ * @param helper the pipeline helper
+ * @param spliterator the source spliterator
+ * @return the result of the evaluation
+ */
+ <P_IN> R evaluateSequential(PipelineHelper<E_IN> helper,
+ Spliterator<P_IN> spliterator);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/TerminalSink.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.util.function.Supplier;
+
+/**
+ * A {@link Sink} which accumulates state as elements are accepted, and allows
+ * a result to be retrieved after the computation is finished.
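+ *
+ * <p>For example, a minimal sketch of a terminal sink that counts the elements
+ * it accepts (relying on the default {@code Sink} methods) might look like:
+ * <pre>{@code
+ *     class CountingSink<T> implements TerminalSink<T, Long> {
+ *         long count;
+ *         public void accept(T t) { count++; }
+ *         public Long get() { return count; }
+ *     }
+ * }</pre>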
+ *
+ * @param <T> the type of elements to be accepted
+ * @param <R> the type of the result
+ *
+ * @since 1.8
+ */
+interface TerminalSink<T, R> extends Sink<T>, Supplier<R> { }
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/Tripwire.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package java.util.stream;
+
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+
+import sun.util.logging.PlatformLogger;
+
+/**
+ * Utility class for detecting inadvertent uses of boxing in
+ * {@code java.util.stream} classes. The detection is turned on or off based on
+ * whether the system property {@code org.openjdk.java.util.stream.tripwire} is
+ * considered {@code true} according to {@link Boolean#getBoolean(String)}.
+ * This should normally be turned off for production use.
+ *
+ * @apiNote
+ * Typical usage would be for boxing code to do:
+ * <pre>{@code
+ * if (Tripwire.ENABLED)
+ * Tripwire.trip(getClass(), "{0} calling Sink.OfInt.accept(Integer)");
+ * }</pre>
+ *
+ * @since 1.8
+ */
+final class Tripwire {
+ private static final String TRIPWIRE_PROPERTY = "org.openjdk.java.util.stream.tripwire";
+
+ /** Should debugging checks be enabled? */
+ static final boolean ENABLED = AccessController.doPrivileged(
+ (PrivilegedAction<Boolean>) () -> Boolean.getBoolean(TRIPWIRE_PROPERTY));
+
+ private Tripwire() { }
+
+ /**
+ * Produces a log warning, using {@code PlatformLogger.getLogger(className)},
+     * with the supplied message.  The class name of {@code trippingClass} will
+ * be used as the first parameter to the message.
+ *
+     * @param trippingClass the class generating the message
+ * @param msg A message format string of the type expected by
+ * {@link PlatformLogger}
+ */
+ static void trip(Class<?> trippingClass, String msg) {
+ PlatformLogger.getLogger(trippingClass.getName()).warning(msg, trippingClass.getName());
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/src/share/classes/java/util/stream/package-info.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,566 @@
+/*
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * <h1>java.util.stream</h1>
+ *
+ * Classes to support functional-style operations on streams of values, as in the following:
+ *
+ * <pre>{@code
+ * int sumOfWeights = blocks.stream().filter(b -> b.getColor() == RED)
+ * .mapToInt(b -> b.getWeight())
+ * .sum();
+ * }</pre>
+ *
+ * <p>Here we use {@code blocks}, which might be a {@code Collection}, as a source for a stream,
+ * and then perform a filter-map-reduce ({@code sum()} is an example of a <a href="package-summary.html#Reduction">reduction</a>
+ * operation) on the stream to obtain the sum of the weights of the red blocks.
+ *
+ * <p>The key abstraction used in this approach is {@link java.util.stream.Stream}, as well as its primitive
+ * specializations {@link java.util.stream.IntStream}, {@link java.util.stream.LongStream},
+ * and {@link java.util.stream.DoubleStream}. Streams differ from Collections in several ways:
+ *
+ * <ul>
+ *     <li>No storage.  A stream is not a data structure that stores elements; instead, it
+ *     carries values from a source (which could be a data structure, a generator, an IO channel, etc)
+ *     through a pipeline of computational operations.</li>
+ * <li>Functional in nature. An operation on a stream produces a result, but does not modify
+ * its underlying data source. For example, filtering a {@code Stream} produces a new {@code Stream},
+ * rather than removing elements from the underlying source.</li>
+ * <li>Laziness-seeking. Many stream operations, such as filtering, mapping, or duplicate removal,
+ * can be implemented lazily, exposing opportunities for optimization. (For example, "find the first
+ * {@code String} matching a pattern" need not examine all the input strings.) Stream operations
+ * are divided into intermediate ({@code Stream}-producing) operations and terminal (value-producing)
+ * operations; all intermediate operations are lazy.</li>
+ * <li>Possibly unbounded. While collections have a finite size, streams need not. Operations
+ * such as {@code limit(n)} or {@code findFirst()} can allow computations on infinite streams
+ * to complete in finite time.</li>
+ * </ul>
+ *
+ * <h2><a name="StreamPipelines">Stream pipelines</a></h2>
+ *
+ * <p>Streams are used to create <em>pipelines</em> of <a href="package-summary.html#StreamOps">operations</a>. A
+ * complete stream pipeline has several components: a source (which may be a {@code Collection},
+ * an array, a generator function, or an IO channel); zero or more <em>intermediate operations</em>
+ * such as {@code Stream.filter} or {@code Stream.map}; and a <em>terminal operation</em> such
+ * as {@code Stream.forEach} or {@code java.util.stream.Stream.reduce}. Stream operations may take as parameters
+ * <em>function values</em> (which are often lambda expressions, but could be method references
+ * or objects) which parameterize the behavior of the operation, such as a {@code Predicate}
+ * passed to the {@code Stream#filter} method.
+ *
+ * <p>Intermediate operations return a new {@code Stream}. They are lazy; executing an
+ * intermediate operation such as {@link java.util.stream.Stream#filter Stream.filter} does
+ * not actually perform any filtering, instead creating a new {@code Stream} that, when
+ * traversed, contains the elements of the initial {@code Stream} that match the
+ * given {@code Predicate}. Consuming elements from the stream source does not
+ * begin until the terminal operation is executed.
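+ *
+ * <p>For example (an illustrative sketch, assuming {@code names} is a
+ * {@code Collection<String>}):
+ * <pre>{@code
+ *     Stream<String> longNames = names.stream()
+ *                                     .filter(s -> s.length() > 10); // no elements consumed yet
+ *     long count = longNames.count();                                // source traversed here
+ * }</pre>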
+ *
+ * <p>Terminal operations consume the {@code Stream} and produce a result or a side-effect.
+ * After a terminal operation is performed, the stream can no longer be used and you must
+ * return to the data source, or select a new data source, to get a new stream. For example,
+ * obtaining the sum of weights of all red blocks, and then of all blue blocks, requires a
+ * filter-map-reduce on two different streams:
+ * <pre>{@code
+ * int sumOfRedWeights = blocks.stream().filter(b -> b.getColor() == RED)
+ * .mapToInt(b -> b.getWeight())
+ * .sum();
+ * int sumOfBlueWeights = blocks.stream().filter(b -> b.getColor() == BLUE)
+ * .mapToInt(b -> b.getWeight())
+ * .sum();
+ * }</pre>
+ *
+ * <p>However, there are other techniques that allow you to obtain both results in a single
+ * pass if multiple traversal is impractical or inefficient. TODO provide link
+ *
+ * <h3><a name="StreamOps">Stream operations</a></h3>
+ *
+ * <p>Intermediate stream operations (such as {@code filter} or {@code sorted}) always produce a
+ * new {@code Stream}, and are always <em>lazy</em>.  Executing a lazy operation does not
+ * trigger processing of the stream contents; all processing is deferred until the terminal
+ * operation commences. Processing streams lazily allows for significant efficiencies; in a
+ * pipeline such as the filter-map-sum example above, filtering, mapping, and addition can be
+ * fused into a single pass, with minimal intermediate state. Laziness also enables us to avoid
+ * examining all the data when it is not necessary; for operations such as "find the first
+ * string longer than 1000 characters", one need not examine all the input strings, just enough
+ * to find one that has the desired characteristics. (This behavior becomes even more important
+ * when the input stream is infinite and not merely large.)
+ *
+ * <p>Intermediate operations are further divided into <em>stateless</em> and <em>stateful</em>
+ * operations. Stateless operations retain no state from previously seen values when processing
+ * a new value; examples of stateless intermediate operations include {@code filter} and
+ * {@code map}. Stateful operations may incorporate state from previously seen elements in
+ * processing new values; examples of stateful intermediate operations include {@code distinct}
+ * and {@code sorted}. Stateful operations may need to process the entire input before
+ * producing a result; for example, one cannot produce any results from sorting a stream until
+ * one has seen all elements of the stream. As a result, under parallel computation, some
+ * pipelines containing stateful intermediate operations have to be executed in multiple passes.
+ * Pipelines containing exclusively stateless intermediate operations can be processed in a
+ * single pass, whether sequential or parallel.
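+ *
+ * <p>For example (an illustrative sketch, assuming {@code words} is a
+ * {@code Collection<String>}):
+ * <pre>{@code
+ *     List<String> result = words.stream()
+ *                                .filter(w -> !w.isEmpty())   // stateless
+ *                                .map(String::toUpperCase)    // stateless
+ *                                .sorted()                    // stateful
+ *                                .collect(Collectors.toList());
+ * }</pre>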
+ *
+ * <p>Further, some operations are deemed <em>short-circuiting</em> operations. An intermediate
+ * operation is short-circuiting if, when presented with infinite input, it may produce a
+ * finite stream as a result. A terminal operation is short-circuiting if, when presented with
+ * infinite input, it may terminate in finite time. (Having a short-circuiting operation is a
+ * necessary, but not sufficient, condition for the processing of an infinite stream to
+ * terminate normally in finite time.)
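+ *
+ * <p>For example (an illustrative sketch, assuming {@code values} is a
+ * possibly infinite {@code Stream<Integer>}):
+ * <pre>{@code
+ *     Optional<Integer> firstMultiple = values.filter(n -> n % 7 == 0)
+ *                                             .findFirst();  // short-circuiting terminal operation
+ * }</pre>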
+ *
+ * <p>Terminal operations (such as {@code forEach} or {@code findFirst}) are always eager
+ * (they execute completely before returning), and produce a non-{@code Stream} result, such
+ * as a primitive value or a {@code Collection}, or have side-effects.
+ *
+ * <h3>Parallelism</h3>
+ *
+ * <p>By recasting aggregate operations as a pipeline of operations on a stream of values, many
+ * aggregate operations can be more easily parallelized. A {@code Stream} can execute either
+ * in serial or in parallel. When streams are created, they are either created as sequential
+ * or parallel streams; whether a stream is sequential or parallel can also be switched by the
+ * {@link java.util.stream.Stream#sequential()} and {@link java.util.stream.Stream#parallel()}
+ * operations. The {@code Stream} implementations in the JDK create serial streams unless
+ * parallelism is explicitly requested. For example, {@code Collection} has methods
+ * {@link java.util.Collection#stream} and {@link java.util.Collection#parallelStream},
+ * which produce sequential and parallel streams respectively; other stream-bearing methods
+ * such as {@link java.util.stream.Streams#intRange(int, int)} produce sequential
+ * streams but these can be efficiently parallelized by calling {@code parallel()} on the
+ * result. The set of operations on serial and parallel streams is identical. To execute the
+ * "sum of weights of blocks" query in parallel, we would do:
+ *
+ * <pre>{@code
+ * int sumOfWeights = blocks.parallelStream().filter(b -> b.getColor() == RED)
+ * .mapToInt(b -> b.getWeight())
+ * .sum();
+ * }</pre>
+ *
+ * <p>The only difference between the serial and parallel versions of this example code is
+ * the creation of the initial {@code Stream}. Whether a {@code Stream} will execute in serial
+ * or parallel can be determined by the {@code Stream#isParallel} method. When the terminal
+ * operation is initiated, the entire stream pipeline is either executed sequentially or in
+ * parallel, determined by the last operation that affected the stream's serial-parallel
+ * orientation (which could be the stream source, or the {@code sequential()} or
+ * {@code parallel()} methods.)
+ *
+ * <p>In order for the results of parallel operations to be deterministic and consistent with
+ * their serial equivalent, the function values passed into the various stream operations should
+ * be <a href="#Non-Interference"><em>stateless</em></a>.
+ *
+ * <h3><a name="Ordering">Ordering</a></h3>
+ *
+ * <p>Streams may or may not have an <em>encounter order</em>. An encounter
+ * order specifies the order in which elements are provided by the stream to the
+ * operations pipeline. Whether or not there is an encounter order depends on
+ * the source, the intermediate operations, and the terminal operation.
+ * Certain stream sources (such as {@code List} or arrays) are intrinsically
+ * ordered, whereas others (such as {@code HashSet}) are not. Some intermediate
+ * operations may impose an encounter order on an otherwise unordered stream,
+ * such as {@link java.util.stream.Stream#sorted()}, and others may render an
+ * ordered stream unordered (such as {@link java.util.stream.Stream#unordered()}).
+ * Some terminal operations may ignore encounter order, such as
+ * {@link java.util.stream.Stream#forEach}.
+ *
+ * <p>If a Stream is ordered, most operations are constrained to operate on the
+ * elements in their encounter order; if the source of a stream is a {@code List}
+ * containing {@code [1, 2, 3]}, then the result of executing {@code map(x -> x*2)}
+ * must be {@code [2, 4, 6]}. However, if the source has no defined encounter
+ * order, then any of the six permutations of the values {@code [2, 4, 6]} would
+ * be a valid result. Many operations can still be efficiently parallelized even
+ * under ordering constraints.
+ *
+ * <p>For sequential streams, ordering is only relevant to the determinism
+ * of operations performed repeatedly on the same source. (An {@code ArrayList}
+ * is constrained to iterate elements in order; a {@code HashSet} is not, and
+ * repeated iteration might produce a different order.)
+ *
+ * <p>For parallel streams, relaxing the ordering constraint can enable
+ * optimized implementation for some operations. For example, duplicate
+ * filtration on an ordered stream must completely process the first partition
+ * before it can return any elements from a subsequent partition, even if those
+ * elements are available earlier. On the other hand, without the constraint of
+ * ordering, duplicate filtration can be done more efficiently by using a
+ * shared concurrent set (such as one backed by a {@code ConcurrentHashMap}).
+ * There will be cases where the stream
+ * is structurally ordered (the source is ordered and the intermediate
+ * operations are order-preserving), but the user does not particularly care
+ * about the encounter order. In some cases, explicitly de-ordering the stream
+ * with the {@link java.util.stream.Stream#unordered()} method may result in
+ * improved parallel performance for some stateful or terminal operations.
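+ *
+ * <p>For example (an illustrative sketch, assuming {@code names} is a
+ * {@code Collection<String>}):
+ * <pre>{@code
+ *     List<String> distinctNames = names.parallelStream()
+ *                                       .unordered()   // encounter order is not needed here
+ *                                       .distinct()
+ *                                       .collect(Collectors.toList());
+ * }</pre>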
+ *
+ * <h3><a name="Non-Interference">Non-interference</a></h3>
+ *
+ * The {@code java.util.stream} package enables you to execute possibly-parallel
+ * bulk-data operations over a variety of data sources, including even non-thread-safe
+ * collections such as {@code ArrayList}. This is possible only if we can
+ * prevent <em>interference</em> with the data source during the execution of a
+ * stream pipeline. (Execution begins when the terminal operation is invoked, and ends
+ * when the terminal operation completes.) For most data sources, preventing interference
+ * means ensuring that the data source is <em>not modified at all</em> during the execution
+ * of the stream pipeline. (Some data sources, such as concurrent collections, are
+ * specifically designed to handle concurrent modification.)
+ *
+ * <p>Accordingly, lambda expressions (or other objects implementing the appropriate functional
+ * interface) passed to stream methods should never modify the stream's data source. An
+ * implementation is said to <em>interfere</em> with the data source if it modifies, or causes
+ * to be modified, the stream's data source. The need for non-interference applies to all
+ * pipelines, not just parallel ones. Unless the stream source is concurrent, modifying a
+ * stream's data source during execution of a stream pipeline can cause exceptions, incorrect
+ * answers, or nonconformant results.
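+ *
+ * <p>For example, an illustrative sketch of what <em>not</em> to do:
+ * <pre>{@code
+ *     List<String> l = new ArrayList<>(Arrays.asList("one", "two"));
+ *     // Interferes with the source: the lambda adds to the list while the
+ *     // pipeline is executing, which may throw ConcurrentModificationException
+ *     // or produce incorrect results.
+ *     l.stream().forEach(s -> l.add(s + "!"));
+ * }</pre>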
+ *
+ * <p>Further, results may be nondeterministic or incorrect if the lambda expressions passed to
+ * stream operations are <em>stateful</em>. A stateful lambda (or other object implementing the
+ * appropriate functional interface) is one whose result depends on any state which might change
+ * during the execution of the stream pipeline. An example of a stateful lambda is:
+ * <pre>{@code
+ * Set<Integer> seen = Collections.synchronizedSet(new HashSet<>());
+ * stream.parallel().map(e -> { if (seen.add(e)) return 0; else return e; })...
+ * }</pre>
+ * Here, if the mapping operation is performed in parallel, the results for the same input
+ * could vary from run to run, due to thread scheduling differences, whereas, with a stateless
+ * lambda expression the results would always be the same.
+ *
+ * <h3>Side-effects</h3>
+ *
+ * <h2><a name="Reduction">Reduction operations</a></h2>
+ *
+ * A <em>reduction</em> operation takes a stream of elements and processes them in a way
+ * that reduces to a single value or summary description, such as finding the sum or maximum
+ * of a set of numbers. (In more complex scenarios, the reduction operation might need to
+ * extract data from the elements before reducing that data to a single value, such as
+ * finding the sum of weights of a set of blocks. This would require extracting the weight
+ * from each block before summing up the weights.)
+ *
+ * <p>Of course, such operations can be readily implemented as simple sequential loops, as in:
+ * <pre>{@code
+ * int sum = 0;
+ * for (int x : numbers) {
+ * sum += x;
+ * }
+ * }</pre>
+ * However, there may be a significant advantage to preferring a {@link java.util.stream.Stream#reduce reduce operation}
+ * over a mutative accumulation such as the above -- a properly constructed reduce operation is
+ * inherently parallelizable so long as the
+ * {@link java.util.function.BinaryOperator reduction operator}
+ * has the right characteristics. Specifically, the operator must be
+ * <a href="#Associativity">associative</a>. For example, given a
+ * stream of numbers for which we want to find the sum, we can write:
+ * <pre>{@code
+ * int sum = numbers.reduce(0, (x,y) -> x+y);
+ * }</pre>
+ * or more succinctly:
+ * <pre>{@code
+ * int sum = numbers.reduce(0, Integer::sum);
+ * }</pre>
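+ *
+ * <p>By contrast (an illustrative non-example, reusing the {@code numbers}
+ * stream from above), subtraction is not associative, so the following is
+ * <em>not</em> a valid reduction and may give different results when computed
+ * in parallel:
+ * <pre>{@code
+ *     int difference = numbers.reduce(0, (a, b) -> a - b);  // don't do this
+ * }</pre>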
+ *
+ * <p>(The primitive specializations of {@link java.util.stream.Stream}, such as
+ * {@link java.util.stream.IntStream}, even have convenience methods for common reductions,
+ * such as {@link java.util.stream.IntStream#sum() sum} and {@link java.util.stream.IntStream#max() max},
+ * which are implemented as simple wrappers around reduce.)
+ *
+ * <p>Reduction parallelizes well since the implementation of {@code reduce} can operate on
+ * subsets of the stream in parallel, and then combine the intermediate results to get the final
+ * correct answer. Even if you were to use a parallelizable form of the
+ * {@link java.util.stream.Stream#forEach(Consumer) forEach()} method
+ * in place of the original for-each loop above, you would still have to provide thread-safe
+ * updates to the shared accumulating variable {@code sum}, and the required synchronization
+ * would likely eliminate any performance gain from parallelism. Using a {@code reduce} method
+ * instead removes all of the burden of parallelizing the reduction operation, and the library
+ * can provide an efficient parallel implementation with no additional synchronization needed.
+ *
+ * <p>The "blocks" example shown earlier shows how reduction combines with other operations
+ * to replace for-loops with bulk operations. If {@code blocks} is a collection of {@code Block}
+ * objects, which have a {@code getWeight} method, we can find the heaviest block with:
+ * <pre>{@code
+ * OptionalInt heaviest = blocks.stream()
+ * .mapToInt(Block::getWeight)
+ * .reduce(Integer::max);
+ * }</pre>
+ *
+ * <p>In its more general form, a {@code reduce} operation on elements of type {@code <T>}
+ * yielding a result of type {@code <U>} requires three parameters:
+ * <pre>{@code
+ * <U> U reduce(U identity,
+ *                 BiFunction<U, ? super T, U> accumulator,
+ * BinaryOperator<U> combiner);
+ * }</pre>
+ * Here, the <em>identity</em> element is both an initial seed for the reduction, and a default
+ * result if there are no elements. The <em>accumulator</em> function takes a partial result and
+ * the next element, and produces a new partial result. The <em>combiner</em> function combines
+ * the partial results of two accumulators to produce a new partial result, and eventually the
+ * final result.
+ *
+ * <p>This form is a generalization of the two-argument form, and is also a generalization of
+ * the map-reduce construct illustrated above. If we wanted to re-cast the simple {@code sum}
+ * example using the more general form, {@code 0} would be the identity element, while
+ * {@code Integer::sum} would be both the accumulator and combiner. For the sum-of-weights
+ * example, this could be re-cast as:
+ * <pre>{@code
+ * int sumOfWeights = blocks.stream().reduce(0,
+ *                                           (sum, b) -> sum + b.getWeight(),
+ *                                           Integer::sum);
+ * }</pre>
+ * though the map-reduce form is more readable and generally preferable. The generalized form
+ * is provided for cases where significant work can be optimized away by combining mapping and
+ * reducing into a single function.
+ *
+ * <p>More formally, the {@code identity} value must be an <em>identity</em> for the combiner
+ * function. This means that for all {@code u}, {@code combiner.apply(identity, u)} is equal
+ * to {@code u}. Additionally, the {@code combiner} function must be
+ * <a href="#Associativity">associative</a> and must be compatible with the {@code accumulator}
+ * function; for all {@code u} and {@code t}, the following must hold:
+ * <pre>{@code
+ * combiner.apply(u, accumulator.apply(identity, t)) == accumulator.apply(u, t)
+ * }</pre>
+ *
+ * <h3><a name="MutableReduction">Mutable Reduction</a></h3>
+ *
+ * A <em>mutable</em> reduction operation is similar to an ordinary reduction, in that it reduces
+ * a stream of values to a single value, but instead of producing a distinct single-valued result, it
+ * mutates a general <em>result container</em>, such as a {@code Collection} or {@code StringBuilder},
+ * as it processes the elements in the stream.
+ *
+ * <p>For example, if we wanted to take a stream of strings and concatenate them into a single
+ * long string, we <em>could</em> achieve this with ordinary reduction:
+ * <pre>{@code
+ *     String concatenated = strings.reduce("", String::concat);
+ * }</pre>
+ *
+ * We would get the desired result, and it would even work in parallel. However, we might not
+ * be happy about the performance! Such an implementation would do a great deal of string
+ * copying, and the run time would be <em>O(n^2)</em> in the number of elements. A more
+ * performant approach would be to accumulate the results into a {@link java.lang.StringBuilder}, which
+ * is a mutable container for accumulating strings. We can use the same technique to
+ * parallelize mutable reduction as we do with ordinary reduction.
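+ *
+ * <p>For example, a sketch of the {@code StringBuilder} approach, using the
+ * three-argument {@code collect} form described below (and assuming
+ * {@code strings} is the {@code Stream<String>} from the previous example):
+ * <pre>{@code
+ *     String concatenated = strings.collect(StringBuilder::new,
+ *                                           StringBuilder::append,
+ *                                           StringBuilder::append)
+ *                                  .toString();
+ * }</pre>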
+ *
+ * <p>The mutable reduction operation is called {@link java.util.stream.Stream#collect(Collector) collect()}, as it
+ * collects together the desired results into a result container such as {@code StringBuilder}.
+ * A {@code collect} operation requires three things: a factory function which will construct
+ * new instances of the result container, an accumulating function that will update a result
+ * container by incorporating a new element, and a combining function that can take two
+ * result containers and merge their contents. The form of this is very similar to the general
+ * form of ordinary reduction:
+ * <pre>{@code
+ * <R> R collect(Supplier<R> resultFactory,
+ * BiConsumer<R, ? super T> accumulator,
+ * BiConsumer<R, R> combiner);
+ * }</pre>
+ * As with {@code reduce()}, the benefit of expressing {@code collect} in this abstract way is
+ * that it is directly amenable to parallelization: we can accumulate partial results in parallel
+ * and then combine them. For example, to collect the String representations of the elements
+ * in a stream into an {@code ArrayList}, we could write the obvious sequential for-each form:
+ * <pre>{@code
+ * ArrayList<String> strings = new ArrayList<>();
+ * for (T element : stream) {
+ * strings.add(element.toString());
+ * }
+ * }</pre>
+ * Or we could use a parallelizable collect form:
+ * <pre>{@code
+ * ArrayList<String> strings = stream.collect(() -> new ArrayList<>(),
+ * (c, e) -> c.add(e.toString()),
+ * (c1, c2) -> c1.addAll(c2));
+ * }</pre>
+ * or, noting that we have buried a mapping operation inside the accumulator function, more
+ * succinctly as:
+ * <pre>{@code
+ * ArrayList<String> strings = stream.map(Object::toString)
+ * .collect(ArrayList::new, ArrayList::add, ArrayList::addAll);
+ * }</pre>
+ * Here, our supplier is just the {@link java.util.ArrayList#ArrayList() ArrayList constructor}, the
+ * accumulator adds the stringified element to an {@code ArrayList}, and the combiner simply
+ * uses {@link java.util.ArrayList#addAll addAll} to copy the strings from one container into the other.
+ *
+ * <p>As with the regular reduction operation, the ability to parallelize only comes if an
+ * <a href="package-summary.html#Associativity">associativity</a> condition is met. The {@code combiner} is associative
+ * if for result containers {@code r1}, {@code r2}, and {@code r3}:
+ * <pre>{@code
+ * combiner.accept(r1, r2);
+ * combiner.accept(r1, r3);
+ * }</pre>
+ * is equivalent to
+ * <pre>{@code
+ * combiner.accept(r2, r3);
+ * combiner.accept(r1, r2);
+ * }</pre>
+ * where equivalence means that {@code r1} is left in the same state (according to the meaning
+ * of {@link java.lang.Object#equals equals} for the element types). Similarly, the {@code resultFactory}
+ * must act as an <em>identity</em> with respect to the {@code combiner} so that for any result
+ * container {@code r}:
+ * <pre>{@code
+ * combiner.accept(r, resultFactory.get());
+ * }</pre>
+ * does not modify the state of {@code r} (again according to the meaning of
+ * {@link java.lang.Object#equals equals}). Finally, the {@code accumulator} and {@code combiner} must be
+ * compatible such that for a result container {@code r} and element {@code t}:
+ * <pre>{@code
+ * r2 = resultFactory.get();
+ * accumulator.accept(r2, t);
+ * combiner.accept(r, r2);
+ * }</pre>
+ * is equivalent to:
+ * <pre>{@code
+ * accumulator.accept(r,t);
+ * }</pre>
+ * where equivalence means that {@code r} is left in the same state (again according to the
+ * meaning of {@link java.lang.Object#equals equals}).
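+ *
+ * <p>As an informal illustration only (not a requirement beyond those stated above), the
+ * {@code ArrayList} example satisfies these constraints:
+ * <pre>{@code
+ *     ArrayList<String> r = new ArrayList<>();
+ *     r.add("a");                                // some earlier accumulation into r
+ *     ArrayList<String> r2 = new ArrayList<>();  // r2 = resultFactory.get()
+ *     r2.add("b");                               // accumulator.accept(r2, "b")
+ *     r.addAll(r2);                              // combiner.accept(r, r2)
+ *     // r is now ["a", "b"], the same state that accumulator.accept(r, "b") would have produced
+ * }</pre>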
+ *
+ * <p>The three aspects of {@code collect} (supplier, accumulator, and combiner) are often very
+ * tightly coupled, and it is convenient to introduce the notion of a {@link java.util.stream.Collector} as
+ * being an object that embodies all three aspects. There is a {@link java.util.stream.Stream#collect(Collector) collect}
+ * method that simply takes a {@code Collector} and returns the resulting container.
+ * The above example for collecting strings into a {@code List} can be rewritten using a
+ * standard {@code Collector} as:
+ * <pre>{@code
+ * ArrayList<String> strings = stream.map(Object::toString)
+ * .collect(Collectors.toList());
+ * }</pre>
+ *
+ * <h3><a name="ConcurrentReduction">Reduction, Concurrency, and Ordering</a></h3>
+ *
+ * With some complex reduction operations, for example a collect that produces a
+ * {@code Map}, such as:
+ * <pre>{@code
+ * Map<Buyer, List<Transaction>> salesByBuyer
+ * = txns.parallelStream()
+ * .collect(Collectors.groupingBy(Transaction::getBuyer));
+ * }</pre>
+ * (where {@link java.util.stream.Collectors#groupingBy} is a utility function
+ * that returns a {@link java.util.stream.Collector} for grouping sets of elements based on some key)
+ * it may actually be counterproductive to perform the operation in parallel.
+ * This is because the combining step (merging one {@code Map} into another by key)
+ * can be expensive for some {@code Map} implementations.
+ *
+ * <p>Suppose, however, that the result container used in this reduction
+ * was a concurrently modifiable collection -- such as a
+ * {@link java.util.concurrent.ConcurrentHashMap ConcurrentHashMap}. In that case,
+ * the parallel invocations of the accumulator could actually deposit their results
+ * concurrently into the same shared result container, eliminating the need for the combiner to
+ * merge distinct result containers. This potentially provides a boost
+ * to the parallel execution performance. We call this a <em>concurrent</em> reduction.
+ *
+ * <p>A {@link java.util.stream.Collector} that supports concurrent reduction is marked with the
+ * {@link java.util.stream.Collector.Characteristics#CONCURRENT} characteristic.
+ * Having a concurrent collector is a necessary condition for performing a
+ * concurrent reduction, but that alone is not sufficient. If you imagine multiple
+ * accumulators depositing results into a shared container, the order in which
+ * results are deposited is non-deterministic. Consequently, a concurrent reduction
+ * is only possible if ordering is not important for the stream being processed.
+ * The {@link java.util.stream.Stream#collect(Collector)}
+ * implementation will only perform a concurrent reduction if
+ * <ul>
+ * <li>The stream is parallel;</li>
+ * <li>The collector has the
+ * {@link java.util.stream.Collector.Characteristics#CONCURRENT} characteristic;
+ * and</li>
+ * <li>Either the stream is unordered, or the collector has the
+ * {@link java.util.stream.Collector.Characteristics#UNORDERED} characteristic.</li>
+ * </ul>
+ * For example:
+ * <pre>{@code
+ * Map<Buyer, List<Transaction>> salesByBuyer
+ * = txns.parallelStream()
+ * .unordered()
+ * .collect(groupingByConcurrent(Transaction::getBuyer));
+ * }</pre>
+ * (where {@link java.util.stream.Collectors#groupingByConcurrent} is the concurrent companion
+ * to {@code groupingBy}).
+ *
+ * <p>Note that if it is important that the elements for a given key appear in the
+ * order they appear in the source, then we cannot use a concurrent reduction,
+ * as ordering is one of the casualties of concurrent insertion. We would then
+ * be constrained to implement either a sequential reduction or a merge-based
+ * parallel reduction.
+ *
+ * <h2><a name="Associativity">Associativity</a></h2>
+ *
+ * An operator or function {@code op} is <em>associative</em> if the following holds:
+ * <pre>{@code
+ * (a op b) op c == a op (b op c)
+ * }</pre>
+ * The importance of this to parallel evaluation can be seen if we expand this to four terms:
+ * <pre>{@code
+ * a op b op c op d == (a op b) op (c op d)
+ * }</pre>
+ * So we can evaluate {@code (a op b)} in parallel with {@code (c op d)} and then invoke {@code op} on
+ * the results.
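+ * For example, taking {@code op} to be integer addition (which is associative), the two partial
+ * results can be computed independently and then combined; a trivial sketch:
+ * <pre>{@code
+ *     int left  = 1 + 2;         // (a op b), possibly evaluated on one thread
+ *     int right = 3 + 4;         // (c op d), possibly evaluated on another thread
+ *     int total = left + right;  // equals ((1 + 2) + 3) + 4 because + is associative
+ * }</pre>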
+ * TODO what does associative mean for mutative combining functions?
+ * FIXME: we described mutative associativity above.
+ *
+ * <h2><a name="StreamSources">Stream sources</a></h2>
+ * TODO where does this section go?
+ *
+ * XXX - change this section to stream construction, gradually introducing more
+ * complex ways to construct
+ * - construction from Collection
+ * - construction from Iterator
+ * - construction from array
+ * - construction from generators
+ * - construction from spliterator
+ *
+ * XXX - the following is a quite low-level but important aspect of stream construction
+ *
+ * <p>A pipeline is initially constructed from a spliterator (see {@link java.util.Spliterator}) supplied by a stream source.
+ * The spliterator covers elements of the source and provides element traversal operations
+ * for a possibly-parallel computation. See methods on {@link java.util.stream.Streams} for construction
+ * of pipelines using spliterators.
+ *
+ * <p>A source may directly supply a spliterator. If so, the spliterator is traversed, split, or queried
+ * for estimated size after, and never before, the terminal operation commences. It is strongly recommended
+ * that the spliterator report a characteristic of {@code IMMUTABLE} or {@code CONCURRENT}, or be
+ * <em>late-binding</em> and not bind to the elements it covers until traversed, split or queried for
+ * estimated size.
+ *
+ * <p>If a source cannot directly supply a recommended spliterator then it may indirectly supply a spliterator
+ * using a {@code Supplier}. The spliterator is obtained from the supplier after, and never before, the terminal
+ * operation of the stream pipeline commences.
+ *
+ * <p>Such requirements significantly reduce the scope of potential interference to the interval that starts
+ * when the terminal operation commences and ends when a result or side-effect is produced. See
+ * <a href="package-summary.html#Non-Interference">Non-Interference</a> for
+ * more details.
+ *
+ * XXX - move the following to the non-interference section
+ *
+ * <p>A source can be modified before the terminal operation commences and those modifications will be reflected in
+ * the covered elements. Afterwards, and depending on the properties of the source, further modifications
+ * might not be reflected and a {@code ConcurrentModificationException} may be thrown.
+ *
+ * <p>For example, consider the following code:
+ * <pre>{@code
+ * List<String> l = new ArrayList<>(Arrays.asList("one", "two"));
+ * Stream<String> sl = l.stream();
+ * l.add("three");
+ * String s = sl.collect(toStringJoiner(" ")).toString();
+ * }</pre>
+ * First a list is created consisting of two strings: "one" and "two". Then a stream is created from that list.
+ * Next the list is modified by adding a third string: "three". Finally the elements of the stream are collected
+ * and joined together. Since the list was modified before the terminal {@code collect} operation commenced,
+ * the result will be the string "one two three". However, if the list is modified after the terminal operation
+ * commences, as in:
+ * <pre>{@code
+ * List<String> l = new ArrayList<>(Arrays.asList("one", "two"));
+ * Stream<String> sl = l.stream();
+ * String s = sl.peek(e -> l.add("BAD LAMBDA")).collect(toStringJoiner(" ")).toString();
+ * }</pre>
+ * then a {@code ConcurrentModificationException} will be thrown since the {@code peek} operation will attempt
+ * to add the string "BAD LAMBDA" to the list after the terminal operation has commenced.
+ */
+
+package java.util.stream;
--- a/jdk/src/share/classes/sun/misc/PerfCounter.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/sun/misc/PerfCounter.java Tue Apr 23 11:13:38 2013 +0100
@@ -62,7 +62,7 @@
private PerfCounter(String name, int type) {
this.name = name;
- ByteBuffer bb = perf.createLong(name, U_None, type, 0L);
+ ByteBuffer bb = perf.createLong(name, type, U_None, 0L);
bb.order(ByteOrder.nativeOrder());
this.lb = bb.asLongBuffer();
}
--- a/jdk/src/share/classes/sun/net/www/protocol/http/DigestAuthentication.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/sun/net/www/protocol/http/DigestAuthentication.java Tue Apr 23 11:13:38 2013 +0100
@@ -364,17 +364,18 @@
+ ncfield
+ ", uri=\"" + uri
+ "\", response=\"" + response
- + "\", algorithm=\"" + algorithm;
+ + "\", algorithm=" + algorithm;
if (opaque != null) {
- value = value + "\", opaque=\"" + opaque;
+ value = value + ", opaque=\"" + opaque;
+ value = value + "\"";
}
if (cnonce != null) {
- value = value + "\", cnonce=\"" + cnonce;
+ value = value + ", cnonce=\"" + cnonce;
+ value = value + "\"";
}
if (qop) {
- value = value + "\", qop=\"auth";
+ value = value + ", qop=auth";
}
- value = value + "\"";
return value;
}
--- a/jdk/src/share/classes/sun/security/pkcs/PKCS7.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/sun/security/pkcs/PKCS7.java Tue Apr 23 11:13:38 2013 +0100
@@ -784,6 +784,9 @@
* @param signatureAlgorithm the name of the signature algorithm
* @param tsaURI the URI of the Timestamping Authority; or null if no
* timestamp is requested
+ * @param tSAPolicyID the TSAPolicyID of the Timestamping Authority as a
+ * numerical object identifier; or null if we leave the TSA server
+ * to choose one. This argument is only used when tsaURI is provided
* @return the bytes of the encoded PKCS #7 signed data message
* @throws NoSuchAlgorithmException The exception is thrown if the signature
* algorithm is unrecognised.
@@ -798,7 +801,8 @@
X509Certificate[] signerChain,
byte[] content,
String signatureAlgorithm,
- URI tsaURI)
+ URI tsaURI,
+ String tSAPolicyID)
throws CertificateException, IOException, NoSuchAlgorithmException
{
@@ -807,7 +811,7 @@
if (tsaURI != null) {
// Timestamp the signature
HttpTimestamper tsa = new HttpTimestamper(tsaURI);
- byte[] tsToken = generateTimestampToken(tsa, signature);
+ byte[] tsToken = generateTimestampToken(tsa, tSAPolicyID, signature);
// Insert the timestamp token into the PKCS #7 signer info element
// (as an unsigned attribute)
@@ -851,14 +855,20 @@
* set to true.
*
* @param tsa the timestamping authority to use
+ * @param tSAPolicyID the TSAPolicyID of the Timestamping Authority as a
+ * numerical object identifier; or null if we leave the TSA server
+ * to choose one
* @param toBeTimestamped the token that is to be timestamped
* @return the encoded timestamp token
* @throws IOException The exception is thrown if an error occurs while
- * communicating with the TSA.
+ * communicating with the TSA, or a non-null
+ * TSAPolicyID is specified in the request but it
+ * does not match the one in the reply
* @throws CertificateException The exception is thrown if the TSA's
* certificate is not permitted for timestamping.
*/
private static byte[] generateTimestampToken(Timestamper tsa,
+ String tSAPolicyID,
byte[] toBeTimestamped)
throws IOException, CertificateException
{
@@ -868,7 +878,7 @@
try {
// SHA-1 is always used.
messageDigest = MessageDigest.getInstance("SHA-1");
- tsQuery = new TSRequest(toBeTimestamped, messageDigest);
+ tsQuery = new TSRequest(tSAPolicyID, toBeTimestamped, messageDigest);
} catch (NoSuchAlgorithmException e) {
// ignore
}
@@ -889,6 +899,12 @@
tsReply.getStatusCodeAsText() + " " +
tsReply.getFailureCodeAsText());
}
+
+ if (tSAPolicyID != null &&
+ !tSAPolicyID.equals(tsReply.getTimestampToken().getPolicyID())) {
+ throw new IOException("TSAPolicyID changed in "
+ + "timestamp token");
+ }
PKCS7 tsToken = tsReply.getToken();
TimestampToken tst = tsReply.getTimestampToken();
--- a/jdk/src/share/classes/sun/security/timestamp/TSRequest.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/sun/security/timestamp/TSRequest.java Tue Apr 23 11:13:38 2013 +0100
@@ -88,9 +88,10 @@
* @param messageDigest The MessageDigest of the hash algorithm to use.
* @throws NoSuchAlgorithmException if the hash algorithm is not supported
*/
- public TSRequest(byte[] toBeTimeStamped, MessageDigest messageDigest)
+ public TSRequest(String tSAPolicyID, byte[] toBeTimeStamped, MessageDigest messageDigest)
throws NoSuchAlgorithmException {
+ this.policyId = tSAPolicyID;
this.hashAlgorithmId = AlgorithmId.get(messageDigest.getAlgorithm());
this.hashValue = messageDigest.digest(toBeTimeStamped);
}
--- a/jdk/src/share/classes/sun/security/timestamp/TimestampToken.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/sun/security/timestamp/TimestampToken.java Tue Apr 23 11:13:38 2013 +0100
@@ -115,6 +115,10 @@
return nonce;
}
+ public String getPolicyID() {
+ return policy.toString();
+ }
+
public BigInteger getSerialNumber() {
return serialNumber;
}
--- a/jdk/src/share/classes/sun/security/tools/jarsigner/Main.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/sun/security/tools/jarsigner/Main.java Tue Apr 23 11:13:38 2013 +0100
@@ -141,6 +141,7 @@
String tsaUrl; // location of the Timestamping Authority
String tsaAlias; // alias for the Timestamping Authority's certificate
String altCertChain; // file to read alternative cert chain from
+ String tSAPolicyID;
boolean verify = false; // verify the jar
String verbose = null; // verbose output when signing/verifying
boolean showcerts = false; // show certs when verifying
@@ -331,6 +332,9 @@
} else if (collator.compare(flags, "-certchain") ==0) {
if (++n == args.length) usageNoArg();
altCertChain = args[n];
+ } else if (collator.compare(flags, "-tsapolicyid") ==0) {
+ if (++n == args.length) usageNoArg();
+ tSAPolicyID = args[n];
} else if (collator.compare(flags, "-debug") ==0) {
debug = true;
} else if (collator.compare(flags, "-keypass") ==0) {
@@ -531,6 +535,9 @@
(".tsacert.alias.public.key.certificate.for.Timestamping.Authority"));
System.out.println();
System.out.println(rb.getString
+ (".tsapolicyid.tsapolicyid.for.Timestamping.Authority"));
+ System.out.println();
+ System.out.println(rb.getString
(".altsigner.class.class.name.of.an.alternative.signing.mechanism"));
System.out.println();
System.out.println(rb.getString
@@ -1232,7 +1239,7 @@
try {
block =
sf.generateBlock(privateKey, sigalg, certChain,
- externalSF, tsaUrl, tsaCert, signingMechanism, args,
+ externalSF, tsaUrl, tsaCert, tSAPolicyID, signingMechanism, args,
zipFile);
} catch (SocketTimeoutException e) {
// Provide a helpful message when TSA is beyond a firewall
@@ -2206,13 +2213,14 @@
X509Certificate[] certChain,
boolean externalSF, String tsaUrl,
X509Certificate tsaCert,
+ String tSAPolicyID,
ContentSigner signingMechanism,
String[] args, ZipFile zipFile)
throws NoSuchAlgorithmException, InvalidKeyException, IOException,
SignatureException, CertificateException
{
return new Block(this, privateKey, sigalg, certChain, externalSF,
- tsaUrl, tsaCert, signingMechanism, args, zipFile);
+ tsaUrl, tsaCert, tSAPolicyID, signingMechanism, args, zipFile);
}
@@ -2226,7 +2234,7 @@
*/
Block(SignatureFile sfg, PrivateKey privateKey, String sigalg,
X509Certificate[] certChain, boolean externalSF, String tsaUrl,
- X509Certificate tsaCert, ContentSigner signingMechanism,
+ X509Certificate tsaCert, String tSAPolicyID, ContentSigner signingMechanism,
String[] args, ZipFile zipFile)
throws NoSuchAlgorithmException, InvalidKeyException, IOException,
SignatureException, CertificateException {
@@ -2309,7 +2317,7 @@
// Assemble parameters for the signing mechanism
ContentSignerParameters params =
- new JarSignerParameters(args, tsaUri, tsaCert, signature,
+ new JarSignerParameters(args, tsaUri, tsaCert, tSAPolicyID, signature,
signatureAlgorithm, certChain, content, zipFile);
// Generate the signature block
@@ -2353,11 +2361,13 @@
private X509Certificate[] signerCertificateChain;
private byte[] content;
private ZipFile source;
+ private String tSAPolicyID;
/**
* Create a new object.
*/
JarSignerParameters(String[] args, URI tsa, X509Certificate tsaCertificate,
+ String tSAPolicyID,
byte[] signature, String signatureAlgorithm,
X509Certificate[] signerCertificateChain, byte[] content,
ZipFile source) {
@@ -2369,6 +2379,7 @@
this.args = args;
this.tsa = tsa;
this.tsaCertificate = tsaCertificate;
+ this.tSAPolicyID = tSAPolicyID;
this.signature = signature;
this.signatureAlgorithm = signatureAlgorithm;
this.signerCertificateChain = signerCertificateChain;
@@ -2403,6 +2414,10 @@
return tsaCertificate;
}
+ public String getTSAPolicyID() {
+ return tSAPolicyID;
+ }
+
/**
* Retrieves the signature.
*
--- a/jdk/src/share/classes/sun/security/tools/jarsigner/Resources.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/sun/security/tools/jarsigner/Resources.java Tue Apr 23 11:13:38 2013 +0100
@@ -86,6 +86,8 @@
"[-tsa <url>] location of the Timestamping Authority"},
{".tsacert.alias.public.key.certificate.for.Timestamping.Authority",
"[-tsacert <alias>] public key certificate for Timestamping Authority"},
+ {".tsapolicyid.tsapolicyid.for.Timestamping.Authority",
+ "[-tsapolicyid <oid>] TSAPolicyID for Timestamping Authority"},
{".altsigner.class.class.name.of.an.alternative.signing.mechanism",
"[-altsigner <class>] class name of an alternative signing mechanism"},
{".altsignerpath.pathlist.location.of.an.alternative.signing.mechanism",
--- a/jdk/src/share/classes/sun/security/tools/jarsigner/TimestampedSigner.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/share/classes/sun/security/tools/jarsigner/TimestampedSigner.java Tue Apr 23 11:13:38 2013 +0100
@@ -133,7 +133,8 @@
}
}
return PKCS7.generateSignedData(signature, signerChain, content,
- params.getSignatureAlgorithm(), tsaURI);
+ params.getSignatureAlgorithm(), tsaURI,
+ params.getTSAPolicyID());
}
/**
--- a/jdk/src/solaris/classes/sun/nio/fs/UnixChannelFactory.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/solaris/classes/sun/nio/fs/UnixChannelFactory.java Tue Apr 23 11:13:38 2013 +0100
@@ -84,7 +84,7 @@
}
continue;
}
- if (option == LinkOption.NOFOLLOW_LINKS && supportsNoFollowLinks()) {
+ if (option == LinkOption.NOFOLLOW_LINKS && O_NOFOLLOW != 0) {
flags.noFollowLinks = true;
continue;
}
@@ -218,7 +218,7 @@
// follow links by default
boolean followLinks = true;
if (!flags.createNew && (flags.noFollowLinks || flags.deleteOnClose)) {
- if (flags.deleteOnClose && !supportsNoFollowLinks()) {
+ if (flags.deleteOnClose && O_NOFOLLOW == 0) {
try {
if (UnixFileAttributes.get(path, false).isSymbolicLink())
throw new UnixException("DELETE_ON_CLOSE specified and file is a symbolic link");
--- a/jdk/src/solaris/classes/sun/nio/fs/UnixCopyFile.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/solaris/classes/sun/nio/fs/UnixCopyFile.java Tue Apr 23 11:13:38 2013 +0100
@@ -189,7 +189,7 @@
// copy time stamps last
if (flags.copyBasicAttributes) {
try {
- if (dfd >= 0) {
+ if (dfd >= 0 && futimesSupported()) {
futimes(dfd,
attrs.lastAccessTime().to(TimeUnit.MICROSECONDS),
attrs.lastModifiedTime().to(TimeUnit.MICROSECONDS));
@@ -269,9 +269,15 @@
// copy time attributes
if (flags.copyBasicAttributes) {
try {
- futimes(fo,
- attrs.lastAccessTime().to(TimeUnit.MICROSECONDS),
- attrs.lastModifiedTime().to(TimeUnit.MICROSECONDS));
+ if (futimesSupported()) {
+ futimes(fo,
+ attrs.lastAccessTime().to(TimeUnit.MICROSECONDS),
+ attrs.lastModifiedTime().to(TimeUnit.MICROSECONDS));
+ } else {
+ utimes(target,
+ attrs.lastAccessTime().to(TimeUnit.MICROSECONDS),
+ attrs.lastModifiedTime().to(TimeUnit.MICROSECONDS));
+ }
} catch (UnixException x) {
if (flags.failIfUnableToCopyBasic)
x.rethrowAsIOException(target);
--- a/jdk/src/solaris/classes/sun/nio/fs/UnixFileAttributeViews.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/solaris/classes/sun/nio/fs/UnixFileAttributeViews.java Tue Apr 23 11:13:38 2013 +0100
@@ -73,6 +73,8 @@
int fd = file.openForAttributeAccess(followLinks);
try {
+ // assert followLinks || !UnixFileAttributes.get(fd).isSymbolicLink();
+
// if not changing both attributes then need existing attributes
if (lastModifiedTime == null || lastAccessTime == null) {
try {
@@ -92,9 +94,13 @@
boolean retry = false;
try {
- futimes(fd, accessValue, modValue);
+ if (futimesSupported()) {
+ futimes(fd, accessValue, modValue);
+ } else {
+ utimes(file, accessValue, modValue);
+ }
} catch (UnixException x) {
- // if futimes fails with EINVAL and one/both of the times is
+ // if futimes/utimes fails with EINVAL and one/both of the times is
// negative then we adjust the value to the epoch and retry.
if (x.errno() == UnixConstants.EINVAL &&
(modValue < 0L || accessValue < 0L)) {
@@ -107,7 +113,11 @@
if (modValue < 0L) modValue = 0L;
if (accessValue < 0L) accessValue= 0L;
try {
- futimes(fd, accessValue, modValue);
+ if (futimesSupported()) {
+ futimes(fd, accessValue, modValue);
+ } else {
+ utimes(file, accessValue, modValue);
+ }
} catch (UnixException x) {
x.rethrowAsIOException(file);
}
--- a/jdk/src/solaris/classes/sun/nio/fs/UnixFileAttributes.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/solaris/classes/sun/nio/fs/UnixFileAttributes.java Tue Apr 23 11:13:38 2013 +0100
@@ -51,6 +51,7 @@
private long st_mtime_nsec;
private long st_ctime_sec;
private long st_ctime_nsec;
+ private long st_birthtime_sec;
// created lazily
private volatile UserPrincipal owner;
@@ -139,7 +140,12 @@
@Override
public FileTime creationTime() {
- return lastModifiedTime();
+ if (UnixNativeDispatcher.birthtimeSupported()) {
+ return FileTime.from(st_birthtime_sec, TimeUnit.SECONDS);
+ } else {
+ // return last modified when birth time not supported
+ return lastModifiedTime();
+ }
}
@Override
--- a/jdk/src/solaris/classes/sun/nio/fs/UnixFileSystemProvider.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/solaris/classes/sun/nio/fs/UnixFileSystemProvider.java Tue Apr 23 11:13:38 2013 +0100
@@ -394,9 +394,9 @@
if (filter == null)
throw new NullPointerException();
- // can't return SecureDirectoryStream on kernels that don't support
- // openat, etc.
- if (!supportsAtSysCalls() || !supportsNoFollowLinks()) {
+ // can't return SecureDirectoryStream on kernels that don't support openat
+ // or O_NOFOLLOW
+ if (!openatSupported() || O_NOFOLLOW == 0) {
try {
long ptr = opendir(dir);
return new UnixDirectoryStream(dir, ptr, filter);
--- a/jdk/src/solaris/classes/sun/nio/fs/UnixNativeDispatcher.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/solaris/classes/sun/nio/fs/UnixNativeDispatcher.java Tue Apr 23 11:13:38 2013 +0100
@@ -537,30 +537,42 @@
*/
static native byte[] strerror(int errnum);
- // indicates if openat, unlinkat, etc. is supported
- private static final boolean hasAtSysCalls;
- static boolean supportsAtSysCalls() {
- return hasAtSysCalls;
+ /**
+ * Capabilities
+ */
+ private static final int SUPPORTS_OPENAT = 1 << 1; // syscalls
+ private static final int SUPPORTS_FUTIMES = 1 << 2;
+ private static final int SUPPORTS_BIRTHTIME = 1 << 16; // other features
+ private static final int capabilities;
+
+ /**
+ * Supports openat and other *at calls.
+ */
+ static boolean openatSupported() {
+ return (capabilities & SUPPORTS_OPENAT) != 0;
}
- static boolean supportsNoFollowLinks() {
- return UnixConstants.O_NOFOLLOW != 0;
+ /**
+ * Supports futimes or futimesat
+ */
+ static boolean futimesSupported() {
+ return (capabilities & SUPPORTS_FUTIMES) != 0;
}
- // initialize syscalls and fieldIDs
- private static native int init();
+ /**
+ * Supports file birth (creation) time attribute
+ */
+ static boolean birthtimeSupported() {
+ return (capabilities & SUPPORTS_BIRTHTIME) != 0;
+ }
- // flags returned by init to indicate capabilities
- private static final int HAS_AT_SYSCALLS = 0x1;
-
+ private static native int init();
static {
AccessController.doPrivileged(new PrivilegedAction<Void>() {
public Void run() {
System.loadLibrary("nio");
return null;
}});
- int flags = init();
-
- hasAtSysCalls = (flags & HAS_AT_SYSCALLS) > 0;
+ capabilities = init();
}
}
--- a/jdk/src/solaris/classes/sun/nio/fs/UnixPath.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/solaris/classes/sun/nio/fs/UnixPath.java Tue Apr 23 11:13:38 2013 +0100
@@ -769,7 +769,7 @@
int openForAttributeAccess(boolean followLinks) throws IOException {
int flags = O_RDONLY;
if (!followLinks) {
- if (!supportsNoFollowLinks())
+ if (O_NOFOLLOW == 0)
throw new IOException("NOFOLLOW_LINKS is not supported on this platform");
flags |= O_NOFOLLOW;
}
--- a/jdk/src/solaris/native/sun/nio/fs/UnixNativeDispatcher.c Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/solaris/native/sun/nio/fs/UnixNativeDispatcher.c Tue Apr 23 11:13:38 2013 +0100
@@ -97,6 +97,10 @@
static jfieldID attrs_st_ctime_sec;
static jfieldID attrs_st_ctime_nsec;
+#ifdef _DARWIN_FEATURE_64_BIT_INODE
+static jfieldID attrs_st_birthtime_sec;
+#endif
+
static jfieldID attrs_f_frsize;
static jfieldID attrs_f_blocks;
static jfieldID attrs_f_bfree;
@@ -171,7 +175,7 @@
JNIEXPORT jint JNICALL
Java_sun_nio_fs_UnixNativeDispatcher_init(JNIEnv* env, jclass this)
{
- jint flags = 0;
+ jint capabilities = 0;
jclass clazz;
clazz = (*env)->FindClass(env, "sun/nio/fs/UnixFileAttributes");
@@ -193,6 +197,10 @@
attrs_st_ctime_sec = (*env)->GetFieldID(env, clazz, "st_ctime_sec", "J");
attrs_st_ctime_nsec = (*env)->GetFieldID(env, clazz, "st_ctime_nsec", "J");
+#ifdef _DARWIN_FEATURE_64_BIT_INODE
+ attrs_st_birthtime_sec = (*env)->GetFieldID(env, clazz, "st_birthtime_sec", "J");
+#endif
+
clazz = (*env)->FindClass(env, "sun/nio/fs/UnixFileStoreAttributes");
if (clazz == NULL) {
return 0;
@@ -233,14 +241,31 @@
my_fstatat64_func = (fstatat64_func*)&fstatat64_wrapper;
#endif
+ /* supports futimes or futimesat */
+
+#ifdef _ALLBSD_SOURCE
+ capabilities |= sun_nio_fs_UnixNativeDispatcher_SUPPORTS_FUTIMES;
+#else
+ if (my_futimesat_func != NULL)
+ capabilities |= sun_nio_fs_UnixNativeDispatcher_SUPPORTS_FUTIMES;
+#endif
+
+ /* supports openat, etc. */
+
if (my_openat64_func != NULL && my_fstatat64_func != NULL &&
my_unlinkat_func != NULL && my_renameat_func != NULL &&
my_futimesat_func != NULL && my_fdopendir_func != NULL)
{
- flags |= sun_nio_fs_UnixNativeDispatcher_HAS_AT_SYSCALLS;
+ capabilities |= sun_nio_fs_UnixNativeDispatcher_SUPPORTS_OPENAT;
}
- return flags;
+ /* supports file birthtime */
+
+#ifdef _DARWIN_FEATURE_64_BIT_INODE
+ capabilities |= sun_nio_fs_UnixNativeDispatcher_SUPPORTS_BIRTHTIME;
+#endif
+
+ return capabilities;
}
JNIEXPORT jbyteArray JNICALL
@@ -405,6 +430,10 @@
(*env)->SetLongField(env, attrs, attrs_st_mtime_sec, (jlong)buf->st_mtime);
(*env)->SetLongField(env, attrs, attrs_st_ctime_sec, (jlong)buf->st_ctime);
+#ifdef _DARWIN_FEATURE_64_BIT_INODE
+ (*env)->SetLongField(env, attrs, attrs_st_birthtime_sec, (jlong)buf->st_birthtime);
+#endif
+
#if (_POSIX_C_SOURCE >= 200809L) || defined(__solaris__)
(*env)->SetLongField(env, attrs, attrs_st_atime_nsec, (jlong)buf->st_atim.tv_nsec);
(*env)->SetLongField(env, attrs, attrs_st_mtime_nsec, (jlong)buf->st_mtim.tv_nsec);
--- a/jdk/src/windows/classes/sun/util/locale/provider/HostLocaleProviderAdapterImpl.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/windows/classes/sun/util/locale/provider/HostLocaleProviderAdapterImpl.java Tue Apr 23 11:13:38 2013 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,7 @@
import java.text.spi.NumberFormatProvider;
import java.util.Calendar;
import java.util.Collections;
+import java.util.Currency;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
@@ -48,6 +49,8 @@
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.spi.CalendarDataProvider;
import java.util.spi.CalendarNameProvider;
+import java.util.spi.CurrencyNameProvider;
+import java.util.spi.LocaleNameProvider;
import sun.util.spi.CalendarProvider;
/**
@@ -72,6 +75,14 @@
private static final int CD_FIRSTDAYOFWEEK = 0;
private static final int CD_MINIMALDAYSINFIRSTWEEK = 1;
+ // Currency/Locale display name types
+ private static final int DN_CURRENCY_NAME = 0;
+ private static final int DN_CURRENCY_SYMBOL = 1;
+ private static final int DN_LOCALE_LANGUAGE = 2;
+ private static final int DN_LOCALE_SCRIPT = 3;
+ private static final int DN_LOCALE_REGION = 4;
+ private static final int DN_LOCALE_VARIANT = 5;
+
// Native Calendar ID to LDML calendar type map
private static final String[] calIDToLDML = {
"",
@@ -96,15 +107,25 @@
private static ConcurrentMap<Locale, SoftReference<DecimalFormatSymbols>> decimalFormatSymbolsCache = new ConcurrentHashMap<>();
private static final Set<Locale> supportedLocaleSet;
+ private static final String nativeDisplayLanguage;
static {
Set<Locale> tmpSet = new HashSet<>();
if (initialize()) {
// Assuming the default locales do not include any extensions, so
// no stripping is needed here.
- Locale l = Locale.forLanguageTag(getDefaultLocale(CAT_FORMAT).replace('_', '-'));
- tmpSet.addAll(Control.getNoFallbackControl(Control.FORMAT_DEFAULT).getCandidateLocales("", l));
- l = Locale.forLanguageTag(getDefaultLocale(CAT_DISPLAY).replace('_', '-'));
- tmpSet.addAll(Control.getNoFallbackControl(Control.FORMAT_DEFAULT).getCandidateLocales("", l));
+ Control c = Control.getNoFallbackControl(Control.FORMAT_DEFAULT);
+ String displayLocale = getDefaultLocale(CAT_DISPLAY);
+ Locale l = Locale.forLanguageTag(displayLocale.replace('_', '-'));
+ tmpSet.addAll(c.getCandidateLocales("", l));
+ nativeDisplayLanguage = l.getLanguage();
+
+ String formatLocale = getDefaultLocale(CAT_FORMAT);
+ if (!formatLocale.equals(displayLocale)) {
+ l = Locale.forLanguageTag(formatLocale.replace('_', '-'));
+ tmpSet.addAll(c.getCandidateLocales("", l));
+ }
+ } else {
+ nativeDisplayLanguage = "";
}
supportedLocaleSet = Collections.unmodifiableSet(tmpSet);
}
@@ -392,6 +413,96 @@
};
}
+ public static CurrencyNameProvider getCurrencyNameProvider() {
+ return new CurrencyNameProvider() {
+ @Override
+ public Locale[] getAvailableLocales() {
+ return supportedLocale;
+ }
+
+ @Override
+ public boolean isSupportedLocale(Locale locale) {
+ // Ignore the extensions for now
+ return supportedLocaleSet.contains(locale.stripExtensions()) &&
+ locale.getLanguage().equals(nativeDisplayLanguage);
+ }
+
+ @Override
+ public String getSymbol(String currencyCode, Locale locale) {
+ // Retrieves the currency symbol by calling
+ // GetLocaleInfoEx(LOCALE_SCURRENCY).
+ // It only works with the "locale"'s currency in its native
+ // language.
+ try {
+ if (Currency.getInstance(locale).getCurrencyCode()
+ .equals(currencyCode)) {
+ return getDisplayString(locale.toLanguageTag(),
+ DN_CURRENCY_SYMBOL, currencyCode);
+ }
+ } catch (IllegalArgumentException iae) {}
+ return null;
+ }
+
+ @Override
+ public String getDisplayName(String currencyCode, Locale locale) {
+ // Retrieves the display name by calling
+ // GetLocaleInfoEx(LOCALE_SNATIVECURRNAME).
+ // It only works with the "locale"'s currency in its native
+ // language.
+ try {
+ if (Currency.getInstance(locale).getCurrencyCode()
+ .equals(currencyCode)) {
+ return getDisplayString(locale.toLanguageTag(),
+ DN_CURRENCY_NAME, currencyCode);
+ }
+ } catch (IllegalArgumentException iae) {}
+ return null;
+ }
+ };
+ }
+
+ public static LocaleNameProvider getLocaleNameProvider() {
+ return new LocaleNameProvider() {
+ @Override
+ public Locale[] getAvailableLocales() {
+ return supportedLocale;
+ }
+
+ @Override
+ public boolean isSupportedLocale(Locale locale) {
+ return supportedLocaleSet.contains(locale.stripExtensions()) &&
+ locale.getLanguage().equals(nativeDisplayLanguage);
+ }
+
+ @Override
+ public String getDisplayLanguage(String languageCode, Locale locale) {
+ // Retrieves the display language name by calling
+ // GetLocaleInfoEx(LOCALE_SLOCALIZEDLANGUAGENAME).
+ return getDisplayString(locale.toLanguageTag(),
+ DN_LOCALE_LANGUAGE, languageCode);
+ }
+
+ @Override
+ public String getDisplayCountry(String countryCode, Locale locale) {
+ // Retrieves the display country name by calling
+ // GetLocaleInfoEx(LOCALE_SLOCALIZEDCOUNTRYNAME).
+ return getDisplayString(locale.toLanguageTag(),
+ DN_LOCALE_REGION, nativeDisplayLanguage+"-"+countryCode);
+ }
+
+ @Override
+ public String getDisplayScript(String scriptCode, Locale locale) {
+ return null;
+ }
+
+ @Override
+ public String getDisplayVariant(String variantCode, Locale locale) {
+ return null;
+ }
+ };
+ }
+
+
private static String convertDateTimePattern(String winPattern) {
String ret = winPattern.replaceAll("dddd", "EEEE");
ret = ret.replaceAll("ddd", "EEE");
@@ -413,12 +524,21 @@
}
private static boolean isSupportedCalendarLocale(Locale locale) {
- Locale base = locale.stripExtensions();
+ Locale base = locale;
+
+ if (base.hasExtensions() || base.getVariant() != "") {
+ // strip off extensions and variant.
+ base = new Locale.Builder()
+ .setLocale(locale)
+ .clearExtensions()
+ .build();
+ }
+
if (!supportedLocaleSet.contains(base)) {
return false;
}
- int calid = getCalendarID(locale.toLanguageTag());
+ int calid = getCalendarID(base.toLanguageTag());
if (calid <= 0 || calid >= calIDToLDML.length) {
return false;
}
@@ -546,4 +666,7 @@
// For CalendarDataProvider
private static native int getCalendarDataValue(String langTag, int type);
+
+ // For Locale/CurrencyNameProvider
+ private static native String getDisplayString(String langTag, int key, String value);
}
--- a/jdk/src/windows/native/sun/util/locale/provider/HostLocaleProviderAdapter_md.c Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/src/windows/native/sun/util/locale/provider/HostLocaleProviderAdapter_md.c Tue Apr 23 11:13:38 2013 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -196,7 +196,7 @@
break;
}
- localeString = getJavaIDFromLangID(langid);
+ localeString = (char *)getJavaIDFromLangID(langid);
ret = (*env)->NewStringUTF(env, localeString);
free(localeString);
return ret;
@@ -366,12 +366,14 @@
*/
JNIEXPORT jboolean JNICALL Java_sun_util_locale_provider_HostLocaleProviderAdapterImpl_isNativeDigit
(JNIEnv *env, jclass cls, jstring jlangtag) {
- WCHAR buf[BUFLEN];
+ DWORD num;
const jchar *langtag = (*env)->GetStringChars(env, jlangtag, JNI_FALSE);
- int got = getLocaleInfoWrapper(langtag, LOCALE_IDIGITSUBSTITUTION, buf, BUFLEN);
+ int got = getLocaleInfoWrapper(langtag,
+ LOCALE_IDIGITSUBSTITUTION | LOCALE_RETURN_NUMBER,
+ (LPWSTR)&num, sizeof(num));
(*env)->ReleaseStringChars(env, jlangtag, langtag);
- return got && buf[0] == L'2'; // 2: native digit substitution
+ return got && num == 2; // 2: native digit substitution
}
/*
@@ -590,25 +592,72 @@
*/
JNIEXPORT jint JNICALL Java_sun_util_locale_provider_HostLocaleProviderAdapterImpl_getCalendarDataValue
(JNIEnv *env, jclass cls, jstring jlangtag, jint type) {
- WCHAR buf[BUFLEN];
+ DWORD num;
const jchar *langtag = (*env)->GetStringChars(env, jlangtag, JNI_FALSE);
int got = 0;
switch (type) {
case sun_util_locale_provider_HostLocaleProviderAdapterImpl_CD_FIRSTDAYOFWEEK:
- got = getLocaleInfoWrapper(langtag, LOCALE_IFIRSTDAYOFWEEK, buf, BUFLEN);
+ got = getLocaleInfoWrapper(langtag,
+ LOCALE_IFIRSTDAYOFWEEK | LOCALE_RETURN_NUMBER,
+ (LPWSTR)&num, sizeof(num));
break;
}
(*env)->ReleaseStringChars(env, jlangtag, langtag);
if (got) {
- return _wtoi(buf);
+ return num;
} else {
return -1;
}
}
+/*
+ * Class: sun_util_locale_provider_HostLocaleProviderAdapterImpl
+ * Method: getDisplayString
+ * Signature: (Ljava/lang/String;ILjava/lang/String;)Ljava/lang/String;
+ */
+JNIEXPORT jstring JNICALL Java_sun_util_locale_provider_HostLocaleProviderAdapterImpl_getDisplayString
+ (JNIEnv *env, jclass cls, jstring jlangtag, jint type, jstring jvalue) {
+ LCTYPE lcType;
+ jstring jStr;
+ const jchar * pjChar;
+ WCHAR buf[BUFLEN];
+ int got = 0;
+
+ switch (type) {
+ case sun_util_locale_provider_HostLocaleProviderAdapterImpl_DN_CURRENCY_NAME:
+ lcType = LOCALE_SNATIVECURRNAME;
+ jStr = jlangtag;
+ break;
+ case sun_util_locale_provider_HostLocaleProviderAdapterImpl_DN_CURRENCY_SYMBOL:
+ lcType = LOCALE_SCURRENCY;
+ jStr = jlangtag;
+ break;
+ case sun_util_locale_provider_HostLocaleProviderAdapterImpl_DN_LOCALE_LANGUAGE:
+ lcType = LOCALE_SLOCALIZEDLANGUAGENAME;
+ jStr = jvalue;
+ break;
+ case sun_util_locale_provider_HostLocaleProviderAdapterImpl_DN_LOCALE_REGION:
+ lcType = LOCALE_SLOCALIZEDCOUNTRYNAME;
+ jStr = jvalue;
+ break;
+ default:
+ return NULL;
+ }
+
+ pjChar = (*env)->GetStringChars(env, jStr, JNI_FALSE);
+ got = getLocaleInfoWrapper(pjChar, lcType, buf, BUFLEN);
+ (*env)->ReleaseStringChars(env, jStr, pjChar);
+
+ if (got) {
+ return (*env)->NewString(env, buf, wcslen(buf));
+ } else {
+ return NULL;
+ }
+}
+
int getLocaleInfoWrapper(const jchar *langtag, LCTYPE type, LPWSTR data, int buflen) {
if (pGetLocaleInfoEx) {
if (wcscmp(L"und", (LPWSTR)langtag) == 0) {
@@ -642,11 +691,13 @@
}
jint getCalendarID(const jchar *langtag) {
- WCHAR type[BUFLEN];
- int got = getLocaleInfoWrapper(langtag, LOCALE_ICALENDARTYPE, type, BUFLEN);
+ DWORD type;
+ int got = getLocaleInfoWrapper(langtag,
+ LOCALE_ICALENDARTYPE | LOCALE_RETURN_NUMBER,
+ (LPWSTR)&type, sizeof(type));
if (got) {
- return _wtoi(type);
+ return type;
} else {
return 0;
}
@@ -691,28 +742,37 @@
}
void getNumberPart(const jchar * langtag, const jint numberStyle, WCHAR * number) {
- WCHAR buf[BUFLEN];
+ DWORD digits = 0;
+ DWORD leadingZero = 0;
WCHAR grouping[BUFLEN];
+ int groupingLen;
WCHAR fractionPattern[BUFLEN];
WCHAR * integerPattern = number;
- int digits;
- BOOL leadingZero;
WCHAR * pDest;
- int groupingLen;
// Get info from Windows
- if (numberStyle == sun_util_locale_provider_HostLocaleProviderAdapterImpl_NF_CURRENCY) {
- getLocaleInfoWrapper(langtag, LOCALE_ICURRDIGITS, buf, BUFLEN);
- } else {
- getLocaleInfoWrapper(langtag, LOCALE_IDIGITS, buf, BUFLEN);
+ switch (numberStyle) {
+ case sun_util_locale_provider_HostLocaleProviderAdapterImpl_NF_CURRENCY:
+ getLocaleInfoWrapper(langtag,
+ LOCALE_ICURRDIGITS | LOCALE_RETURN_NUMBER,
+ (LPWSTR)&digits, sizeof(digits));
+ break;
+
+ case sun_util_locale_provider_HostLocaleProviderAdapterImpl_NF_INTEGER:
+ break;
+
+ case sun_util_locale_provider_HostLocaleProviderAdapterImpl_NF_NUMBER:
+ case sun_util_locale_provider_HostLocaleProviderAdapterImpl_NF_PERCENT:
+ default:
+ getLocaleInfoWrapper(langtag,
+ LOCALE_IDIGITS | LOCALE_RETURN_NUMBER,
+ (LPWSTR)&digits, sizeof(digits));
+ break;
}
- if (numberStyle == sun_util_locale_provider_HostLocaleProviderAdapterImpl_NF_INTEGER) {
- digits = 0;
- } else {
- digits = _wtoi(buf);
- }
- getLocaleInfoWrapper(langtag, LOCALE_ILZERO, buf, BUFLEN);
- leadingZero = _wtoi(buf) != 0;
+
+ getLocaleInfoWrapper(langtag,
+ LOCALE_ILZERO | LOCALE_RETURN_NUMBER,
+ (LPWSTR)&leadingZero, sizeof(leadingZero));
groupingLen = getLocaleInfoWrapper(langtag, LOCALE_SGROUPING, grouping, BUFLEN);
// fraction pattern
@@ -749,7 +809,7 @@
}
}
- if (leadingZero) {
+ if (leadingZero != 0) {
*pDest++ = L'0';
} else {
*pDest++ = L'#';
@@ -760,29 +820,35 @@
}
void getFixPart(const jchar * langtag, const jint numberStyle, BOOL positive, BOOL prefix, WCHAR * ret) {
- WCHAR buf[BUFLEN];
- int pattern = 0;
+ DWORD pattern = 0;
int style = numberStyle;
int got = 0;
if (positive) {
if (style == sun_util_locale_provider_HostLocaleProviderAdapterImpl_NF_CURRENCY) {
- got = getLocaleInfoWrapper(langtag, LOCALE_ICURRENCY, buf, BUFLEN);
+ got = getLocaleInfoWrapper(langtag,
+ LOCALE_ICURRENCY | LOCALE_RETURN_NUMBER,
+ (LPWSTR)&pattern, sizeof(pattern));
} else if (style == sun_util_locale_provider_HostLocaleProviderAdapterImpl_NF_PERCENT) {
- got = getLocaleInfoWrapper(langtag, LOCALE_IPOSITIVEPERCENT, buf, BUFLEN);
+ got = getLocaleInfoWrapper(langtag,
+ LOCALE_IPOSITIVEPERCENT | LOCALE_RETURN_NUMBER,
+ (LPWSTR)&pattern, sizeof(pattern));
}
} else {
if (style == sun_util_locale_provider_HostLocaleProviderAdapterImpl_NF_CURRENCY) {
- got = getLocaleInfoWrapper(langtag, LOCALE_INEGCURR, buf, BUFLEN);
+ got = getLocaleInfoWrapper(langtag,
+ LOCALE_INEGCURR | LOCALE_RETURN_NUMBER,
+ (LPWSTR)&pattern, sizeof(pattern));
} else if (style == sun_util_locale_provider_HostLocaleProviderAdapterImpl_NF_PERCENT) {
- got = getLocaleInfoWrapper(langtag, LOCALE_INEGATIVEPERCENT, buf, BUFLEN);
+ got = getLocaleInfoWrapper(langtag,
+ LOCALE_INEGATIVEPERCENT | LOCALE_RETURN_NUMBER,
+ (LPWSTR)&pattern, sizeof(pattern));
} else {
- got = getLocaleInfoWrapper(langtag, LOCALE_INEGNUMBER, buf, BUFLEN);
+ got = getLocaleInfoWrapper(langtag,
+ LOCALE_INEGNUMBER | LOCALE_RETURN_NUMBER,
+ (LPWSTR)&pattern, sizeof(pattern));
}
}
- if (got) {
- pattern = _wtoi(buf);
- }
if (numberStyle == sun_util_locale_provider_HostLocaleProviderAdapterImpl_NF_INTEGER) {
style = sun_util_locale_provider_HostLocaleProviderAdapterImpl_NF_NUMBER;
--- a/jdk/test/Makefile Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/test/Makefile Tue Apr 23 11:13:38 2013 +0100
@@ -502,7 +502,7 @@
# Stable agentvm testruns (TestNG)
JDK_DEFAULT_TARGETS += jdk_time
jdk_time: $(call TestDirs, java/time)
- $(call RunOthervmBatch)
+ $(call RunAgentvmBatch)
# Stable agentvm testruns (minus items from PROBLEM_LIST)
JDK_ALL_TARGETS += jdk_other
--- a/jdk/test/ProblemList.txt Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/test/ProblemList.txt Tue Apr 23 11:13:38 2013 +0100
@@ -144,6 +144,9 @@
# jdk_management
+# 8010897
+sun/management/HotspotRuntimeMBean/GetSafepointSyncTime.java macosx-all
+
############################################################################
# jdk_jmx
--- a/jdk/test/java/nio/file/attribute/BasicFileAttributeView/Basic.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/test/java/nio/file/attribute/BasicFileAttributeView/Basic.java Tue Apr 23 11:13:38 2013 +0100
@@ -70,22 +70,16 @@
check(f.lastModified()/1000 == attrs.lastModifiedTime().to(TimeUnit.SECONDS),
"last-modified time should be the same");
- // copy last-modified time and file create time from directory to file,
+ // copy last-modified time from directory to file,
// re-read attributes, and check they match
BasicFileAttributeView view =
Files.getFileAttributeView(file, BasicFileAttributeView.class);
BasicFileAttributes dirAttrs = Files.readAttributes(dir, BasicFileAttributes.class);
view.setTimes(dirAttrs.lastModifiedTime(), null, null);
- if (dirAttrs.creationTime() != null) {
- view.setTimes(null, null, dirAttrs.creationTime());
- }
+
attrs = view.readAttributes();
check(attrs.lastModifiedTime().equals(dirAttrs.lastModifiedTime()),
"last-modified time should be equal");
- if (dirAttrs.creationTime() != null) {
- check(attrs.creationTime().equals(dirAttrs.creationTime()),
- "create time should be the same");
- }
// security tests
check (!(attrs instanceof PosixFileAttributes),
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/nio/file/attribute/BasicFileAttributeView/CreationTime.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* @test
+ * @bug 8011536
+ * @summary Basic test for creationTime attribute on platforms/file systems
+ * that support it.
+ * @library ../..
+ */
+
+import java.nio.file.Path;
+import java.nio.file.Files;
+import java.nio.file.attribute.*;
+import java.time.Instant;
+import java.io.IOException;
+
+public class CreationTime {
+
+ private static final java.io.PrintStream err = System.err;
+
+ /**
+ * Reads the creationTime attribute
+ */
+ private static FileTime creationTime(Path file) throws IOException {
+ return Files.readAttributes(file, BasicFileAttributes.class).creationTime();
+ }
+
+ /**
+ * Sets the creationTime attribute
+ */
+ private static void setCreationTime(Path file, FileTime time) throws IOException {
+ BasicFileAttributeView view =
+ Files.getFileAttributeView(file, BasicFileAttributeView.class);
+ view.setTimes(null, null, time);
+ }
+
+ static void test(Path top) throws IOException {
+ Path file = Files.createFile(top.resolve("foo"));
+
+ /**
+ * Check that the creationTime reported is close to the current time
+ */
+ FileTime creationTime = creationTime(file);
+ Instant now = Instant.now();
+ if (Math.abs(creationTime.toMillis()-now.toEpochMilli()) > 10000L) {
+ err.println("File creation time reported as: " + creationTime);
+ throw new RuntimeException("Expected to be close to: " + now);
+ }
+
+ /**
+ * Is the creationTime attribute supported here?
+ */
+ boolean supportsCreationTimeRead = false;
+ boolean supportsCreationTimeWrite = false;
+ String os = System.getProperty("os.name");
+ if (os.contains("OS X") && Files.getFileStore(file).type().equals("hfs")) {
+ supportsCreationTimeRead = true;
+ } else if (os.startsWith("Windows")) {
+ String type = Files.getFileStore(file).type();
+ if (type.equals("NTFS") || type.equals("FAT")) {
+ supportsCreationTimeRead = true;
+ supportsCreationTimeWrite = true;
+ }
+ }
+
+ /**
+ * If the creation-time attribute is supported then change the file's
+ * last modified and check that it doesn't change the creation-time.
+ */
+ if (supportsCreationTimeRead) {
+ // change modified time by +1 hour
+ Instant plusHour = Instant.now().plusSeconds(60L * 60L);
+ Files.setLastModifiedTime(file, FileTime.from(plusHour));
+ FileTime current = creationTime(file);
+ if (!current.equals(creationTime))
+ throw new RuntimeException("Creation time should not have changed");
+ }
+
+ /**
+ * If the creation-time attribute is supported and can be changed then
+ * check that the change is effective.
+ */
+ if (supportsCreationTimeWrite) {
+ // change creation time by -1 hour
+ Instant minusHour = Instant.now().minusSeconds(60L * 60L);
+ creationTime = FileTime.from(minusHour);
+ setCreationTime(file, creationTime);
+ FileTime current = creationTime(file);
+ if (Math.abs(creationTime.toMillis()-current.toMillis()) > 1000L)
+ throw new RuntimeException("Creation time not changed");
+ }
+ }
+
+ public static void main(String[] args) throws IOException {
+ // create temporary directory to run tests
+ Path dir = TestUtil.createTemporaryDirectory();
+ try {
+ test(dir);
+ } finally {
+ TestUtil.removeAll(dir);
+ }
+ }
+}
--- a/jdk/test/java/time/TEST.properties Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/test/java/time/TEST.properties Tue Apr 23 11:13:38 2013 +0100
@@ -1,3 +1,3 @@
# Threeten test uses TestNG
TestNG.dirs = .
-
+othervm.dirs = tck/java/time/chrono test/java/time/chrono test/java/time/format
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/util/Collection/CollectionDefaults.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+
+import static org.testng.Assert.assertTrue;
+import static org.testng.Assert.fail;
+
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.function.Predicate;
+
+/**
+ * @test
+ * @library testlibrary
+ * @build CollectionAsserts CollectionSupplier
+ * @run testng CollectionDefaults
+ * @summary Unit tests for extension methods on Collection
+ */
+public class CollectionDefaults {
+
+ public static final Predicate<Integer> pEven = x -> 0 == x % 2;
+ public static final Predicate<Integer> pOdd = x -> 1 == x % 2;
+
+ private static final String[] SET_CLASSES = {
+ "java.util.HashSet",
+ "java.util.LinkedHashSet",
+ "java.util.TreeSet"
+ };
+
+ private static final int SIZE = 100;
+
+ @DataProvider(name="setProvider", parallel=true)
+ public static Object[][] setCases() {
+ final List<Object[]> cases = new LinkedList<>();
+ cases.add(new Object[] { new HashSet<>() });
+ cases.add(new Object[] { new LinkedHashSet<>() });
+ cases.add(new Object[] { new TreeSet<>() });
+
+ cases.add(new Object[] { Collections.newSetFromMap(new HashMap<>()) });
+ cases.add(new Object[] { Collections.newSetFromMap(new LinkedHashMap()) });
+ cases.add(new Object[] { Collections.newSetFromMap(new TreeMap<>()) });
+
+ cases.add(new Object[] { new HashSet(){{add(42);}} });
+ cases.add(new Object[] { new LinkedHashSet(){{add(42);}} });
+ cases.add(new Object[] { new TreeSet(){{add(42);}} });
+ return cases.toArray(new Object[0][cases.size()]);
+ }
+
+ @Test(dataProvider = "setProvider")
+ public void testProvidedWithNull(final Set<Integer> set) throws Exception {
+ try {
+ set.forEach(null);
+ fail("expected NPE not thrown");
+ } catch (NullPointerException npe) {}
+ try {
+ set.removeIf(null);
+ fail("expected NPE not thrown");
+ } catch (NullPointerException npe) {}
+ }
+
+ @Test
+ public void testForEach() throws Exception {
+ final CollectionSupplier supplier = new CollectionSupplier(SET_CLASSES, SIZE);
+ for (final CollectionSupplier.TestCase test : supplier.get()) {
+ final Set<Integer> original = ((Set<Integer>) test.original);
+ final Set<Integer> set = ((Set<Integer>) test.collection);
+
+ try {
+ set.forEach(null);
+ fail("expected NPE not thrown");
+ } catch (NullPointerException npe) {}
+ if (test.className.equals("java.util.HashSet")) {
+ CollectionAsserts.assertContentsUnordered(set, original);
+ } else {
+ CollectionAsserts.assertContents(set, original);
+ }
+
+ final List<Integer> actual = new LinkedList<>();
+ set.forEach(actual::add);
+ if (test.className.equals("java.util.HashSet")) {
+ CollectionAsserts.assertContentsUnordered(actual, set);
+ CollectionAsserts.assertContentsUnordered(actual, original);
+ } else {
+ CollectionAsserts.assertContents(actual, set);
+ CollectionAsserts.assertContents(actual, original);
+ }
+ }
+ }
+
+ @Test
+ public void testRemoveIf() throws Exception {
+ final CollectionSupplier supplier = new CollectionSupplier(SET_CLASSES, SIZE);
+ for (final CollectionSupplier.TestCase test : supplier.get()) {
+ final Set<Integer> original = ((Set<Integer>) test.original);
+ final Set<Integer> set = ((Set<Integer>) test.collection);
+
+ try {
+ set.removeIf(null);
+ fail("expected NPE not thrown");
+ } catch (NullPointerException npe) {}
+ if (test.className.equals("java.util.HashSet")) {
+ CollectionAsserts.assertContentsUnordered(set, original);
+ } else {
+ CollectionAsserts.assertContents(set, original);
+ }
+
+ set.removeIf(pEven);
+ for (int i : set) {
+ assertTrue((i % 2) == 1);
+ }
+ for (int i : original) {
+ if (i % 2 == 1) {
+ assertTrue(set.contains(i));
+ }
+ }
+ set.removeIf(pOdd);
+ assertTrue(set.isEmpty());
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/util/Collection/ListDefaults.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,530 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Comparators;
+import java.util.List;
+import java.util.LinkedList;
+import java.util.Stack;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.Vector;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertFalse;
+import static org.testng.Assert.assertTrue;
+import static org.testng.Assert.fail;
+
+import java.lang.reflect.Constructor;
+import java.util.ConcurrentModificationException;
+import java.util.function.Predicate;
+
+/**
+ * @test
+ * @library testlibrary
+ * @build CollectionAsserts CollectionSupplier
+ * @run testng ListDefaults
+ * @summary Unit tests for extension methods on List
+ */
+public class ListDefaults {
+
+ private static final String[] LIST_CLASSES = {
+ "java.util.ArrayList",
+ "java.util.LinkedList",
+ "java.util.Vector",
+ "java.util.concurrent.CopyOnWriteArrayList"
+ };
+
+ private static final String[] LIST_CME_CLASSES = {
+ "java.util.ArrayList",
+ "java.util.Vector"
+ };
+
+ private static final Predicate<Integer> pEven = x -> 0 == x % 2;
+ private static final Predicate<Integer> pOdd = x -> 1 == x % 2;
+
+ private static final Comparator<Integer> BIT_COUNT_COMPARATOR =
+ (x, y) -> Integer.bitCount(x) - Integer.bitCount(y);
+
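+    // AtomicInteger does not implement Comparable, so testSort sorts it with this explicit comparator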
+ private static final Comparator<AtomicInteger> ATOMIC_INTEGER_COMPARATOR =
+ (x, y) -> x.intValue() - y.intValue();
+
+ private static final int SIZE = 100;
+ private static final int SUBLIST_FROM = 20;
+ private static final int SUBLIST_TO = SIZE - 5;
+ private static final int SUBLIST_SIZE = SUBLIST_TO - SUBLIST_FROM;
+
+ private static interface Callback {
+ void call(List<Integer> list);
+ }
+
+ // call the callback for each recursive subList
+ private void trimmedSubList(final List<Integer> list, final Callback callback) {
+ int size = list.size();
+ if (size > 1) {
+ // trim 1 element from both ends
+ final List<Integer> subList = list.subList(1, size - 1);
+ callback.call(subList);
+ trimmedSubList(subList, callback);
+ }
+ }
+
+ @DataProvider(name="listProvider", parallel=true)
+ public static Object[][] listCases() {
+ final List<Object[]> cases = new LinkedList<>();
+ cases.add(new Object[] { new ArrayList<>() });
+ cases.add(new Object[] { new LinkedList<>() });
+ cases.add(new Object[] { new Vector<>() });
+ cases.add(new Object[] { new Stack<>() });
+ cases.add(new Object[] { new CopyOnWriteArrayList<>() });
+
+ cases.add(new Object[] { new ArrayList(){{add(42);}} });
+ cases.add(new Object[] { new LinkedList(){{add(42);}} });
+ cases.add(new Object[] { new Vector(){{add(42);}} });
+ cases.add(new Object[] { new Stack(){{add(42);}} });
+ cases.add(new Object[] { new CopyOnWriteArrayList(){{add(42);}} });
+ return cases.toArray(new Object[0][cases.size()]);
+ }
+
+ @Test(dataProvider = "listProvider")
+ public void testProvidedWithNull(final List<Integer> list) throws Exception {
+ try {
+ list.forEach(null);
+ fail("expected NPE not thrown");
+ } catch (NullPointerException npe) {}
+ try {
+ list.replaceAll(null);
+ fail("expected NPE not thrown");
+ } catch (NullPointerException npe) {}
+ try {
+ list.removeIf(null);
+ fail("expected NPE not thrown");
+ } catch (NullPointerException npe) {}
+ }
+
+ @Test
+ public void testForEach() throws Exception {
+ final CollectionSupplier supplier = new CollectionSupplier(LIST_CLASSES, SIZE);
+ for (final CollectionSupplier.TestCase test : supplier.get()) {
+ final List<Integer> original = ((List<Integer>) test.original);
+ final List<Integer> list = ((List<Integer>) test.collection);
+
+ try {
+ list.forEach(null);
+ fail("expected NPE not thrown");
+ } catch (NullPointerException npe) {}
+ CollectionAsserts.assertContents(list, original);
+
+ final List<Integer> actual = new LinkedList<>();
+ list.forEach(actual::add);
+ CollectionAsserts.assertContents(actual, list);
+ CollectionAsserts.assertContents(actual, original);
+
+ if (original.size() > SUBLIST_SIZE) {
+ final List<Integer> subList = original.subList(SUBLIST_FROM, SUBLIST_TO);
+ final List<Integer> actualSubList = new LinkedList<>();
+ subList.forEach(actualSubList::add);
+ assertEquals(actualSubList.size(), SUBLIST_SIZE);
+ for (int i = 0; i < SUBLIST_SIZE; i++) {
+ assertEquals(actualSubList.get(i), original.get(i + SUBLIST_FROM));
+ }
+ }
+
+ trimmedSubList(list, new Callback() {
+ @Override
+ public void call(final List<Integer> list) {
+ final List<Integer> actual = new LinkedList<>();
+ list.forEach(actual::add);
+ CollectionAsserts.assertContents(actual, list);
+ }
+ });
+ }
+ }
+
+ @Test
+ public void testRemoveIf() throws Exception {
+ final CollectionSupplier supplier = new CollectionSupplier(LIST_CLASSES, SIZE);
+
+ for (final CollectionSupplier.TestCase test : supplier.get()) {
+ final List<Integer> original = ((List<Integer>) test.original);
+ final List<Integer> list = ((List<Integer>) test.collection);
+
+ try {
+ list.removeIf(null);
+ fail("expected NPE not thrown");
+ } catch (NullPointerException npe) {}
+ CollectionAsserts.assertContents(list, original);
+
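+            // repeatedly strip the leading element; after each pass the list should equal the matching tail of original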
+ final AtomicInteger offset = new AtomicInteger(1);
+ while (list.size() > 0) {
+ removeFirst(original, list, offset);
+ }
+ }
+
+ for (final CollectionSupplier.TestCase test : supplier.get()) {
+ final List<Integer> original = ((List<Integer>) test.original);
+ final List<Integer> list = ((List<Integer>) test.collection);
+ list.removeIf(pOdd);
+ for (int i : list) {
+ assertTrue((i % 2) == 0);
+ }
+ for (int i : original) {
+ if (i % 2 == 0) {
+ assertTrue(list.contains(i));
+ }
+ }
+ list.removeIf(pEven);
+ assertTrue(list.isEmpty());
+ }
+
+ for (final CollectionSupplier.TestCase test : supplier.get()) {
+ final List<Integer> original = ((List<Integer>) test.original);
+ final List<Integer> list = ((List<Integer>) test.collection);
+ final List<Integer> listCopy = new ArrayList<>(list);
+ if (original.size() > SUBLIST_SIZE) {
+ final List<Integer> subList = list.subList(SUBLIST_FROM, SUBLIST_TO);
+ final List<Integer> subListCopy = new ArrayList<>(subList);
+ listCopy.removeAll(subList);
+ subList.removeIf(pOdd);
+ for (int i : subList) {
+ assertTrue((i % 2) == 0);
+ }
+ for (int i : subListCopy) {
+ if (i % 2 == 0) {
+ assertTrue(subList.contains(i));
+ } else {
+ assertFalse(subList.contains(i));
+ }
+ }
+ subList.removeIf(pEven);
+ assertTrue(subList.isEmpty());
+ // elements outside the view should remain
+ CollectionAsserts.assertContents(list, listCopy);
+ }
+ }
+
+ for (final CollectionSupplier.TestCase test : supplier.get()) {
+ final List<Integer> list = ((List<Integer>) test.collection);
+ trimmedSubList(list, new Callback() {
+ @Override
+ public void call(final List<Integer> list) {
+ final List<Integer> copy = new ArrayList<>(list);
+ list.removeIf(pOdd);
+ for (int i : list) {
+ assertTrue((i % 2) == 0);
+ }
+ for (int i : copy) {
+ if (i % 2 == 0) {
+ assertTrue(list.contains(i));
+ } else {
+ assertFalse(list.contains(i));
+ }
+ }
+ }
+ });
+ }
+ }
+
+ // remove the first element
+ private void removeFirst(final List<Integer> original, final List<Integer> list, final AtomicInteger offset) {
+ final AtomicBoolean first = new AtomicBoolean(true);
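+        // the predicate matches only the first element it is evaluated for: getAndSet(false) returns true once, then false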
+ list.removeIf(x -> first.getAndSet(false));
+ CollectionAsserts.assertContents(original.subList(offset.getAndIncrement(), original.size()), list);
+ }
+
+ @Test
+ public void testReplaceAll() throws Exception {
+ final int scale = 3;
+ final CollectionSupplier supplier = new CollectionSupplier(LIST_CLASSES, SIZE);
+ for (final CollectionSupplier.TestCase test : supplier.get()) {
+ final List<Integer> original = ((List<Integer>) test.original);
+ final List<Integer> list = ((List<Integer>) test.collection);
+
+ try {
+ list.replaceAll(null);
+ fail("expected NPE not thrown");
+ } catch (NullPointerException npe) {}
+ CollectionAsserts.assertContents(list, original);
+
+ list.replaceAll(x -> scale * x);
+ for (int i=0; i < original.size(); i++) {
+ assertTrue(list.get(i) == (scale * original.get(i)), "mismatch at index " + i);
+ }
+
+ if (original.size() > SUBLIST_SIZE) {
+ final List<Integer> subList = list.subList(SUBLIST_FROM, SUBLIST_TO);
+ subList.replaceAll(x -> x + 1);
+ // verify elements in view [from, to) were replaced
+ for (int i = 0; i < SUBLIST_SIZE; i++) {
+ assertTrue(subList.get(i) == ((scale * original.get(i + SUBLIST_FROM)) + 1),
+ "mismatch at sublist index " + i);
+ }
+ // verify that elements [0, from) remain unmodified
+ for (int i = 0; i < SUBLIST_FROM; i++) {
+ assertTrue(list.get(i) == (scale * original.get(i)),
+ "mismatch at original index " + i);
+ }
+ // verify that elements [to, size) remain unmodified
+ for (int i = SUBLIST_TO; i < list.size(); i++) {
+ assertTrue(list.get(i) == (scale * original.get(i)),
+ "mismatch at original index " + i);
+ }
+ }
+ }
+
+ for (final CollectionSupplier.TestCase test : supplier.get()) {
+ final List<Integer> list = ((List<Integer>) test.collection);
+ trimmedSubList(list, new Callback() {
+ @Override
+ public void call(final List<Integer> list) {
+ final List<Integer> copy = new ArrayList<>(list);
+ final int offset = 5;
+ list.replaceAll(x -> offset + x);
+ for (int i=0; i < copy.size(); i++) {
+ assertTrue(list.get(i) == (offset + copy.get(i)), "mismatch at index " + i);
+ }
+ }
+ });
+ }
+ }
+
+ @Test
+ public void testSort() throws Exception {
+ final CollectionSupplier supplier = new CollectionSupplier(LIST_CLASSES, SIZE);
+ for (final CollectionSupplier.TestCase test : supplier.get()) {
+ final List<Integer> original = ((List<Integer>) test.original);
+ final List<Integer> list = ((List<Integer>) test.collection);
+ CollectionSupplier.shuffle(list);
+ list.sort(Integer::compare);
+ CollectionAsserts.assertSorted(list, Integer::compare);
+ if (test.name.startsWith("reverse")) {
+ Collections.reverse(list);
+ }
+ CollectionAsserts.assertContents(list, original);
+
+ CollectionSupplier.shuffle(list);
+ list.sort(null);
+ CollectionAsserts.assertSorted(list, Comparators.<Integer>naturalOrder());
+ if (test.name.startsWith("reverse")) {
+ Collections.reverse(list);
+ }
+ CollectionAsserts.assertContents(list, original);
+
+ CollectionSupplier.shuffle(list);
+ list.sort(Comparators.<Integer>naturalOrder());
+ CollectionAsserts.assertSorted(list, Comparators.<Integer>naturalOrder());
+ if (test.name.startsWith("reverse")) {
+ Collections.reverse(list);
+ }
+ CollectionAsserts.assertContents(list, original);
+
+ CollectionSupplier.shuffle(list);
+ list.sort(Comparators.<Integer>reverseOrder());
+ CollectionAsserts.assertSorted(list, Comparators.<Integer>reverseOrder());
+ if (!test.name.startsWith("reverse")) {
+ Collections.reverse(list);
+ }
+ CollectionAsserts.assertContents(list, original);
+
+ CollectionSupplier.shuffle(list);
+ list.sort(BIT_COUNT_COMPARATOR);
+ CollectionAsserts.assertSorted(list, BIT_COUNT_COMPARATOR);
+            // check the sort by verifying that bitCount never decreases along the list
+ int minBitCount = 0;
+ int bitCount = 0;
+ for (final Integer i : list) {
+ bitCount = Integer.bitCount(i);
+ assertTrue(bitCount >= minBitCount);
+ minBitCount = bitCount;
+ }
+
+ @SuppressWarnings("unchecked")
+ final Class<? extends List<AtomicInteger>> type =
+ (Class<? extends List<AtomicInteger>>) Class.forName(test.className);
+ final Constructor<? extends List<AtomicInteger>> defaultConstructor = type.getConstructor();
+ final List<AtomicInteger> incomparables = (List<AtomicInteger>) defaultConstructor.newInstance();
+
+ for (int i=0; i < test.original.size(); i++) {
+ incomparables.add(new AtomicInteger(i));
+ }
+ CollectionSupplier.shuffle(incomparables);
+ incomparables.sort(ATOMIC_INTEGER_COMPARATOR);
+ for (int i=0; i < test.original.size(); i++) {
+ assertEquals(i, incomparables.get(i).intValue());
+ }
+
+ if (original.size() > SUBLIST_SIZE) {
+ final List<Integer> copy = new ArrayList<>(list);
+ final List<Integer> subList = list.subList(SUBLIST_FROM, SUBLIST_TO);
+ CollectionSupplier.shuffle(subList);
+ subList.sort(Comparators.<Integer>naturalOrder());
+ CollectionAsserts.assertSorted(subList, Comparators.<Integer>naturalOrder());
+ // verify that elements [0, from) remain unmodified
+ for (int i = 0; i < SUBLIST_FROM; i++) {
+ assertTrue(list.get(i) == copy.get(i),
+ "mismatch at index " + i);
+ }
+ // verify that elements [to, size) remain unmodified
+ for (int i = SUBLIST_TO; i < list.size(); i++) {
+ assertTrue(list.get(i) == copy.get(i),
+ "mismatch at index " + i);
+ }
+ }
+ }
+
+ for (final CollectionSupplier.TestCase test : supplier.get()) {
+ final List<Integer> list = ((List<Integer>) test.collection);
+ trimmedSubList(list, new Callback() {
+ @Override
+ public void call(final List<Integer> list) {
+ final List<Integer> copy = new ArrayList<>(list);
+ CollectionSupplier.shuffle(list);
+ list.sort(Comparators.<Integer>naturalOrder());
+ CollectionAsserts.assertSorted(list, Comparators.<Integer>naturalOrder());
+ }
+ });
+ }
+ }
+
+ @Test
+ public void testForEachThrowsCME() throws Exception {
+ final CollectionSupplier supplier = new CollectionSupplier(LIST_CME_CLASSES, SIZE);
+ for (final CollectionSupplier.TestCase test : supplier.get()) {
+ final List<Integer> list = ((List<Integer>) test.collection);
+ if (list.size() <= 1) {
+ continue;
+ }
+ boolean gotException = false;
+ try {
+ // bad predicate that modifies its list, should throw CME
+ list.forEach((x) -> {list.add(x);});
+ } catch (ConcurrentModificationException cme) {
+ gotException = true;
+ }
+ if (!gotException) {
+ fail("expected CME was not thrown from " + test);
+ }
+ }
+ }
+
+ @Test
+ public void testRemoveIfThrowsCME() throws Exception {
+ final CollectionSupplier supplier = new CollectionSupplier(LIST_CME_CLASSES, SIZE);
+ for (final CollectionSupplier.TestCase test : supplier.get()) {
+ final List<Integer> list = ((List<Integer>) test.collection);
+ if (list.size() <= 1) {
+ continue;
+ }
+ boolean gotException = false;
+ try {
+ // bad predicate that modifies its list, should throw CME
+ list.removeIf((x) -> {return list.add(x);});
+ } catch (ConcurrentModificationException cme) {
+ gotException = true;
+ }
+ if (!gotException) {
+ fail("expected CME was not thrown from " + test);
+ }
+ }
+ }
+
+ @Test
+ public void testReplaceAllThrowsCME() throws Exception {
+ final CollectionSupplier supplier = new CollectionSupplier(LIST_CME_CLASSES, SIZE);
+ for (final CollectionSupplier.TestCase test : supplier.get()) {
+ final List<Integer> list = ((List<Integer>) test.collection);
+ if (list.size() <= 1) {
+ continue;
+ }
+ boolean gotException = false;
+ try {
+ // bad predicate that modifies its list, should throw CME
+ list.replaceAll(x -> {int n = 3 * x; list.add(n); return n;});
+ } catch (ConcurrentModificationException cme) {
+ gotException = true;
+ }
+ if (!gotException) {
+ fail("expected CME was not thrown from " + test);
+ }
+ }
+ }
+
+ @Test
+ public void testSortThrowsCME() throws Exception {
+ final CollectionSupplier supplier = new CollectionSupplier(LIST_CME_CLASSES, SIZE);
+ for (final CollectionSupplier.TestCase test : supplier.get()) {
+ final List<Integer> list = ((List<Integer>) test.collection);
+ if (list.size() <= 1) {
+ continue;
+ }
+ boolean gotException = false;
+ try {
+ // bad predicate that modifies its list, should throw CME
+ list.sort((x, y) -> {list.add(x); return x - y;});
+ } catch (ConcurrentModificationException cme) {
+ gotException = true;
+ }
+ if (!gotException) {
+ fail("expected CME was not thrown from " + test);
+ }
+ }
+ }
+
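+    // expected contents of 0..9 after removing 4 via subList(3, 6), and then additionally removing 3 via subList(2, 5)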
+ private static final List<Integer> SLICED_EXPECTED = Arrays.asList(0, 1, 2, 3, 5, 6, 7, 8, 9);
+ private static final List<Integer> SLICED_EXPECTED2 = Arrays.asList(0, 1, 2, 5, 6, 7, 8, 9);
+
+ @DataProvider(name="shortIntListProvider", parallel=true)
+ public static Object[][] intListCases() {
+ final Integer[] DATA = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+ final List<Object[]> cases = new LinkedList<>();
+ cases.add(new Object[] { new ArrayList<>(Arrays.asList(DATA)) });
+ cases.add(new Object[] { new LinkedList<>(Arrays.asList(DATA)) });
+ cases.add(new Object[] { new Vector<>(Arrays.asList(DATA)) });
+ cases.add(new Object[] { new CopyOnWriteArrayList<>(Arrays.asList(DATA)) });
+ return cases.toArray(new Object[0][cases.size()]);
+ }
+
+ @Test(dataProvider = "shortIntListProvider")
+ public void testRemoveIfFromSlice(final List<Integer> list) throws Exception {
+ final List<Integer> sublist = list.subList(3, 6);
+ assertTrue(sublist.removeIf(x -> x == 4));
+ CollectionAsserts.assertContents(list, SLICED_EXPECTED);
+
+ final List<Integer> sublist2 = list.subList(2, 5);
+ assertTrue(sublist2.removeIf(x -> x == 3));
+ CollectionAsserts.assertContents(list, SLICED_EXPECTED2);
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/util/Collection/testlibrary/CollectionAsserts.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertTrue;
+import static org.testng.Assert.fail;
+
+/**
+ * @library
+ * CollectionAsserts -- assertion methods for lambda test cases
+ */
+public class CollectionAsserts {
+
+ public static void assertCountSum(Iterable<? super Integer> it, int count, int sum) {
+ assertCountSum(it.iterator(), count, sum);
+ }
+
+ public static void assertCountSum(Iterator<? super Integer> it, int count, int sum) {
+ int c = 0;
+ int s = 0;
+ while (it.hasNext()) {
+ int i = (Integer) it.next();
+ c++;
+ s += i;
+ }
+
+ assertEquals(c, count);
+ assertEquals(s, sum);
+ }
+
+ public static void assertConcat(Iterator<Character> it, String result) {
+ StringBuilder sb = new StringBuilder();
+ while (it.hasNext()) {
+ sb.append(it.next());
+ }
+
+ assertEquals(result, sb.toString());
+ }
+
+ public static<T extends Comparable<? super T>> void assertSorted(Iterator<T> i) {
+ if (!i.hasNext())
+ return;
+ T last = i.next();
+ while (i.hasNext()) {
+ T t = i.next();
+ assertTrue(last.compareTo(t) <= 0);
+ assertTrue(t.compareTo(last) >= 0);
+ last = t;
+ }
+ }
+
+ public static<T> void assertSorted(Iterator<T> i, Comparator<? super T> comp) {
+ if (!i.hasNext())
+ return;
+ T last = i.next();
+ while (i.hasNext()) {
+ T t = i.next();
+ assertTrue(comp.compare(last, t) <= 0);
+ assertTrue(comp.compare(t, last) >= 0);
+ last = t;
+ }
+ }
+
+ public static<T extends Comparable<? super T>> void assertSorted(Iterable<T> iter) {
+ assertSorted(iter.iterator());
+ }
+
+ public static<T> void assertSorted(Iterable<T> iter, Comparator<? super T> comp) {
+ assertSorted(iter.iterator(), comp);
+ }
+
+ public static <T> void assertUnique(Iterable<T> iter) {
+ assertUnique(iter.iterator());
+ }
+
+ public static<T> void assertUnique(Iterator<T> iter) {
+ if (!iter.hasNext()) {
+ return;
+ }
+
+ Set<T> uniq = new HashSet<>();
+ while(iter.hasNext()) {
+ T each = iter.next();
+ assertTrue(!uniq.contains(each));
+ uniq.add(each);
+ }
+ }
+
+ public static<T> void assertContents(Iterable<T> actual, Iterable<T> expected) {
+ assertContents(actual.iterator(), expected.iterator());
+ }
+
+ public static<T> void assertContents(Iterator<T> actual, Iterator<T> expected) {
+ List<T> history = new ArrayList<>();
+
+ while (expected.hasNext()) {
+ if (!actual.hasNext()) {
+ List<T> expectedData = new ArrayList<>(history);
+ while (expected.hasNext())
+ expectedData.add(expected.next());
+ fail(String.format("Premature end of data; expected=%s, found=%s", expectedData, history));
+ }
+ T a = actual.next();
+ T e = expected.next();
+ history.add(a);
+
+ if (!Objects.equals(a, e))
+ fail(String.format("Data mismatch; preceding=%s, nextExpected=%s, nextFound=%s", history, e, a));
+ }
+ if (actual.hasNext()) {
+ List<T> rest = new ArrayList<>();
+ while (actual.hasNext())
+ rest.add(actual.next());
+ fail(String.format("Unexpected data %s after %s", rest, history));
+ }
+ }
+
+ @SafeVarargs
+ @SuppressWarnings("varargs")
+ public static<T> void assertContents(Iterator<T> actual, T... expected) {
+ assertContents(actual, Arrays.asList(expected).iterator());
+ }
+
+ public static <T> boolean equalsContentsUnordered(Iterable<T> a, Iterable<T> b) {
+ Set<T> sa = new HashSet<>();
+ for (T t : a) {
+ sa.add(t);
+ }
+
+ Set<T> sb = new HashSet<>();
+ for (T t : b) {
+ sb.add(t);
+ }
+
+ return Objects.equals(sa, sb);
+ }
+
+ public static<T extends Comparable<? super T>> void assertContentsUnordered(Iterable<T> actual, Iterable<T> expected) {
+ ArrayList<T> one = new ArrayList<>();
+ for (T t : actual)
+ one.add(t);
+ ArrayList<T> two = new ArrayList<>();
+ for (T t : expected)
+ two.add(t);
+ Collections.sort(one);
+ Collections.sort(two);
+ assertContents(one, two);
+ }
+
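+    // asserts that concatenating the split iterables, in order, yields exactly the elements of list, with nothing left over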
+ static <T> void assertSplitContents(Iterable<Iterable<T>> splits, Iterable<T> list) {
+ Iterator<Iterable<T>> mI = splits.iterator();
+ Iterator<T> pI = null;
+ Iterator<T> lI = list.iterator();
+
+ while (lI.hasNext()) {
+ if (pI == null)
+ pI = mI.next().iterator();
+ while (!pI.hasNext()) {
+ if (!mI.hasNext()) {
+ break;
+ }
+ else {
+ pI = mI.next().iterator();
+ }
+ }
+ assertTrue(pI.hasNext());
+ T pT = pI.next();
+ T lT = lI.next();
+ assertEquals(pT, lT);
+ }
+
+ if (pI != null) {
+ assertTrue(!pI.hasNext());
+ }
+
+ while(mI.hasNext()) {
+ pI = mI.next().iterator();
+ assertTrue(!pI.hasNext());
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/util/Collection/testlibrary/CollectionSupplier.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.lang.Exception;
+import java.lang.Integer;
+import java.lang.Iterable;
+import java.lang.Override;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Random;
+import java.util.Set;
+
+import org.testng.TestException;
+
+import static org.testng.Assert.assertTrue;
+
+import java.lang.reflect.Constructor;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.function.Supplier;
+
+/**
+ * @library
+ * @summary A Supplier of test cases for Collection tests
+ */
+public final class CollectionSupplier implements Supplier<Iterable<CollectionSupplier.TestCase>> {
+
+ private final String[] classNames;
+ private final int size;
+
+ /**
+ * A Collection test case.
+ */
+ public static final class TestCase {
+
+ /**
+ * The name of the test case.
+ */
+ public final String name;
+
+ /**
+ * Class name of the instantiated Collection.
+ */
+ public final String className;
+
+ /**
+ * Unmodifiable reference collection, useful for comparisons.
+ */
+ public final Collection<Integer> original;
+
+ /**
+ * A modifiable test collection.
+ */
+ public final Collection<Integer> collection;
+
+ /**
+ * Create a Collection test case.
+ * @param name name of the test case
+ * @param className class name of the instantiated collection
+ * @param original reference collection
+ * @param collection the modifiable test collection
+ */
+ public TestCase(String name, String className,
+ Collection<Integer> original, Collection<Integer> collection) {
+ this.name = name;
+ this.className = className;
+ this.original =
+ List.class.isAssignableFrom(original.getClass()) ?
+ Collections.unmodifiableList((List<Integer>) original) :
+ Set.class.isAssignableFrom(original.getClass()) ?
+ Collections.unmodifiableSet((Set<Integer>) original) :
+ Collections.unmodifiableCollection(original);
+ this.collection = collection;
+ }
+
+ @Override
+ public String toString() {
+ return name + " " + className +
+ "\n original: " + original +
+ "\n target: " + collection;
+ }
+ }
+
+ /**
+ * Shuffle a list using a PRNG with known seed for repeatability
+ * @param list the list to be shuffled
+ */
+ public static <E> void shuffle(final List<E> list) {
+ // PRNG with known seed for repeatable tests
+ final Random prng = new Random(13);
+ final int size = list.size();
+ for (int i=0; i < size; i++) {
+ // random index in interval [i, size)
+ final int j = i + prng.nextInt(size - i);
+ // swap elements at indices i & j
+ final E e = list.get(i);
+ list.set(i, list.get(j));
+ list.set(j, e);
+ }
+ }
+
+ /**
+     * Create a {@code Supplier} that creates instances of the specified
+     * collection classes, each of the specified size.
+ *
+ * @param classNames class names that implement {@code Collection}
+ * @param size the desired size of each collection
+ */
+ public CollectionSupplier(String[] classNames, int size) {
+ this.classNames = Arrays.copyOf(classNames, classNames.length);
+ this.size = size;
+ }
+
+ @Override
+ public Iterable<TestCase> get() {
+ try {
+ return getThrows();
+ } catch (Exception e) {
+ throw new TestException(e);
+ }
+ }
+
+ private Iterable<TestCase> getThrows() throws Exception {
+ final Collection<TestCase> collections = new LinkedList<>();
+ for (final String className : classNames) {
+ @SuppressWarnings("unchecked")
+ final Class<? extends Collection<Integer>> type =
+ (Class<? extends Collection<Integer>>) Class.forName(className);
+ final Constructor<? extends Collection<Integer>>
+ defaultConstructor = type.getConstructor();
+ final Constructor<? extends Collection<Integer>>
+ copyConstructor = type.getConstructor(Collection.class);
+
+ final Collection<Integer> empty = defaultConstructor.newInstance();
+ collections.add(new TestCase("empty",
+ className,
+ copyConstructor.newInstance(empty),
+ empty));
+
+ final Collection<Integer> single = defaultConstructor.newInstance();
+ single.add(42);
+ collections.add(new TestCase("single",
+ className,
+ copyConstructor.newInstance(single),
+ single));
+
+ final Collection<Integer> regular = defaultConstructor.newInstance();
+ for (int i=0; i < size; i++) {
+ regular.add(i);
+ }
+ collections.add(new TestCase("regular",
+ className,
+ copyConstructor.newInstance(regular),
+ regular));
+
+ final Collection<Integer> reverse = defaultConstructor.newInstance();
+ for (int i=size; i >= 0; i--) {
+ reverse.add(i);
+ }
+ collections.add(new TestCase("reverse",
+ className,
+ copyConstructor.newInstance(reverse),
+ reverse));
+
+ final Collection<Integer> odds = defaultConstructor.newInstance();
+ for (int i=0; i < size; i++) {
+ odds.add((i * 2) + 1);
+ }
+ collections.add(new TestCase("odds",
+ className,
+ copyConstructor.newInstance(odds),
+ odds));
+
+ final Collection<Integer> evens = defaultConstructor.newInstance();
+ for (int i=0; i < size; i++) {
+ evens.add(i * 2);
+ }
+ collections.add(new TestCase("evens",
+ className,
+ copyConstructor.newInstance(evens),
+ evens));
+
+ final Collection<Integer> fibonacci = defaultConstructor.newInstance();
+ int prev2 = 0;
+ int prev1 = 1;
+ for (int i=0; i < size; i++) {
+ final int n = prev1 + prev2;
+ if (n < 0) { // stop on overflow
+ break;
+ }
+ fibonacci.add(n);
+ prev2 = prev1;
+ prev1 = n;
+ }
+ collections.add(new TestCase("fibonacci",
+ className,
+ copyConstructor.newInstance(fibonacci),
+ fibonacci));
+
+ // variants where the size of the backing storage != reported size
+ // created by removing half of the elements
+
+ final Collection<Integer> emptyWithSlack = defaultConstructor.newInstance();
+ emptyWithSlack.add(42);
+ assertTrue(emptyWithSlack.remove(42));
+ collections.add(new TestCase("emptyWithSlack",
+ className,
+ copyConstructor.newInstance(emptyWithSlack),
+ emptyWithSlack));
+
+ final Collection<Integer> singleWithSlack = defaultConstructor.newInstance();
+ singleWithSlack.add(42);
+ singleWithSlack.add(43);
+ assertTrue(singleWithSlack.remove(43));
+ collections.add(new TestCase("singleWithSlack",
+ className,
+ copyConstructor.newInstance(singleWithSlack),
+ singleWithSlack));
+
+ final Collection<Integer> regularWithSlack = defaultConstructor.newInstance();
+ for (int i=0; i < (2 * size); i++) {
+ regularWithSlack.add(i);
+ }
+ assertTrue(regularWithSlack.removeIf((x) -> {return x >= size;}));
+ collections.add(new TestCase("regularWithSlack",
+ className,
+ copyConstructor.newInstance(regularWithSlack),
+ regularWithSlack));
+
+ final Collection<Integer> reverseWithSlack = defaultConstructor.newInstance();
+ for (int i=2 * size; i >= 0; i--) {
+ reverseWithSlack.add(i);
+ }
+ assertTrue(reverseWithSlack.removeIf((x) -> {return x < size;}));
+ collections.add(new TestCase("reverseWithSlack",
+ className,
+ copyConstructor.newInstance(reverseWithSlack),
+ reverseWithSlack));
+
+ final Collection<Integer> oddsWithSlack = defaultConstructor.newInstance();
+ for (int i = 0; i < 2 * size; i++) {
+ oddsWithSlack.add((i * 2) + 1);
+ }
+ assertTrue(oddsWithSlack.removeIf((x) -> {return x >= size;}));
+ collections.add(new TestCase("oddsWithSlack",
+ className,
+ copyConstructor.newInstance(oddsWithSlack),
+ oddsWithSlack));
+
+ final Collection<Integer> evensWithSlack = defaultConstructor.newInstance();
+ for (int i = 0; i < 2 * size; i++) {
+ evensWithSlack.add(i * 2);
+ }
+ assertTrue(evensWithSlack.removeIf((x) -> {return x >= size;}));
+ collections.add(new TestCase("evensWithSlack",
+ className,
+ copyConstructor.newInstance(evensWithSlack),
+ evensWithSlack));
+
+ final Collection<Integer> fibonacciWithSlack = defaultConstructor.newInstance();
+ prev2 = 0;
+ prev1 = 1;
+ for (int i=0; i < size; i++) {
+ final int n = prev1 + prev2;
+ if (n < 0) { // stop on overflow
+ break;
+ }
+ fibonacciWithSlack.add(n);
+ prev2 = prev1;
+ prev1 = n;
+ }
+ assertTrue(fibonacciWithSlack.removeIf((x) -> {return x < 20;}));
+ collections.add(new TestCase("fibonacciWithSlack",
+ className,
+ copyConstructor.newInstance(fibonacciWithSlack),
+ fibonacciWithSlack));
+
+ }
+
+ return collections;
+ }
+
+}
--- a/jdk/test/java/util/Locale/LocaleProviders.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/test/java/util/Locale/LocaleProviders.java Tue Apr 23 11:13:38 2013 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
import java.text.*;
import java.text.spi.*;
import java.util.*;
+import java.util.spi.*;
import sun.util.locale.provider.LocaleProviderAdapter;
public class LocaleProviders {
@@ -55,6 +56,10 @@
bug8001440Test();
break;
+ case "bug8010666Test":
+ bug8010666Test();
+ break;
+
default:
throw new RuntimeException("Test method '"+methodName+"' not found.");
}
@@ -103,4 +108,38 @@
NumberFormat nf = NumberFormat.getInstance(locale);
String nu = nf.format(1234560);
}
+
+ // This test assumes Windows localized language/country display names.
+ static void bug8010666Test() {
+ if (System.getProperty("os.name").startsWith("Windows")) {
+ NumberFormat nf = NumberFormat.getInstance(Locale.US);
+ try {
+ double ver = nf.parse(System.getProperty("os.version")).doubleValue();
+ System.out.printf("Windows version: %.1f\n", ver);
+ if (ver >= 6.0) {
+ LocaleProviderAdapter lda = LocaleProviderAdapter.getAdapter(LocaleNameProvider.class, Locale.ENGLISH);
+ LocaleProviderAdapter.Type type = lda.getAdapterType();
+ if (type == LocaleProviderAdapter.Type.HOST) {
+ Locale mkmk = Locale.forLanguageTag("mk-MK");
+ String result = mkmk.getDisplayLanguage(Locale.ENGLISH);
+ if (!"Macedonian (FYROM)".equals(result)) {
+ throw new RuntimeException("Windows locale name provider did not return expected localized language name for \"mk\". Returned name was \"" + result + "\"");
+ }
+ result = Locale.US.getDisplayLanguage(Locale.ENGLISH);
+ if (!"English".equals(result)) {
+ throw new RuntimeException("Windows locale name provider did not return expected localized language name for \"en\". Returned name was \"" + result + "\"");
+ }
+ result = Locale.US.getDisplayCountry(Locale.ENGLISH);
+ if (ver >= 6.1 && !"United States".equals(result)) {
+ throw new RuntimeException("Windows locale name provider did not return expected localized country name for \"US\". Returned name was \"" + result + "\"");
+ }
+ } else {
+ throw new RuntimeException("Windows Host LocaleProviderAdapter was not selected for English locale.");
+ }
+ }
+ } catch (ParseException pe) {
+ throw new RuntimeException("Parsing Windows version failed: "+pe.toString());
+ }
+ }
+ }
}
--- a/jdk/test/java/util/Locale/LocaleProviders.sh Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/test/java/util/Locale/LocaleProviders.sh Tue Apr 23 11:13:38 2013 +0100
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
#!/bin/sh
#
# @test
-# @bug 6336885 7196799 7197573 7198834 8000245 8000615 8001440
+# @bug 6336885 7196799 7197573 7198834 8000245 8000615 8001440 8010666
# @summary tests for "java.locale.providers" system property
# @compile -XDignore.symbol.file LocaleProviders.java
# @run shell/timeout=600 LocaleProviders.sh
@@ -258,4 +258,15 @@
PARAM3=
runTest
+# testing 8010666 fix.
+if [ "${DEFLANG}" = "en" ]
+then
+ METHODNAME=bug8010666Test
+ PREFLIST=HOST
+ PARAM1=
+ PARAM2=
+ PARAM3=
+ runTest
+fi
+
exit $result
--- a/jdk/test/java/util/Spliterator/SpliteratorTraversingAndSplittingTest.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/test/java/util/Spliterator/SpliteratorTraversingAndSplittingTest.java Tue Apr 23 11:13:38 2013 +0100
@@ -184,6 +184,8 @@
@Override
public boolean tryAdvance(Consumer<? super Integer> action) {
+ if (action == null)
+ throw new NullPointerException();
if (it.hasNext()) {
action.accept(it.next());
return true;
@@ -193,7 +195,7 @@
}
}
}
- db.add("new Spliterators.AbstractAdvancingSpliterator()",
+ db.add("new Spliterators.AbstractSpliterator()",
() -> new SpliteratorFromIterator(exp.iterator(), exp.size()));
// Collections
@@ -370,7 +372,28 @@
db.addCollection(c -> Collections.singletonList(exp.get(0)));
}
- // @@@ Collections.synchronized/unmodifiable/checked wrappers
+ // Collections.synchronized/unmodifiable/checked wrappers
+ db.addCollection(Collections::unmodifiableCollection);
+ db.addCollection(c -> Collections.unmodifiableSet(new HashSet<>(c)));
+ db.addCollection(c -> Collections.unmodifiableSortedSet(new TreeSet<>(c)));
+ db.addList(c -> Collections.unmodifiableList(new ArrayList<>(c)));
+ db.addMap(Collections::unmodifiableMap);
+ db.addMap(m -> Collections.unmodifiableSortedMap(new TreeMap<>(m)));
+
+ db.addCollection(Collections::synchronizedCollection);
+ db.addCollection(c -> Collections.synchronizedSet(new HashSet<>(c)));
+ db.addCollection(c -> Collections.synchronizedSortedSet(new TreeSet<>(c)));
+ db.addList(c -> Collections.synchronizedList(new ArrayList<>(c)));
+ db.addMap(Collections::synchronizedMap);
+ db.addMap(m -> Collections.synchronizedSortedMap(new TreeMap<>(m)));
+
+ db.addCollection(c -> Collections.checkedCollection(c, Integer.class));
+ db.addCollection(c -> Collections.checkedQueue(new ArrayDeque<>(c), Integer.class));
+ db.addCollection(c -> Collections.checkedSet(new HashSet<>(c), Integer.class));
+ db.addCollection(c -> Collections.checkedSortedSet(new TreeSet<>(c), Integer.class));
+ db.addList(c -> Collections.checkedList(new ArrayList<>(c), Integer.class));
+ db.addMap(c -> Collections.checkedMap(c, Integer.class, Integer.class));
+ db.addMap(m -> Collections.checkedSortedMap(new TreeMap<>(m), Integer.class, Integer.class));
// Maps
@@ -402,6 +425,13 @@
@Test(dataProvider = "Spliterator<Integer>")
@SuppressWarnings({"unchecked", "rawtypes"})
+ public void testNullPointerException(String description, Collection exp, Supplier<Spliterator> s) {
+ executeAndCatch(NullPointerException.class, () -> s.get().forEachRemaining(null));
+ executeAndCatch(NullPointerException.class, () -> s.get().tryAdvance(null));
+ }
+
+ @Test(dataProvider = "Spliterator<Integer>")
+ @SuppressWarnings({"unchecked", "rawtypes"})
public void testForEach(String description, Collection exp, Supplier<Spliterator> s) {
testForEach(exp, s, (Consumer<Object> b) -> b);
}
@@ -507,6 +537,8 @@
@Override
public boolean tryAdvance(IntConsumer action) {
+ if (action == null)
+ throw new NullPointerException();
if (index < a.length) {
action.accept(a[index++]);
return true;
@@ -553,6 +585,12 @@
}
@Test(dataProvider = "Spliterator.OfInt")
+ public void testIntNullPointerException(String description, Collection<Integer> exp, Supplier<Spliterator.OfInt> s) {
+ executeAndCatch(NullPointerException.class, () -> s.get().forEachRemaining((IntConsumer) null));
+ executeAndCatch(NullPointerException.class, () -> s.get().tryAdvance((IntConsumer) null));
+ }
+
+ @Test(dataProvider = "Spliterator.OfInt")
public void testIntForEach(String description, Collection<Integer> exp, Supplier<Spliterator.OfInt> s) {
testForEach(exp, s, intBoxingConsumer());
}
@@ -652,6 +690,8 @@
@Override
public boolean tryAdvance(LongConsumer action) {
+ if (action == null)
+ throw new NullPointerException();
if (index < a.length) {
action.accept(a[index++]);
return true;
@@ -705,6 +745,12 @@
}
@Test(dataProvider = "Spliterator.OfLong")
+ public void testLongNullPointerException(String description, Collection<Long> exp, Supplier<Spliterator.OfLong> s) {
+ executeAndCatch(NullPointerException.class, () -> s.get().forEachRemaining((LongConsumer) null));
+ executeAndCatch(NullPointerException.class, () -> s.get().tryAdvance((LongConsumer) null));
+ }
+
+ @Test(dataProvider = "Spliterator.OfLong")
public void testLongForEach(String description, Collection<Long> exp, Supplier<Spliterator.OfLong> s) {
testForEach(exp, s, longBoxingConsumer());
}
@@ -804,6 +850,8 @@
@Override
public boolean tryAdvance(DoubleConsumer action) {
+ if (action == null)
+ throw new NullPointerException();
if (index < a.length) {
action.accept(a[index++]);
return true;
@@ -857,6 +905,12 @@
}
@Test(dataProvider = "Spliterator.OfDouble")
+ public void testDoubleNullPointerException(String description, Collection<Double> exp, Supplier<Spliterator.OfDouble> s) {
+ executeAndCatch(NullPointerException.class, () -> s.get().forEachRemaining((DoubleConsumer) null));
+ executeAndCatch(NullPointerException.class, () -> s.get().tryAdvance((DoubleConsumer) null));
+ }
+
+ @Test(dataProvider = "Spliterator.OfDouble")
public void testDoubleForEach(String description, Collection<Double> exp, Supplier<Spliterator.OfDouble> s) {
testForEach(exp, s, doubleBoxingConsumer());
}
@@ -1057,8 +1111,8 @@
}
private static <T, S extends Spliterator<T>> void visit(int depth, int curLevel,
- List<T> dest, S spliterator, UnaryOperator<Consumer<T>> boxingAdapter,
- int rootCharacteristics, boolean useTryAdvance) {
+ List<T> dest, S spliterator, UnaryOperator<Consumer<T>> boxingAdapter,
+ int rootCharacteristics, boolean useTryAdvance) {
if (curLevel < depth) {
long beforeSize = spliterator.getExactSizeIfKnown();
Spliterator<T> split = spliterator.trySplit();
@@ -1187,13 +1241,13 @@
assertTrue(leftSplit.estimateSize() < parentEstimateSize,
String.format("Left split size estimate %d >= parent split size estimate %d", leftSplit.estimateSize(), parentEstimateSize));
assertTrue(parentAndRightSplit.estimateSize() < parentEstimateSize,
- String.format("Right split size estimate %d >= parent split size estimate %d", leftSplit.estimateSize(), parentEstimateSize));
+                       String.format("Right split size estimate %d >= parent split size estimate %d", parentAndRightSplit.estimateSize(), parentEstimateSize));
}
else {
assertTrue(leftSplit.estimateSize() <= parentEstimateSize,
- String.format("Left split size estimate %d > parent split size estimate %d", leftSplit.estimateSize(), parentEstimateSize));
+ String.format("Left split size estimate %d > parent split size estimate %d", leftSplit.estimateSize(), parentEstimateSize));
assertTrue(parentAndRightSplit.estimateSize() <= parentEstimateSize,
- String.format("Right split size estimate %d > parent split size estimate %d", leftSplit.estimateSize(), parentEstimateSize));
+                       String.format("Right split size estimate %d > parent split size estimate %d", parentAndRightSplit.estimateSize(), parentEstimateSize));
}
long leftSize = leftSplit.getExactSizeIfKnown();
@@ -1254,4 +1308,22 @@
});
return result;
}
+
+ private void executeAndCatch(Class<? extends Exception> expected, Runnable r) {
+ Exception caught = null;
+ try {
+ r.run();
+ }
+ catch (Exception e) {
+ caught = e;
+ }
+
+ assertNotNull(caught,
+ String.format("No Exception was thrown, expected an Exception of %s to be thrown",
+ expected.getName()));
+ assertTrue(expected.isInstance(caught),
+ String.format("Exception thrown %s not an instance of %s",
+ caught.getClass().getName(), expected.getName()));
+ }
+
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/util/logging/DrainFindDeadlockTest.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+import java.lang.management.ThreadInfo;
+import java.lang.management.ThreadMXBean;
+import java.lang.Thread.State;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.util.logging.LogManager;
+import java.util.logging.Logger;
+import java.util.Map;
+
+/**
+ * @test
+ * @bug 8010939
+ * @summary check for deadlock between findLogger() and drainLoggerRefQueueBounded()
+ * @author jim.gish@oracle.com
+ * @build DrainFindDeadlockTest
+ * @run main/othervm/timeout=10 DrainFindDeadlockTest
+ */
+
+/**
+ * This test checks for a deadlock between
+ * LogManager$LoggerContext.findLogger() and
+ * LogManager.drainLoggerRefQueueBounded(), which could happen when
+ * Logger.getLogger() and LogManager.readConfiguration() are called from
+ * different threads.
+ */
+public class DrainFindDeadlockTest {
+ private LogManager mgr = LogManager.getLogManager();
+ private final static int MAX_ITERATIONS = 100;
+
+ // Get a ThreadMXBean so we can check for deadlock. N.B. this may
+ // not be supported on all platforms, which means we will have to
+ // resort to the traditional test timeout method. However, if
+ // we have the support we'll get the deadlock details if one
+ // is detected.
+ private final static ThreadMXBean threadMXBean =
+ ManagementFactory.getThreadMXBean();
+ private final boolean threadMXBeanDeadlockSupported =
+ threadMXBean.isSynchronizerUsageSupported();
+
+ public static void main(String... args) throws IOException, Exception {
+ new DrainFindDeadlockTest().testForDeadlock();
+ }
+
+    public static void randomDelay() {
+        // busy-wait for a pseudo-random number of iterations to vary the thread interleaving;
+        // note the parentheses: casting Math.random() itself to int would always give 0
+        int runs = (int) (Math.random() * 1000000);
+        int c = 0;
+
+        for (int i = 0; i < runs; ++i) {
+            c = c + i;
+        }
+    }
+
+ public void testForDeadlock() throws IOException, Exception {
+ System.out.println("Deadlock detection "
+ + (threadMXBeanDeadlockSupported ? "is" : "is not") +
+ " available.");
+ Thread setup = new Thread(new SetupLogger(), "SetupLogger");
+ Thread readConfig = new Thread(new ReadConfig(), "ReadConfig");
+ Thread check = new Thread(new DeadlockChecker(setup, readConfig),
+ "DeadlockChecker");
+
+ // make the threads daemon threads so they will go away when the
+ // test exits
+ setup.setDaemon(true);
+ readConfig.setDaemon(true);
+ check.setDaemon(true);
+
+ check.start(); setup.start(); readConfig.start();
+ try {
+ check.join();
+ } catch (InterruptedException ex) {
+ ex.printStackTrace();
+ }
+ try {
+ readConfig.join();
+ setup.join();
+ } catch (InterruptedException ex) {
+ ex.printStackTrace();
+ }
+ System.out.println("Test passed");
+ }
+
+ class SetupLogger implements Runnable {
+ Logger logger = null;
+
+ @Override
+ public void run() {
+ System.out.println("Running " + Thread.currentThread().getName());
+
+ for (int i=0; i < MAX_ITERATIONS; i++) {
+ logger = Logger.getLogger("DrainFindDeadlockTest"+i);
+ DrainFindDeadlockTest.randomDelay();
+ }
+ }
+ }
+
+ class ReadConfig implements Runnable {
+ @Override
+ public void run() {
+ System.out.println("Running " + Thread.currentThread().getName());
+ for (int i=0; i < MAX_ITERATIONS; i++) {
+ try {
+ mgr.readConfiguration();
+ } catch (IOException | SecurityException ex) {
+ throw new RuntimeException("FAILED: test setup problem", ex);
+ }
+ DrainFindDeadlockTest.randomDelay();
+ }
+ }
+ }
+
+ class DeadlockChecker implements Runnable {
+ Thread t1, t2;
+
+ DeadlockChecker(Thread t1, Thread t2) {
+ this.t1 = t1;
+ this.t2 = t2;
+ }
+
+ void checkState(Thread x, Thread y) {
+ // System.out.println("checkstate");
+ boolean isXblocked = x.getState().equals(State.BLOCKED);
+ boolean isYblocked = y.getState().equals(State.BLOCKED);
+ long[] deadlockedThreads = null;
+
+ if (isXblocked && isYblocked) {
+ System.out.println("threads blocked");
+ // they are both blocked, but this doesn't necessarily mean
+ // they are deadlocked
+ if (threadMXBeanDeadlockSupported) {
+ System.out.println("checking for deadlock");
+ deadlockedThreads = threadMXBean.findDeadlockedThreads();
+ } else {
+ System.out.println("Can't check for deadlock");
+ }
+ if (deadlockedThreads != null) {
+ System.out.println("We detected a deadlock! ");
+ ThreadInfo[] threadInfos = threadMXBean.getThreadInfo(
+ deadlockedThreads, true, true);
+ for (ThreadInfo threadInfo: threadInfos) {
+ System.out.println(threadInfo);
+ }
+ throw new RuntimeException("TEST FAILED: Deadlock detected");
+ }
+ System.out.println("We may have a deadlock");
+ Map<Thread, StackTraceElement[]> threadMap =
+ Thread.getAllStackTraces();
+ dumpStack(threadMap.get(x), x);
+ dumpStack(threadMap.get(y), y);
+ }
+ }
+
+ private void dumpStack(StackTraceElement[] aStackElt, Thread aThread) {
+ if (aStackElt != null) {
+ System.out.println("Thread:" + aThread.getName() + ": " +
+ aThread.getState());
+ for (StackTraceElement element: aStackElt) {
+ System.out.println(" " + element);
+ }
+ }
+ }
+
+ @Override
+ public void run() {
+ System.out.println("Running " + Thread.currentThread().getName());
+ for (int i=0; i < MAX_ITERATIONS*2; i++) {
+ checkState(t1, t2);
+ try {
+ Thread.sleep(10);
+ } catch (InterruptedException ex) {
+            }
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/util/logging/bundlesearch/ClassPathTestBundle_en.properties Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,25 @@
+#
+# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+sample1=translation #2 for sample1
+sample2=translation #2 for sample2
+supports-test=ResourceBundleSearchTest
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/util/logging/bundlesearch/IndirectlyLoadABundle.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.nio.file.Paths;
+
+/**
+ * This class is used to ensure that a resource bundle loadable by a classloader
+ * on the caller's stack, but not via the classpath or TCCL, cannot be loaded by
+ * Logger.getLogger() through a stack search.
+ *
+ * @author Jim Gish
+ */
+public class IndirectlyLoadABundle {
+
+ private final static String rbName = "StackSearchableResource";
+
+ public boolean loadAndTest() throws Throwable {
+ // Find out where we are running from so we can setup the URLClassLoader URLs
+ // test.src and test.classes will be set if running in jtreg, but probably
+ // not otherwise
+ String testDir = System.getProperty("test.src", System.getProperty("user.dir"));
+ String testClassesDir = System.getProperty("test.classes",
+ System.getProperty("user.dir"));
+ String sep = System.getProperty("file.separator");
+
+ URL[] urls = new URL[2];
+
+ // Allow for both jtreg and standalone cases here
+ urls[0] = Paths.get(testDir, "resources").toUri().toURL();
+ urls[1] = Paths.get(testClassesDir).toUri().toURL();
+
+ System.out.println("INFO: urls[0] = " + urls[0]);
+ System.out.println("INFO: urls[1] = " + urls[1]);
+
+ // Make sure we can find it via the URLClassLoader
+ URLClassLoader yetAnotherResourceCL = new URLClassLoader(urls, null);
+ if (!testForValidResourceSetup(yetAnotherResourceCL)) {
+ throw new Exception("Couldn't directly load bundle " + rbName
+ + " as expected. Test config problem");
+ }
+ // But it shouldn't be available via the system classloader
+ ClassLoader myCL = this.getClass().getClassLoader();
+ if (testForValidResourceSetup(myCL)) {
+ throw new Exception("Was able to directly load bundle " + rbName
+ + " from " + myCL + " but shouldn't have been"
+ + " able to. Test config problem");
+ }
+
+ Class<?> loadItUpClazz = Class.forName("LoadItUp", true, yetAnotherResourceCL);
+ ClassLoader actual = loadItUpClazz.getClassLoader();
+ if (actual != yetAnotherResourceCL) {
+ throw new Exception("LoadItUp was loaded by an unexpected CL: " + actual);
+ }
+ Object loadItUp = loadItUpClazz.newInstance();
+ Method testMethod = loadItUpClazz.getMethod("test", String.class);
+ try {
+ return (Boolean) testMethod.invoke(loadItUp, rbName);
+ } catch (InvocationTargetException ex) {
+ throw ex.getTargetException();
+ }
+ }
+
+ private boolean testForValidResourceSetup(ClassLoader cl) {
+ // First make sure the test environment is setup properly and the bundle actually
+ // exists
+ return ResourceBundleSearchTest.isOnClassPath(rbName, cl);
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/util/logging/bundlesearch/LoadItUp.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+import java.util.MissingResourceException;
+import java.util.logging.Logger;
+
+/*
+ * This class is loaded onto the call stack when the test method is called
+ * and then its classloader can be used to find a property bundle in the same
+ * directory as the class. However, Logger is not allowed
+ * to find the bundle by looking up the stack for this classloader.
+ * We verify that this cannot happen.
+ *
+ * @author Jim Gish
+ */
+public class LoadItUp {
+
+ private final static boolean DEBUG = false;
+
+ public Boolean test(String rbName) throws Exception {
+ // we should not be able to find the resource in this directory via
+ // getLogger calls. The only way that would be possible given this setup
+        // is if Logger.getLogger searched up the call stack
+ return lookupBundle(rbName);
+ }
+
+ private boolean lookupBundle(String rbName) {
+ // See if Logger.getLogger can find the resource in this directory
+ try {
+ Logger aLogger = Logger.getLogger("NestedLogger", rbName);
+ } catch (MissingResourceException re) {
+ if (DEBUG) {
+ System.out.println(
+ "As expected, LoadItUp.lookupBundle() did not find the bundle "
+ + rbName);
+ }
+ return false;
+ }
+ System.out.println("FAILED: LoadItUp.lookupBundle() found the bundle "
+ + rbName + " using a stack search.");
+ return true;
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/util/logging/bundlesearch/ResourceBundleSearchTest.java Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8002070
+ * @summary Remove the stack search for a resource bundle Logger to use
+ * @author Jim Gish
+ * @build ResourceBundleSearchTest IndirectlyLoadABundle LoadItUp
+ * @run main ResourceBundleSearchTest
+ */
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+import java.util.MissingResourceException;
+import java.util.ResourceBundle;
+import java.util.logging.Logger;
+
+public class ResourceBundleSearchTest {
+
+ private final static boolean DEBUG = false;
+ private final static String LOGGER_PREFIX = "myLogger.";
+ private static int loggerNum = 0;
+ private final static String PROP_RB_NAME = "ClassPathTestBundle";
+ private final static String TCCL_TEST_BUNDLE = "ContextClassLoaderTestBundle";
+
+ private static int numPass = 0;
+ private static int numFail = 0;
+ private static List<String> msgs = new ArrayList<>();
+
+ public static void main(String[] args) throws Throwable {
+ ResourceBundleSearchTest test = new ResourceBundleSearchTest();
+ test.runTests();
+ }
+
+ private void runTests() throws Throwable {
+ // ensure we are using en as the default Locale so we can find the resource
+ Locale.setDefault(Locale.ENGLISH);
+
+ String testClasses = System.getProperty("test.classes");
+ System.out.println( "test.classes = " + testClasses );
+
+ ClassLoader myClassLoader = ClassLoader.getSystemClassLoader();
+
+ // Find out where we are running from so we can set up the URL for the URLClassLoader
+ String userDir = System.getProperty("user.dir");
+ String testDir = System.getProperty("test.src", userDir);
+ String sep = System.getProperty("file.separator");
+
+ URL[] urls = new URL[1];
+
+ urls[0] = Paths.get(testDir, "resources").toUri().toURL();
+ URLClassLoader rbClassLoader = new URLClassLoader(urls);
+
+ // Test 1 - can we find a Logger bundle from doing a stack search?
+ // We shouldn't be able to
+ assertFalse(testGetBundleFromStackSearch(), "testGetBundleFromStackSearch");
+
+ // Test 2 - can we find a Logger bundle off of the Thread context class
+ // loader? We should be able to.
+ assertTrue(
+ testGetBundleFromTCCL(TCCL_TEST_BUNDLE, rbClassLoader),
+ "testGetBundleFromTCCL");
+
+ // Test 3 - Can we find a Logger bundle from the classpath? We should be
+ // able to, but only if it is actually there.
+ // We check whether the bundle is on the classpath so that this test also
+ // works standalone. Under jtreg/samevm the resource bundles are not on
+ // the classpath; running standalone (or under othervm), they are.
+ if (isOnClassPath(PROP_RB_NAME, myClassLoader)) {
+ debug("We should be able to see " + PROP_RB_NAME + " on the classpath");
+ assertTrue(testGetBundleFromSystemClassLoader(PROP_RB_NAME),
+ "testGetBundleFromSystemClassLoader");
+ } else {
+ debug("We should not be able to see " + PROP_RB_NAME + " on the classpath");
+ assertFalse(testGetBundleFromSystemClassLoader(PROP_RB_NAME),
+ "testGetBundleFromSystemClassLoader");
+ }
+
+ report();
+ }
+
+ private void report() throws Exception {
+ System.out.println("Num passed = " + numPass + " Num failed = " + numFail);
+ if (numFail > 0) {
+ // We only care about the messages if they were errors
+ for (String msg : msgs) {
+ System.out.println(msg);
+ }
+ throw new Exception(numFail + " out of " + (numPass + numFail)
+ + " tests failed.");
+ }
+ }
+
+ public void assertTrue(boolean testResult, String testName) {
+ if (testResult) {
+ numPass++;
+ } else {
+ numFail++;
+ System.out.println("FAILED: " + testName
+ + " was supposed to return true but did NOT!");
+ }
+ }
+
+ public void assertFalse(boolean testResult, String testName) {
+ if (!testResult) {
+ numPass++;
+ } else {
+ numFail++;
+ System.out.println("FAILED: " + testName
+ + " was supposed to return false but did NOT!");
+ }
+ }
+
+ public boolean testGetBundleFromStackSearch() throws Throwable {
+ // This should fail: searching up the caller's call stack was the old
+ // behavior that has been removed.
+ IndirectlyLoadABundle indirectLoader = new IndirectlyLoadABundle();
+ return indirectLoader.loadAndTest();
+ }
+
+ public boolean testGetBundleFromTCCL(String bundleName,
+ ClassLoader setOnTCCL) throws InterruptedException {
+ // This should succeed. We should be able to get the bundle from the
+ // thread context class loader
+ debug("Looking for " + bundleName + " using TCCL");
+ LoggingThread lr = new LoggingThread(bundleName, setOnTCCL);
+ lr.start();
+ synchronized (lr) {
+ lr.wait();
+ }
+ msgs.add(lr.msg);
+ return lr.foundBundle;
+ }
+
+ /*
+ * @param baseName the resource bundle base name to look for
+ * @param cl the class loader to use for the search
+ * @return true iff the bundle named baseName is visible through cl
+ */
+ public static boolean isOnClassPath(String baseName, ClassLoader cl) {
+ ResourceBundle rb = null;
+ try {
+ rb = ResourceBundle.getBundle(baseName, Locale.getDefault(), cl);
+ System.out.println("INFO: Found bundle " + baseName + " on " + cl);
+ } catch (MissingResourceException e) {
+ System.out.println("INFO: Could not find bundle " + baseName + " on " + cl);
+ return false;
+ }
+ return (rb != null);
+ }
+
+ private static String newLoggerName() {
+ // We need a new logger name every time we attempt to find a bundle via
+ // Logger.getLogger, so we simply append an integer that is incremented
+ // on each call.
+ loggerNum++;
+ return LOGGER_PREFIX + loggerNum;
+ }
+
+ public boolean testGetBundleFromSystemClassLoader(String bundleName) {
+ // this should succeed if the bundle is on the system classpath.
+ try {
+ Logger aLogger = Logger.getLogger(ResourceBundleSearchTest.newLoggerName(),
+ bundleName);
+ } catch (MissingResourceException re) {
+ msgs.add("INFO: testGetBundleFromSystemClassLoader() did not find bundle "
+ + bundleName);
+ return false;
+ }
+ msgs.add("INFO: testGetBundleFromSystemClassLoader() found the bundle "
+ + bundleName);
+ return true;
+ }
+
+ public static class LoggingThread extends Thread {
+
+ boolean foundBundle = false;
+ String msg = null;
+ ClassLoader clToSetOnTCCL = null;
+ String bundleName = null;
+
+ public LoggingThread(String bundleName) {
+ this.bundleName = bundleName;
+ }
+
+ public LoggingThread(String bundleName, ClassLoader setOnTCCL) {
+ this.clToSetOnTCCL = setOnTCCL;
+ this.bundleName = bundleName;
+ }
+
+ public void run() {
+ boolean setTCCL = false;
+ try {
+ if (clToSetOnTCCL != null) {
+ Thread.currentThread().setContextClassLoader(clToSetOnTCCL);
+ setTCCL = true;
+ }
+ // This should succeed when the bundle is visible through the TCCL we
+ // just set (or through the classpath).
+ try {
+ Logger aLogger = Logger.getLogger(ResourceBundleSearchTest.newLoggerName(),
+ bundleName);
+ msg = "INFO: LoggingRunnable() found the bundle " + bundleName
+ + (setTCCL ? " with " : " without ") + "setting the TCCL";
+ foundBundle = true;
+ } catch (MissingResourceException re) {
+ msg = "INFO: LoggingRunnable() did not find the bundle " + bundleName
+ + (setTCCL ? " with " : " without ") + "setting the TCCL";
+ foundBundle = false;
+ }
+ } catch (Throwable e) {
+ e.printStackTrace();
+ System.exit(1);
+ }
+ }
+ }
+
+ private void debug(String msg) {
+ if (DEBUG) {
+ System.out.println(msg);
+ }
+ }
+}
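
One note on testGetBundleFromTCCL above: waiting on the Thread object itself works only because the VM performs an internal notifyAll when a thread terminates, and it can block indefinitely if the worker finishes before the main thread reaches wait(). A sketch of a race-free alternative using Thread.join(), written as a drop-in body under the assumption of the surrounding class:

public boolean testGetBundleFromTCCL(String bundleName,
        ClassLoader setOnTCCL) throws InterruptedException {
    debug("Looking for " + bundleName + " using TCCL");
    LoggingThread lr = new LoggingThread(bundleName, setOnTCCL);
    lr.start();
    lr.join(); // returns immediately if the thread has already terminated
    msgs.add(lr.msg);
    return lr.foundBundle;
}
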
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/util/logging/bundlesearch/resources/ContextClassLoaderTestBundle_en.properties Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,25 @@
+#
+# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+sample1=translation #3 for sample1
+sample2=translation #3 for sample2
+supports-test=ResourceBundleSearchTest
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/jdk/test/java/util/logging/bundlesearch/resources/StackSearchableResource_en.properties Tue Apr 23 11:13:38 2013 +0100
@@ -0,0 +1,25 @@
+#
+# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+sample1=translation #4 for sample1
+sample2=translation #4 for sample2
+supports-test=ResourceBundleSearchTest
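
The two .properties files above are ordinary java.util resource bundles; how a test consumes them is easiest to see in isolation. A small sketch that reads one of them through a URLClassLoader pointed at the resources directory (the relative path is illustrative):

import java.net.URL;
import java.net.URLClassLoader;
import java.nio.file.Paths;
import java.util.Locale;
import java.util.ResourceBundle;

public class BundleReadSketch {
    public static void main(String[] args) throws Exception {
        URL[] urls = { Paths.get("bundlesearch", "resources").toUri().toURL() }; // illustrative path
        try (URLClassLoader cl = new URLClassLoader(urls)) {
            ResourceBundle rb = ResourceBundle.getBundle(
                    "ContextClassLoaderTestBundle", Locale.ENGLISH, cl);
            // The key below is defined in the file shown above.
            System.out.println(rb.getString("supports-test")); // ResourceBundleSearchTest
        }
    }
}
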
--- a/jdk/test/sun/security/tools/jarsigner/TimestampCheck.java Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/test/sun/security/tools/jarsigner/TimestampCheck.java Tue Apr 23 11:13:38 2013 +0100
@@ -260,6 +260,8 @@
jarsigner(cmd, 7, false); // tsbad2
jarsigner(cmd, 8, false); // tsbad3
jarsigner(cmd, 9, false); // no cert in timestamp
+ jarsigner(cmd + " -tsapolicyid 1.2.3.4", 0, true);
+ jarsigner(cmd + " -tsapolicyid 1.2.3.5", 0, false);
} else { // Run as a standalone server
System.err.println("Press Enter to quit server");
System.in.read();
--- a/jdk/test/sun/security/tools/jarsigner/ts.sh Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/test/sun/security/tools/jarsigner/ts.sh Tue Apr 23 11:13:38 2013 +0100
@@ -22,7 +22,7 @@
#
# @test
-# @bug 6543842 6543440 6939248
+# @bug 6543842 6543440 6939248 8009636
# @summary checking response of timestamp
#
# @run shell/timeout=600 ts.sh
--- a/jdk/test/sun/security/tools/keytool/console.sh Wed Apr 17 15:04:59 2013 -0700
+++ b/jdk/test/sun/security/tools/keytool/console.sh Tue Apr 23 11:13:38 2013 +0100
@@ -1,5 +1,3 @@
-#! /bin/sh
-
#
# Copyright (c) 2006, 2008, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -24,10 +22,11 @@
#
# @test
-# @bug 6418647
+# @bug 6418647 8005527
# @summary Doc bug 5035358 shows sun.security.util.Password.readPassword() is buggy.
# @author Weijun Wang
-#
+# @ignore unable to test manual tools that read input from stdin
+# and write output to stderr and stdout
# @run shell/manual console.sh
if [ "$ALT_PASS" = "" ]; then