Merge
author mduigou
Tue, 08 Mar 2011 15:10:48 -0800
changeset 8767 3c56f4183741
parent 8766 9203cd75421b (current diff)
parent 8765 dfc2a131d08a (diff)
child 8768 4e98f3b11578
child 8769 728aa3db9869
child 8776 788449aeb83e
--- a/jdk/src/share/classes/java/util/concurrent/ConcurrentSkipListMap.java	Tue Mar 08 15:09:49 2011 -0800
+++ b/jdk/src/share/classes/java/util/concurrent/ConcurrentSkipListMap.java	Tue Mar 08 15:10:48 2011 -0800
@@ -44,8 +44,8 @@
  * creation time, depending on which constructor is used.
  *
  * <p>This class implements a concurrent variant of <a
- * href="http://www.cs.umd.edu/~pugh/">SkipLists</a> providing
- * expected average <i>log(n)</i> time cost for the
+ * href="http://en.wikipedia.org/wiki/Skip_list" target="_top">SkipLists</a>
+ * providing expected average <i>log(n)</i> time cost for the
  * <tt>containsKey</tt>, <tt>get</tt>, <tt>put</tt> and
  * <tt>remove</tt> operations and their variants.  Insertion, removal,
  * update, and access operations safely execute concurrently by
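A brief usage sketch (not part of this changeset; the class name and values are illustrative) of the operations the javadoc names, each with the documented expected log(n) cost:

    import java.util.concurrent.ConcurrentNavigableMap;
    import java.util.concurrent.ConcurrentSkipListMap;

    public class SkipListDemo {
        public static void main(String[] args) {
            // containsKey, get, put and remove are the expected-log(n)
            // operations documented above; iteration is key-ordered.
            ConcurrentNavigableMap<Integer, String> m =
                new ConcurrentSkipListMap<Integer, String>();
            m.put(1, "one");
            m.put(3, "three");
            m.put(2, "two");
            System.out.println(m.containsKey(2)); // true
            System.out.println(m.get(1));         // one
            m.remove(3);
            System.out.println(m.firstKey());     // 1
        }
    }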
--- a/jdk/src/share/classes/java/util/concurrent/Exchanger.java	Tue Mar 08 15:09:49 2011 -0800
+++ b/jdk/src/share/classes/java/util/concurrent/Exchanger.java	Tue Mar 08 15:10:48 2011 -0800
@@ -164,8 +164,8 @@
      * races between two threads or thread pre-emptions occurring
      * between reading and CASing.  Also, very transient peak
      * contention can be much higher than the average sustainable
-     * levels.  The max limit is decreased on average 50% of the times
-     * that a non-slot-zero wait elapses without being fulfilled.
+     * levels.  An attempt to decrease the max limit is usually made
+     * when a non-slot-zero wait elapses without being fulfilled.
      * Threads experiencing elapsed waits move closer to zero, so
      * eventually find existing (or future) threads even if the table
      * has been shrunk due to inactivity.  The chosen mechanics and
--- a/jdk/src/share/classes/java/util/concurrent/ForkJoinPool.java	Tue Mar 08 15:09:49 2011 -0800
+++ b/jdk/src/share/classes/java/util/concurrent/ForkJoinPool.java	Tue Mar 08 15:10:48 2011 -0800
@@ -40,6 +40,7 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
+import java.util.Random;
 import java.util.concurrent.AbstractExecutorService;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
@@ -51,6 +52,7 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.LockSupport;
 import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.Condition;
 
 /**
  * An {@link ExecutorService} for running {@link ForkJoinTask}s.
@@ -158,239 +160,208 @@
      * set of worker threads: Submissions from non-FJ threads enter
      * into a submission queue. Workers take these tasks and typically
      * split them into subtasks that may be stolen by other workers.
-     * The main work-stealing mechanics implemented in class
-     * ForkJoinWorkerThread give first priority to processing tasks
-     * from their own queues (LIFO or FIFO, depending on mode), then
-     * to randomized FIFO steals of tasks in other worker queues, and
-     * lastly to new submissions. These mechanics do not consider
-     * affinities, loads, cache localities, etc, so rarely provide the
-     * best possible performance on a given machine, but portably
-     * provide good throughput by averaging over these factors.
-     * (Further, even if we did try to use such information, we do not
-     * usually have a basis for exploiting it. For example, some sets
-     * of tasks profit from cache affinities, but others are harmed by
-     * cache pollution effects.)
+     * Preference rules give first priority to processing tasks from
+     * their own queues (LIFO or FIFO, depending on mode), then to
+     * randomized FIFO steals of tasks in other worker queues, and
+     * lastly to new submissions.
+     *
+     * The main throughput advantages of work-stealing stem from
+     * decentralized control -- workers mostly take tasks from
+     * themselves or each other. We cannot negate this in the
+     * implementation of other management responsibilities. The main
+     * tactic for avoiding bottlenecks is packing nearly all
+     * essentially atomic control state into a single 64bit volatile
+     * variable ("ctl"). This variable is read on the order of 10-100
+     * times as often as it is modified (always via CAS). (There is
+     * some additional control state, for example variable "shutdown"
+     * for which we can cope with uncoordinated updates.)  This
+     * streamlines synchronization and control at the expense of messy
+     * constructions needed to repack status bits upon updates.
+     * Updates tend not to contend with each other except during
+     * bursts while submitted tasks begin or end.  In some cases when
+     * they do contend, threads can instead do something else
+     * (usually, scan for tasks) until contention subsides.
+     *
+     * To enable packing, we restrict maximum parallelism to (1<<15)-1
+     * (which is far in excess of normal operating range) to allow
+     * ids, counts, and their negations (used for thresholding) to fit
+     * into 16bit fields.
+     *
+     * Recording Workers.  Workers are recorded in the "workers" array
+     * that is created upon pool construction and expanded if (rarely)
+     * necessary.  This is an array as opposed to some other data
+     * structure to support index-based random steals by workers.
+     * Updates to the array recording new workers and unrecording
+     * terminated ones are protected from each other by a seqLock
+     * (scanGuard) but the array is otherwise concurrently readable,
+     * and accessed directly by workers. To simplify index-based
+     * operations, the array size is always a power of two, and all
+     * readers must tolerate null slots. To avoid flailing during
+     * start-up, the array is presized to hold twice #parallelism
+     * workers (which is unlikely to need further resizing during
+     * execution). But to avoid dealing with so many null slots,
+     * variable scanGuard includes a mask for the nearest power of two
+     * that contains all current workers.  All worker thread creation
+     * is on-demand, triggered by task submissions, replacement of
+     * terminated workers, and/or compensation for blocked
+     * workers. However, all other support code is set up to work with
+     * other policies.  To ensure that we do not hold on to worker
+     * references that would prevent GC, ALL accesses to workers are
+     * via indices into the workers array (which is one source of some
+     * of the messy code constructions here). In essence, the workers
+     * array serves as a weak reference mechanism. Thus for example
+     * the wait queue field of ctl stores worker indices, not worker
+     * references.  Access to the workers in associated methods (for
+     * example signalWork) must both index-check and null-check the
+     * IDs. All such accesses ignore bad IDs by returning out early
+     * from what they are doing, since this can only be associated
+     * with termination, in which case it is OK to give up.
      *
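A minimal sketch of the index-check/null-check discipline described above (hypothetical helper; the real checks are inlined in methods such as signalWork):

    // Hypothetical helper, for illustration only: resolve a worker id
    // taken from the ctl wait-queue field, bailing out on any bad id
    // instead of throwing, since bad ids can only occur near termination.
    ForkJoinWorkerThread workerFor(int e) {
        ForkJoinWorkerThread[] ws = workers; // may have been replaced
        int i = ~e & SMASK;                  // decode ~poolIndex
        if (ws == null || i >= ws.length)    // index-check
            return null;                     // stale; OK to give up
        return ws[i];                        // may be null; callers recheck
    }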
-     * Beyond work-stealing support and essential bookkeeping, the
-     * main responsibility of this framework is to take actions when
-     * one worker is waiting to join a task stolen (or always held by)
-     * another.  Because we are multiplexing many tasks on to a pool
-     * of workers, we can't just let them block (as in Thread.join).
-     * We also cannot just reassign the joiner's run-time stack with
-     * another and replace it later, which would be a form of
-     * "continuation", that even if possible is not necessarily a good
-     * idea. Given that the creation costs of most threads on most
-     * systems mainly surrounds setting up runtime stacks, thread
-     * creation and switching is usually not much more expensive than
-     * stack creation and switching, and is more flexible). Instead we
+     * All uses of the workers array, as well as queue arrays, check
+     * that the array is non-null (even if previously non-null). This
+     * allows nulling during termination, which is currently not
+     * necessary, but remains an option for resource-revocation-based
+     * shutdown schemes.
+     *
+     * Wait Queuing. Unlike HPC work-stealing frameworks, we cannot
+     * let workers spin indefinitely scanning for tasks when none can
+     * be found immediately, and we cannot start/resume workers unless
+     * there appear to be tasks available.  On the other hand, we must
+     * quickly prod them into action when new tasks are submitted or
+     * generated.  We park/unpark workers after placing in an event
+     * wait queue when they cannot find work. This "queue" is actually
+     * a simple Treiber stack, headed by the "id" field of ctl, plus a
+     * 15bit counter value to both wake up waiters (by advancing their
+     * count) and avoid ABA effects. Successors are held in worker
+     * field "nextWait".  Queuing deals with several intrinsic races,
+     * mainly that a task-producing thread can miss seeing (and
+     * signalling) another thread that gave up looking for work but
+     * has not yet entered the wait queue. We solve this by requiring
+     * a full sweep of all workers both before (in scan()) and after
+     * (in tryAwaitWork()) a newly waiting worker is added to the wait
+     * queue. During a rescan, the worker might release some other
+     * queued worker rather than itself, which has the same net
+     * effect. Because enqueued workers may actually be rescanning
+     * rather than waiting, we set and clear the "parked" field of
+     * ForkJoinWorkerThread to reduce unnecessary calls to unpark.
+     * (Use of the parked field requires a secondary recheck to avoid
+     * missed signals.)
+     *
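A standalone sketch of the counter-tagged Treiber stack idea (simplified and hypothetical; the real queue packs its head into ctl alongside the other control fields and threads successors through worker field "nextWait"):

    import java.util.concurrent.atomic.AtomicLong;

    // The head packs {push counter, index + 1} into one CAS'ed word;
    // advancing the counter on every update is what avoids ABA when
    // the same slot is pushed again.
    class IndexedTreiberStack {
        final int[] nextWait;                       // successors, packed as index + 1
        final AtomicLong head = new AtomicLong(0L); // (count << 16) | (index + 1)

        IndexedTreiberStack(int slots) { nextWait = new int[slots]; }

        void push(int i) {
            long h;
            do {
                h = head.get();
                nextWait[i] = (int) h & 0xffff;     // link to current top
            } while (!head.compareAndSet(h, ((h >>> 16) + 1) << 16 | (i + 1)));
        }

        int pop() {                                 // returns -1 if empty
            long h;
            int top;
            do {
                if ((top = (int) ((h = head.get()) & 0xffff)) == 0)
                    return -1;
            } while (!head.compareAndSet(h,
                         ((h >>> 16) + 1) << 16 | nextWait[top - 1]));
            return top - 1;
        }
    }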
+     * Signalling.  We create or wake up workers only when there
+     * appears to be at least one task they might be able to find and
+     * execute.  When a submission is added or another worker adds a
+     * task to a queue that previously had two or fewer tasks, they
+     * signal waiting workers (or trigger creation of new ones if
+     * fewer than the given parallelism level -- see signalWork).
+     * These primary signals are buttressed by signals during rescans
+     * as well as those performed when a worker steals a task and
+     * notices that there are more tasks too; together these cover the
+     * signals needed in cases when more than two tasks are pushed
+     * but untaken.
+     *
+     * Trimming workers. To release resources after periods of lack of
+     * use, a worker starting to wait when the pool is quiescent will
+     * time out and terminate if the pool has remained quiescent for
+     * SHRINK_RATE nanosecs. This will slowly propagate, eventually
+     * terminating all workers after long periods of non-use.
+     *
+     * Submissions. External submissions are maintained in an
+     * array-based queue that is structured identically to
+     * ForkJoinWorkerThread queues except for the use of
+     * submissionLock in method addSubmission. Unlike the case for
+     * worker queues, multiple external threads can add new
+     * submissions, so adding requires a lock.
+     *
+     * Compensation. Beyond work-stealing support and lifecycle
+     * control, the main responsibility of this framework is to take
+     * actions when one worker is waiting to join a task stolen (or
+     * always held by) another.  Because we are multiplexing many
+     * tasks on to a pool of workers, we can't just let them block (as
+     * in Thread.join).  We also cannot just reassign the joiner's
+     * run-time stack with another and replace it later, which would
+     * be a form of "continuation", that even if possible is not
+     * necessarily a good idea since we sometimes need both an
+     * unblocked task and its continuation to progress. Instead we
      * combine two tactics:
      *
      *   Helping: Arranging for the joiner to execute some task that it
      *      would be running if the steal had not occurred.  Method
-     *      ForkJoinWorkerThread.helpJoinTask tracks joining->stealing
+     *      ForkJoinWorkerThread.joinTask tracks joining->stealing
      *      links to try to find such a task.
      *
      *   Compensating: Unless there are already enough live threads,
-     *      method helpMaintainParallelism() may create or
-     *      re-activate a spare thread to compensate for blocked
-     *      joiners until they unblock.
-     *
-     * It is impossible to keep exactly the target (parallelism)
-     * number of threads running at any given time.  Determining
-     * existence of conservatively safe helping targets, the
-     * availability of already-created spares, and the apparent need
-     * to create new spares are all racy and require heuristic
-     * guidance, so we rely on multiple retries of each.  Compensation
-     * occurs in slow-motion. It is triggered only upon timeouts of
-     * Object.wait used for joins. This reduces poor decisions that
-     * would otherwise be made when threads are waiting for others
-     * that are stalled because of unrelated activities such as
-     * garbage collection.
+     *      method tryPreBlock() may create or re-activate a spare
+     *      thread to compensate for blocked joiners until they
+     *      unblock.
      *
      * The ManagedBlocker extension API can't use helping so relies
      * only on compensation in method awaitBlocker.
      *
-     * The main throughput advantages of work-stealing stem from
-     * decentralized control -- workers mostly steal tasks from each
-     * other. We do not want to negate this by creating bottlenecks
-     * implementing other management responsibilities. So we use a
-     * collection of techniques that avoid, reduce, or cope well with
-     * contention. These entail several instances of bit-packing into
-     * CASable fields to maintain only the minimally required
-     * atomicity. To enable such packing, we restrict maximum
-     * parallelism to (1<<15)-1 (enabling twice this (to accommodate
-     * unbalanced increments and decrements) to fit into a 16 bit
-     * field, which is far in excess of normal operating range.  Even
-     * though updates to some of these bookkeeping fields do sometimes
-     * contend with each other, they don't normally cache-contend with
-     * updates to others enough to warrant memory padding or
-     * isolation. So they are all held as fields of ForkJoinPool
-     * objects.  The main capabilities are as follows:
-     *
-     * 1. Creating and removing workers. Workers are recorded in the
-     * "workers" array. This is an array as opposed to some other data
-     * structure to support index-based random steals by workers.
-     * Updates to the array recording new workers and unrecording
-     * terminated ones are protected from each other by a lock
-     * (workerLock) but the array is otherwise concurrently readable,
-     * and accessed directly by workers. To simplify index-based
-     * operations, the array size is always a power of two, and all
-     * readers must tolerate null slots. Currently, all worker thread
-     * creation is on-demand, triggered by task submissions,
-     * replacement of terminated workers, and/or compensation for
-     * blocked workers. However, all other support code is set up to
-     * work with other policies.
-     *
-     * To ensure that we do not hold on to worker references that
-     * would prevent GC, ALL accesses to workers are via indices into
-     * the workers array (which is one source of some of the unusual
-     * code constructions here). In essence, the workers array serves
-     * as a WeakReference mechanism. Thus for example the event queue
-     * stores worker indices, not worker references. Access to the
-     * workers in associated methods (for example releaseEventWaiters)
-     * must both index-check and null-check the IDs. All such accesses
-     * ignore bad IDs by returning out early from what they are doing,
-     * since this can only be associated with shutdown, in which case
-     * it is OK to give up. On termination, we just clobber these
-     * data structures without trying to use them.
-     *
-     * 2. Bookkeeping for dynamically adding and removing workers. We
-     * aim to approximately maintain the given level of parallelism.
-     * When some workers are known to be blocked (on joins or via
-     * ManagedBlocker), we may create or resume others to take their
-     * place until they unblock (see below). Implementing this
-     * requires counts of the number of "running" threads (i.e., those
-     * that are neither blocked nor artificially suspended) as well as
-     * the total number.  These two values are packed into one field,
-     * "workerCounts" because we need accurate snapshots when deciding
-     * to create, resume or suspend.  Note however that the
-     * correspondence of these counts to reality is not guaranteed. In
-     * particular updates for unblocked threads may lag until they
-     * actually wake up.
-     *
-     * 3. Maintaining global run state. The run state of the pool
-     * consists of a runLevel (SHUTDOWN, TERMINATING, etc) similar to
-     * those in other Executor implementations, as well as a count of
-     * "active" workers -- those that are, or soon will be, or
-     * recently were executing tasks. The runLevel and active count
-     * are packed together in order to correctly trigger shutdown and
-     * termination. Without care, active counts can be subject to very
-     * high contention.  We substantially reduce this contention by
-     * relaxing update rules.  A worker must claim active status
-     * prospectively, by activating if it sees that a submitted or
-     * stealable task exists (it may find after activating that the
-     * task no longer exists). It stays active while processing this
-     * task (if it exists) and any other local subtasks it produces,
-     * until it cannot find any other tasks. It then tries
-     * inactivating (see method preStep), but upon update contention
-     * instead scans for more tasks, later retrying inactivation if it
-     * doesn't find any.
+     * It is impossible to keep exactly the target parallelism number
+     * of threads running at any given time.  Determining the
+     * existence of conservatively safe helping targets, the
+     * availability of already-created spares, and the apparent need
+     * to create new spares are all racy and require heuristic
+     * guidance, so we rely on multiple retries of each.  Currently,
+     * in keeping with on-demand signalling policy, we compensate only
+     * if blocking would leave less than one active (non-waiting,
+     * non-blocked) worker. Additionally, to avoid some false alarms
+     * due to GC, lagging counters, system activity, etc., compensated
+     * blocking for joins is only attempted after rechecks stabilize
+     * (retries are interspersed with Thread.yield, for good
+     * citizenship).  The variable blockedCount, incremented before
+     * blocking and decremented after, is sometimes needed to
+     * distinguish cases of waiting for work vs blocking on joins or
+     * other managed sync. Both cases are equivalent for most pool
+     * control, so we can update non-atomically. (Additionally,
+     * contention on blockedCount alleviates some contention on ctl).
      *
-     * 4. Managing idle workers waiting for tasks. We cannot let
-     * workers spin indefinitely scanning for tasks when none are
-     * available. On the other hand, we must quickly prod them into
-     * action when new tasks are submitted or generated.  We
-     * park/unpark these idle workers using an event-count scheme.
-     * Field eventCount is incremented upon events that may enable
-     * workers that previously could not find a task to now find one:
-     * Submission of a new task to the pool, or another worker pushing
-     * a task onto a previously empty queue.  (We also use this
-     * mechanism for configuration and termination actions that
-     * require wakeups of idle workers).  Each worker maintains its
-     * last known event count, and blocks when a scan for work did not
-     * find a task AND its lastEventCount matches the current
-     * eventCount. Waiting idle workers are recorded in a variant of
-     * Treiber stack headed by field eventWaiters which, when nonzero,
-     * encodes the thread index and count awaited for by the worker
-     * thread most recently calling eventSync. This thread in turn has
-     * a record (field nextEventWaiter) for the next waiting worker.
-     * In addition to allowing simpler decisions about need for
-     * wakeup, the event count bits in eventWaiters serve the role of
-     * tags to avoid ABA errors in Treiber stacks. Upon any wakeup,
-     * released threads also try to release at most two others.  The
-     * net effect is a tree-like diffusion of signals, where released
-     * threads (and possibly others) help with unparks.  To further
-     * reduce contention effects a bit, failed CASes to increment
-     * field eventCount are tolerated without retries in signalWork.
-     * Conceptually they are merged into the same event, which is OK
-     * when their only purpose is to enable workers to scan for work.
+     * Shutdown and Termination. A call to shutdownNow atomically sets
+     * the ctl stop bit and then (non-atomically) sets each worker's
+     * "terminate" status, cancels all unprocessed tasks, and wakes up
+     * all waiting workers.  Detecting whether termination should
+     * commence after a non-abrupt shutdown() call requires more work
+     * and bookkeeping. We need consensus about quiescence (i.e., that
+     * there is no more work) which is reflected in active counts so
+     * long as there are no current blockers, as well as possible
+     * re-evaluations during independent changes in blocking or
+     * quiescing workers.
      *
-     * 5. Managing suspension of extra workers. When a worker notices
-     * (usually upon timeout of a wait()) that there are too few
-     * running threads, we may create a new thread to maintain
-     * parallelism level, or at least avoid starvation. Usually, extra
-     * threads are needed for only very short periods, yet join
-     * dependencies are such that we sometimes need them in
-     * bursts. Rather than create new threads each time this happens,
-     * we suspend no-longer-needed extra ones as "spares". For most
-     * purposes, we don't distinguish "extra" spare threads from
-     * normal "core" threads: On each call to preStep (the only point
-     * at which we can do this) a worker checks to see if there are
-     * now too many running workers, and if so, suspends itself.
-     * Method helpMaintainParallelism looks for suspended threads to
-     * resume before considering creating a new replacement. The
-     * spares themselves are encoded on another variant of a Treiber
-     * Stack, headed at field "spareWaiters".  Note that the use of
-     * spares is intrinsically racy.  One thread may become a spare at
-     * about the same time as another is needlessly being created. We
-     * counteract this and related slop in part by requiring resumed
-     * spares to immediately recheck (in preStep) to see whether they
-     * should re-suspend.
-     *
-     * 6. Killing off unneeded workers. A timeout mechanism is used to
-     * shed unused workers: The oldest (first) event queue waiter uses
-     * a timed rather than hard wait. When this wait times out without
-     * a normal wakeup, it tries to shutdown any one (for convenience
-     * the newest) other spare or event waiter via
-     * tryShutdownUnusedWorker. This eventually reduces the number of
-     * worker threads to a minimum of one after a long enough period
-     * without use.
-     *
-     * 7. Deciding when to create new workers. The main dynamic
-     * control in this class is deciding when to create extra threads
-     * in method helpMaintainParallelism. We would like to keep
-     * exactly #parallelism threads running, which is an impossible
-     * task. We always need to create one when the number of running
-     * threads would become zero and all workers are busy. Beyond
-     * this, we must rely on heuristics that work well in the
-     * presence of transient phenomena such as GC stalls, dynamic
-     * compilation, and wake-up lags. These transients are extremely
-     * common -- we are normally trying to fully saturate the CPUs on
-     * a machine, so almost any activity other than running tasks
-     * impedes accuracy. Our main defense is to allow parallelism to
-     * lapse for a while during joins, and use a timeout to see if,
-     * after the resulting settling, there is still a need for
-     * additional workers.  This also better copes with the fact that
-     * some of the methods in this class tend to never become compiled
-     * (but are interpreted), so some components of the entire set of
-     * controls might execute 100 times faster than others. And
-     * similarly for cases where the apparent lack of work is just due
-     * to GC stalls and other transient system activity.
-     *
-     * Beware that there is a lot of representation-level coupling
+     * Style notes: There is a lot of representation-level coupling
      * among classes ForkJoinPool, ForkJoinWorkerThread, and
-     * ForkJoinTask.  For example, direct access to "workers" array by
+     * ForkJoinTask.  Most fields of ForkJoinWorkerThread maintain
+     * data structures managed by ForkJoinPool, so are directly
+     * accessed.  Conversely we allow access to "workers" array by
      * workers, and direct access to ForkJoinTask.status by both
      * ForkJoinPool and ForkJoinWorkerThread.  There is little point
      * trying to reduce this, since any associated future changes in
      * representations will need to be accompanied by algorithmic
-     * changes anyway.
+     * changes anyway. All together, these low-level implementation
+     * choices produce as much as a factor of 4 performance
+     * improvement compared to naive implementations, and enable the
+     * processing of billions of tasks per second, at the expense of
+     * some ugliness.
      *
-     * Style notes: There are lots of inline assignments (of form
-     * "while ((local = field) != 0)") which are usually the simplest
-     * way to ensure the required read orderings (which are sometimes
-     * critical). Also several occurrences of the unusual "do {}
-     * while (!cas...)" which is the simplest way to force an update of
-     * a CAS'ed variable. There are also other coding oddities that
-     * help some methods perform reasonably even when interpreted (not
-     * compiled), at the expense of some messy constructions that
-     * reduce byte code counts.
+     * Methods signalWork() and scan() are the main bottlenecks so are
+     * especially heavily micro-optimized/mangled.  There are lots of
+     * inline assignments (of form "while ((local = field) != 0)")
+     * which are usually the simplest way to ensure the required read
+     * orderings (which are sometimes critical). This leads to a
+     * "C"-like style of listing declarations of these locals at the
+     * heads of methods or blocks.  There are several occurrences of
+     * the unusual "do {} while (!cas...)"  which is the simplest way
+     * to force an update of a CAS'ed variable. There are also other
+     * coding oddities that help some methods perform reasonably even
+     * when interpreted (not compiled).
      *
-     * The order of declarations in this file is: (1) statics (2)
-     * fields (along with constants used when unpacking some of them)
-     * (3) internal control methods (4) callbacks and other support
-     * for ForkJoinTask and ForkJoinWorkerThread classes, (5) exported
-     * methods (plus a few little helpers).
+     * The order of declarations in this file is: (1) declarations of
+     * statics (2) fields (along with constants used when unpacking
+     * some of them), listed in an order that tends to reduce
+     * contention among them a bit under most JVMs.  (3) internal
+     * control methods (4) callbacks and other support for
+     * ForkJoinTask and ForkJoinWorkerThread classes, (5) exported
+     * methods (plus a few little helpers). (6) static block
+     * initializing all statics in a minimally dependent order.
      */
 
     /**
@@ -425,15 +396,13 @@
      * overridden in ForkJoinPool constructors.
      */
     public static final ForkJoinWorkerThreadFactory
-        defaultForkJoinWorkerThreadFactory =
-        new DefaultForkJoinWorkerThreadFactory();
+        defaultForkJoinWorkerThreadFactory;
 
     /**
      * Permission required for callers of methods that may start or
      * kill threads.
      */
-    private static final RuntimePermission modifyThreadPermission =
-        new RuntimePermission("modifyThread");
+    private static final RuntimePermission modifyThreadPermission;
 
     /**
      * If there is a security manager, makes sure caller has
@@ -448,63 +417,59 @@
     /**
      * Generator for assigning sequence numbers as pool names.
      */
-    private static final AtomicInteger poolNumberGenerator =
-        new AtomicInteger();
+    private static final AtomicInteger poolNumberGenerator;
 
     /**
-     * The time to block in a join (see awaitJoin) before checking if
-     * a new worker should be (re)started to maintain parallelism
-     * level. The value should be short enough to maintain global
-     * responsiveness and progress but long enough to avoid
-     * counterproductive firings during GC stalls or unrelated system
-     * activity, and to not bog down systems with continual re-firings
-     * on GCs or legitimately long waits.
+     * Generator for initial random seeds for worker victim
+     * selection. This is used only to create initial seeds. Random
+     * steals use a cheaper xorshift generator per steal attempt. We
+     * don't expect much contention on seedGenerator, so just use a
+     * plain Random.
      */
-    private static final long JOIN_TIMEOUT_MILLIS = 250L; // 4 per second
+    static final Random workerSeedGenerator;
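The per-steal step is the classic Marsaglia xorshift that appears inline in scan() below; extracted here as a sketch (the method name is illustrative). Seeds must be nonzero, which the Random-generated initial seeds guarantee:

    // One xorshift step, as performed per steal attempt in scan();
    // a nonzero seed never becomes zero under these three updates.
    static int xorShift(int r) {
        r ^= r << 13;
        r ^= r >>> 17;
        return r ^ (r << 5);
    }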
 
     /**
-     * The wakeup interval (in nanoseconds) for the oldest worker
-     * waiting for an event to invoke tryShutdownUnusedWorker to
-     * shrink the number of workers.  The exact value does not matter
-     * too much. It must be short enough to release resources during
-     * sustained periods of idleness, but not so short that threads
-     * are continually re-created.
+     * Array holding all worker threads in the pool.  Initialized upon
+     * construction. Array size must be a power of two.  Updates and
+     * replacements are protected by scanGuard, but the array is
+     * always kept in a consistent enough state to be randomly
+     * accessed without locking by workers performing work-stealing,
+     * as well as other traversal-based methods in this class, so long
+     * as reads memory-acquire by first reading ctl. All readers must
+     * tolerate that some array slots may be null.
      */
-    private static final long SHRINK_RATE_NANOS =
-        30L * 1000L * 1000L * 1000L; // 2 per minute
+    ForkJoinWorkerThread[] workers;
 
     /**
-     * Absolute bound for parallelism level. Twice this number plus
-     * one (i.e., 0xfff) must fit into a 16bit field to enable
-     * word-packing for some counts and indices.
+     * Initial size for submission queue array. Must be a power of
+     * two.  In many applications, these always stay small so we use a
+     * small initial cap.
      */
-    private static final int MAX_WORKERS   = 0x7fff;
+    private static final int INITIAL_QUEUE_CAPACITY = 8;
+
+    /**
+     * Maximum size for submission queue array. Must be a power of two
+     * less than or equal to 1 << (31 - width of array entry) to
+     * ensure lack of index wraparound, but is capped at a lower
+     * value to help users trap runaway computations.
+     */
+    private static final int MAXIMUM_QUEUE_CAPACITY = 1 << 24; // 16M
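Because capacities are powers of two, a queue index is obtained by masking rather than modulus, and it remains valid even after the int counters wrap around. A minimal sketch of the pattern used throughout (hypothetical helper name):

    // With q.length a power of two, (q.length - 1) & b is always in
    // [0, q.length), even for negative (wrapped-around) values of b.
    static int slotFor(int b, ForkJoinTask<?>[] q) {
        return (q.length - 1) & b;
    }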
 
     /**
-     * Array holding all worker threads in the pool.  Array size must
-     * be a power of two.  Updates and replacements are protected by
-     * workerLock, but the array is always kept in a consistent enough
-     * state to be randomly accessed without locking by workers
-     * performing work-stealing, as well as other traversal-based
-     * methods in this class. All readers must tolerate that some
-     * array slots may be null.
+     * Array serving as submission queue. Initialized upon construction.
      */
-    volatile ForkJoinWorkerThread[] workers;
+    private ForkJoinTask<?>[] submissionQueue;
 
     /**
-     * Queue for external submissions.
+     * Lock protecting submissions array for addSubmission
      */
-    private final LinkedTransferQueue<ForkJoinTask<?>> submissionQueue;
+    private final ReentrantLock submissionLock;
 
     /**
-     * Lock protecting updates to workers array.
+     * Condition for awaitTermination, using submissionLock for
+     * convenience.
      */
-    private final ReentrantLock workerLock;
-
-    /**
-     * Latch released upon termination.
-     */
-    private final Phaser termination;
+    private final Condition termination;
 
     /**
      * Creation factory for worker threads.
@@ -512,227 +477,719 @@
     private final ForkJoinWorkerThreadFactory factory;
 
     /**
+     * The uncaught exception handler used when any worker abruptly
+     * terminates.
+     */
+    final Thread.UncaughtExceptionHandler ueh;
+
+    /**
+     * Prefix for assigning names to worker threads
+     */
+    private final String workerNamePrefix;
+
+    /**
      * Sum of per-thread steal counts, updated only when threads are
      * idle or terminating.
      */
     private volatile long stealCount;
 
     /**
-     * Encoded record of top of Treiber stack of threads waiting for
-     * events. The top 32 bits contain the count being waited for. The
-     * bottom 16 bits contains one plus the pool index of waiting
-     * worker thread. (Bits 16-31 are unused.)
+     * Main pool control -- a long packed with:
+     * AC: Number of active running workers minus target parallelism (16 bits)
+     * TC: Number of total workers minus target parallelism (16 bits)
+     * ST: true if pool is terminating (1 bit)
+     * EC: the wait count of top waiting thread (15 bits)
+     * ID: ~poolIndex of top of Treiber stack of waiting threads (16 bits)
+     *
+     * When convenient, we can extract the upper 32 bits of counts and
+     * the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e =
+     * (int)ctl.  The ec field is never accessed alone, but always
+     * together with id and st. The offsets of counts by the target
+     * parallelism and the positionings of fields makes it possible to
+     * perform the most common checks via sign tests of fields: When
+     * ac is negative, there are not enough active workers, when tc is
+     * negative, there are not enough total workers, when id is
+     * negative, there is at least one waiting worker, and when e is
+     * negative, the pool is terminating.  To deal with these possibly
+     * negative fields, we use casts in and out of "short" and/or
+     * signed shifts to maintain signedness.
      */
-    private volatile long eventWaiters;
-
-    private static final int EVENT_COUNT_SHIFT = 32;
-    private static final int WAITER_ID_MASK    = (1 << 16) - 1;
-
-    /**
-     * A counter for events that may wake up worker threads:
-     *   - Submission of a new task to the pool
-     *   - A worker pushing a task on an empty queue
-     *   - termination
-     */
-    private volatile int eventCount;
-
-    /**
-     * Encoded record of top of Treiber stack of spare threads waiting
-     * for resumption. The top 16 bits contain an arbitrary count to
-     * avoid ABA effects. The bottom 16bits contains one plus the pool
-     * index of waiting worker thread.
-     */
-    private volatile int spareWaiters;
-
-    private static final int SPARE_COUNT_SHIFT = 16;
-    private static final int SPARE_ID_MASK     = (1 << 16) - 1;
+    volatile long ctl;
 
-    /**
-     * Lifecycle control. The low word contains the number of workers
-     * that are (probably) executing tasks. This value is atomically
-     * incremented before a worker gets a task to run, and decremented
-     * when a worker has no tasks and cannot find any.  Bits 16-18
-     * contain runLevel value. When all are zero, the pool is
-     * running. Level transitions are monotonic (running -> shutdown
-     * -> terminating -> terminated) so each transition adds a bit.
-     * These are bundled together to ensure consistent read for
-     * termination checks (i.e., that runLevel is at least SHUTDOWN
-     * and active threads is zero).
-     *
-     * Notes: Most direct CASes are dependent on these bitfield
-     * positions.  Also, this field is non-private to enable direct
-     * performance-sensitive CASes in ForkJoinWorkerThread.
-     */
-    volatile int runState;
+    // bit positions/shifts for fields
+    private static final int  AC_SHIFT   = 48;
+    private static final int  TC_SHIFT   = 32;
+    private static final int  ST_SHIFT   = 31;
+    private static final int  EC_SHIFT   = 16;
+
+    // bounds
+    private static final int  MAX_ID     = 0x7fff;  // max poolIndex
+    private static final int  SMASK      = 0xffff;  // mask short bits
+    private static final int  SHORT_SIGN = 1 << 15;
+    private static final int  INT_SIGN   = 1 << 31;
 
-    // Note: The order among run level values matters.
-    private static final int RUNLEVEL_SHIFT     = 16;
-    private static final int SHUTDOWN           = 1 << RUNLEVEL_SHIFT;
-    private static final int TERMINATING        = 1 << (RUNLEVEL_SHIFT + 1);
-    private static final int TERMINATED         = 1 << (RUNLEVEL_SHIFT + 2);
-    private static final int ACTIVE_COUNT_MASK  = (1 << RUNLEVEL_SHIFT) - 1;
+    // masks
+    private static final long STOP_BIT   = 0x0001L << ST_SHIFT;
+    private static final long AC_MASK    = ((long)SMASK) << AC_SHIFT;
+    private static final long TC_MASK    = ((long)SMASK) << TC_SHIFT;
+
+    // units for incrementing and decrementing
+    private static final long TC_UNIT    = 1L << TC_SHIFT;
+    private static final long AC_UNIT    = 1L << AC_SHIFT;
 
-    /**
-     * Holds number of total (i.e., created and not yet terminated)
-     * and running (i.e., not blocked on joins or other managed sync)
-     * threads, packed together to ensure consistent snapshot when
-     * making decisions about creating and suspending spare
-     * threads. Updated only by CAS. Note that adding a new worker
-     * requires incrementing both counts, since workers start off in
-     * running state.
-     */
-    private volatile int workerCounts;
+    // masks and units for dealing with u = (int)(ctl >>> 32)
+    private static final int  UAC_SHIFT  = AC_SHIFT - 32;
+    private static final int  UTC_SHIFT  = TC_SHIFT - 32;
+    private static final int  UAC_MASK   = SMASK << UAC_SHIFT;
+    private static final int  UTC_MASK   = SMASK << UTC_SHIFT;
+    private static final int  UAC_UNIT   = 1 << UAC_SHIFT;
+    private static final int  UTC_UNIT   = 1 << UTC_SHIFT;
 
-    private static final int TOTAL_COUNT_SHIFT  = 16;
-    private static final int RUNNING_COUNT_MASK = (1 << TOTAL_COUNT_SHIFT) - 1;
-    private static final int ONE_RUNNING        = 1;
-    private static final int ONE_TOTAL          = 1 << TOTAL_COUNT_SHIFT;
+    // masks and units for dealing with e = (int)ctl
+    private static final int  E_MASK     = 0x7fffffff; // no STOP_BIT
+    private static final int  EC_UNIT    = 1 << EC_SHIFT;
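A sketch of unpacking ctl with the constants above (a fragment over this class's fields; local names are illustrative), showing how the parallelism offsets reduce the common checks to sign tests:

    long c = ctl;
    int u  = (int)(c >>> 32);           // upper word: packed AC and TC
    int e  = (int)c;                    // lower word: packed ST, EC and ID
    int ac = (int)(c >> AC_SHIFT);      // active count minus parallelism
    int tc = (short)(c >>> TC_SHIFT);   // total count minus parallelism
    boolean tooFewActive = ac < 0;                // sign test on AC
    boolean tooFewTotal  = (u & SHORT_SIGN) != 0; // sign test on TC within u
    boolean someWaiter   = (short)e < 0;          // ID holds ~poolIndex
    boolean terminating  = e < 0;                 // STOP_BIT set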
 
     /**
      * The target parallelism level.
-     * Accessed directly by ForkJoinWorkerThreads.
      */
     final int parallelism;
 
     /**
+     * Index (mod submission queue length) of next element to take
+     * from submission queue. Usage is identical to that for
+     * per-worker queues -- see ForkJoinWorkerThread internal
+     * documentation.
+     */
+    volatile int queueBase;
+
+    /**
+     * Index (mod submission queue length) of next element to add
+     * in submission queue. Usage is identical to that for
+     * per-worker queues -- see ForkJoinWorkerThread internal
+     * documentation.
+     */
+    int queueTop;
+
+    /**
+     * True when shutdown() has been called.
+     */
+    volatile boolean shutdown;
+
+    /**
      * True if use local fifo, not default lifo, for local polling
      * Read by, and replicated by ForkJoinWorkerThreads
      */
     final boolean locallyFifo;
 
     /**
-     * The uncaught exception handler used when any worker abruptly
-     * terminates.
+     * The number of threads in ForkJoinWorkerThread.helpQuiescePool.
+     * When non-zero, suppresses automatic shutdown when active
+     * counts become zero.
+     */
+    volatile int quiescerCount;
+
+    /**
+     * The number of threads blocked in join.
+     */
+    volatile int blockedCount;
+
+    /**
+     * Counter for worker Thread names (unrelated to their poolIndex)
+     */
+    private volatile int nextWorkerNumber;
+
+    /**
+     * The index for the next created worker. Accessed under scanGuard.
      */
-    private final Thread.UncaughtExceptionHandler ueh;
+    private int nextWorkerIndex;
+
+    /**
+     * SeqLock and index masking for updates to workers array.  Locked
+     * when SG_UNIT is set. Unlocking clears bit by adding
+     * SG_UNIT. Staleness of read-only operations can be checked by
+     * comparing scanGuard to value before the reads. The low 16 bits
+     * (i.e, anding with SMASK) hold (the smallest power of two
+     * covering all worker indices, minus one, and is used to avoid
+     * dealing with large numbers of null slots when the workers array
+     * is overallocated.
+     */
+    volatile int scanGuard;
+
+    private static final int SG_UNIT = 1 << 16;
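A sketch of the read side of this seqLock (hypothetical method; the write side sets SG_UNIT, updates, then adds SG_UNIT again to clear the bit and advance the upper count):

    // Capture the guard, perform the reads, then validate; any
    // concurrent or in-progress update forces the caller to retry.
    boolean scanConsistently() {
        int g = scanGuard;                 // capture
        if ((g & SG_UNIT) != 0)
            return false;                  // writer active; retry later
        ForkJoinWorkerThread[] ws = workers;
        int m = g & SMASK;                 // mask covering live slots
        if (ws == null || ws.length <= m)
            return false;                  // staleness check, as in scan()
        // ... read ws[0..m], tolerating null slots ...
        return scanGuard == g;             // unchanged => consistent reads
    }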
+
+    /**
+     * The wakeup interval (in nanoseconds) for a worker waiting for a
+     * task when the pool is quiescent to instead try to shrink the
+     * number of workers.  The exact value does not matter too
+     * much. It must be short enough to release resources during
+     * sustained periods of idleness, but not so short that threads
+     * are continually re-created.
+     */
+    private static final long SHRINK_RATE =
+        4L * 1000L * 1000L * 1000L; // 4 seconds
 
     /**
-     * Pool number, just for assigning useful names to worker threads
+     * Top-level loop for worker threads: On each step: if the
+     * previous step swept through all queues and found no tasks, or
+     * there are excess threads, then possibly blocks. Otherwise,
+     * scans for and, if found, executes a task. Returns when pool
+     * and/or worker terminate.
+     *
+     * @param w the worker
      */
-    private final int poolNumber;
+    final void work(ForkJoinWorkerThread w) {
+        boolean swept = false;                // true on empty scans
+        long c;
+        while (!w.terminate && (int)(c = ctl) >= 0) {
+            int a;                            // active count
+            if (!swept && (a = (int)(c >> AC_SHIFT)) <= 0)
+                swept = scan(w, a);
+            else if (tryAwaitWork(w, c))
+                swept = false;
+        }
+    }
 
-    // Utilities for CASing fields. Note that most of these
-    // are usually manually inlined by callers
+    // Signalling
 
     /**
-     * Increments running count part of workerCounts.
+     * Wakes up or creates a worker.
      */
-    final void incrementRunningCount() {
-        int c;
-        do {} while (!UNSAFE.compareAndSwapInt(this, workerCountsOffset,
-                                               c = workerCounts,
-                                               c + ONE_RUNNING));
+    final void signalWork() {
+        /*
+         * The while condition is true if: (there are too few total
+         * workers OR there is at least one waiter) AND (there are too
+         * few active workers OR the pool is terminating).  The value
+         * of e distinguishes the remaining cases: zero (no waiters)
+         * for create, negative if terminating (in which case do
+         * nothing), else release a waiter. The secondary checks for
+         * release (non-null array etc) can fail if the pool begins
+         * terminating after the test, and don't impose any added cost
+         * because JVMs must perform null and bounds checks anyway.
+         */
+        long c; int e, u;
+        while ((((e = (int)(c = ctl)) | (u = (int)(c >>> 32))) &
+                (INT_SIGN|SHORT_SIGN)) == (INT_SIGN|SHORT_SIGN) && e >= 0) {
+            if (e > 0) {                         // release a waiting worker
+                int i; ForkJoinWorkerThread w; ForkJoinWorkerThread[] ws;
+                if ((ws = workers) == null ||
+                    (i = ~e & SMASK) >= ws.length ||
+                    (w = ws[i]) == null)
+                    break;
+                long nc = (((long)(w.nextWait & E_MASK)) |
+                           ((long)(u + UAC_UNIT) << 32));
+                if (w.eventCount == e &&
+                    UNSAFE.compareAndSwapLong(this, ctlOffset, c, nc)) {
+                    w.eventCount = (e + EC_UNIT) & E_MASK;
+                    if (w.parked)
+                        UNSAFE.unpark(w);
+                    break;
+                }
+            }
+            else if (UNSAFE.compareAndSwapLong
+                     (this, ctlOffset, c,
+                      (long)(((u + UTC_UNIT) & UTC_MASK) |
+                             ((u + UAC_UNIT) & UAC_MASK)) << 32)) {
+                addWorker();
+                break;
+            }
+        }
     }
 
     /**
-     * Tries to increment running count part of workerCounts.
+     * Variant of signalWork to help release waiters on rescans.
+     * Tries once to release a waiter if active count < 0.
+     *
+     * @return false if failed due to contention, else true
+     */
+    private boolean tryReleaseWaiter() {
+        long c; int e, i; ForkJoinWorkerThread w; ForkJoinWorkerThread[] ws;
+        if ((e = (int)(c = ctl)) > 0 &&
+            (int)(c >> AC_SHIFT) < 0 &&
+            (ws = workers) != null &&
+            (i = ~e & SMASK) < ws.length &&
+            (w = ws[i]) != null) {
+            long nc = ((long)(w.nextWait & E_MASK) |
+                       ((c + AC_UNIT) & (AC_MASK|TC_MASK)));
+            if (w.eventCount != e ||
+                !UNSAFE.compareAndSwapLong(this, ctlOffset, c, nc))
+                return false;
+            w.eventCount = (e + EC_UNIT) & E_MASK;
+            if (w.parked)
+                UNSAFE.unpark(w);
+        }
+        return true;
+    }
+
+    // Scanning for tasks
+
+    /**
+     * Scans for and, if found, executes one task. Scans start at a
+     * random index of workers array, and randomly select the first
+     * (2*#workers)-1 probes, and then, if all empty, resort to 2
+     * circular sweeps (which is necessary to check quiescence),
+     * taking a submission only if no stealable tasks were found.  The
+     * steal code inside the loop is a specialized form of
+     * ForkJoinWorkerThread.deqTask, followed by bookkeeping to support
+     * helpJoinTask and signal propagation. The code for submission
+     * queues is almost identical. On each steal, the worker completes
+     * not only the task, but also all local tasks that this task may
+     * have generated. On detecting staleness or contention when
+     * trying to take a task, this method returns without finishing
+     * the sweep, which allows global state rechecks before retry.
+     *
+     * @param w the worker
+     * @param a the number of active workers
+     * @return true if swept all queues without finding a task
      */
-    final boolean tryIncrementRunningCount() {
-        int c;
-        return UNSAFE.compareAndSwapInt(this, workerCountsOffset,
-                                        c = workerCounts,
-                                        c + ONE_RUNNING);
+    private boolean scan(ForkJoinWorkerThread w, int a) {
+        int g = scanGuard; // mask 0 avoids useless scans if only one active
+        int m = (parallelism == 1 - a && blockedCount == 0) ? 0 : g & SMASK;
+        ForkJoinWorkerThread[] ws = workers;
+        if (ws == null || ws.length <= m)         // staleness check
+            return false;
+        for (int r = w.seed, k = r, j = -(m + m); j <= m + m; ++j) {
+            ForkJoinTask<?> t; ForkJoinTask<?>[] q; int b, i;
+            ForkJoinWorkerThread v = ws[k & m];
+            if (v != null && (b = v.queueBase) != v.queueTop &&
+                (q = v.queue) != null && (i = (q.length - 1) & b) >= 0) {
+                long u = (i << ASHIFT) + ABASE;
+                if ((t = q[i]) != null && v.queueBase == b &&
+                    UNSAFE.compareAndSwapObject(q, u, t, null)) {
+                    int d = (v.queueBase = b + 1) - v.queueTop;
+                    v.stealHint = w.poolIndex;
+                    if (d != 0)
+                        signalWork();             // propagate if nonempty
+                    w.execTask(t);
+                }
+                r ^= r << 13; r ^= r >>> 17; w.seed = r ^ (r << 5);
+                return false;                     // store next seed
+            }
+            else if (j < 0) {                     // xorshift
+                r ^= r << 13; r ^= r >>> 17; k = r ^= r << 5;
+            }
+            else
+                ++k;
+        }
+        if (scanGuard != g)                       // staleness check
+            return false;
+        else {                                    // try to take submission
+            ForkJoinTask<?> t; ForkJoinTask<?>[] q; int b, i;
+            if ((b = queueBase) != queueTop &&
+                (q = submissionQueue) != null &&
+                (i = (q.length - 1) & b) >= 0) {
+                long u = (i << ASHIFT) + ABASE;
+                if ((t = q[i]) != null && queueBase == b &&
+                    UNSAFE.compareAndSwapObject(q, u, t, null)) {
+                    queueBase = b + 1;
+                    w.execTask(t);
+                }
+                return false;
+            }
+            return true;                         // all queues empty
+        }
     }
 
     /**
-     * Tries to decrement running count unless already zero.
-     */
-    final boolean tryDecrementRunningCount() {
-        int wc = workerCounts;
-        if ((wc & RUNNING_COUNT_MASK) == 0)
-            return false;
-        return UNSAFE.compareAndSwapInt(this, workerCountsOffset,
-                                        wc, wc - ONE_RUNNING);
-    }
-
-    /**
-     * Forces decrement of encoded workerCounts, awaiting nonzero if
-     * (rarely) necessary when other count updates lag.
+     * Tries to enqueue worker w in wait queue and await change in
+     * worker's eventCount.  If the pool is quiescent, possibly
+     * terminates worker upon exit.  Otherwise, before blocking,
+     * rescans queues to avoid missed signals.  Upon finding work,
+     * releases at least one worker (which may be the current
+     * worker). Rescans restart upon detected staleness or failure to
+     * release due to contention. Note the unusual conventions about
+     * Thread.interrupt here and elsewhere: Because interrupts are
+     * used solely to alert threads to check termination, which is
+     * checked here anyway, we clear status (using Thread.interrupted)
+     * before any call to park, so that park does not immediately
+     * return due to status being set via some other unrelated call to
+     * interrupt in user code.
      *
-     * @param dr -- either zero or ONE_RUNNING
-     * @param dt -- either zero or ONE_TOTAL
+     * @param w the calling worker
+     * @param c the ctl value on entry
+     * @return true if waited or another thread was released upon enq
      */
-    private void decrementWorkerCounts(int dr, int dt) {
-        for (;;) {
-            int wc = workerCounts;
-            if ((wc & RUNNING_COUNT_MASK)  - dr < 0 ||
-                (wc >>> TOTAL_COUNT_SHIFT) - dt < 0) {
-                if ((runState & TERMINATED) != 0)
-                    return; // lagging termination on a backout
-                Thread.yield();
+    private boolean tryAwaitWork(ForkJoinWorkerThread w, long c) {
+        int v = w.eventCount;
+        w.nextWait = (int)c;                      // w's successor record
+        long nc = (long)(v & E_MASK) | ((c - AC_UNIT) & (AC_MASK|TC_MASK));
+        if (ctl != c || !UNSAFE.compareAndSwapLong(this, ctlOffset, c, nc)) {
+            long d = ctl; // return true if lost to a deq, to force scan
+            return (int)d != (int)c && ((d - c) & AC_MASK) >= 0L;
+        }
+        for (int sc = w.stealCount; sc != 0;) {   // accumulate stealCount
+            long s = stealCount;
+            if (UNSAFE.compareAndSwapLong(this, stealCountOffset, s, s + sc))
+                sc = w.stealCount = 0;
+            else if (w.eventCount != v)
+                return true;                      // update next time
+        }
+        if (parallelism + (int)(nc >> AC_SHIFT) == 0 &&
+            blockedCount == 0 && quiescerCount == 0)
+            idleAwaitWork(w, nc, c, v);           // quiescent
+        for (boolean rescanned = false;;) {
+            if (w.eventCount != v)
+                return true;
+            if (!rescanned) {
+                int g = scanGuard, m = g & SMASK;
+                ForkJoinWorkerThread[] ws = workers;
+                if (ws != null && m < ws.length) {
+                    rescanned = true;
+                    for (int i = 0; i <= m; ++i) {
+                        ForkJoinWorkerThread u = ws[i];
+                        if (u != null) {
+                            if (u.queueBase != u.queueTop &&
+                                !tryReleaseWaiter())
+                                rescanned = false; // contended
+                            if (w.eventCount != v)
+                                return true;
+                        }
+                    }
+                }
+                if (scanGuard != g ||              // stale
+                    (queueBase != queueTop && !tryReleaseWaiter()))
+                    rescanned = false;
+                if (!rescanned)
+                    Thread.yield();                // reduce contention
+                else
+                    Thread.interrupted();          // clear before park
             }
-            if (UNSAFE.compareAndSwapInt(this, workerCountsOffset,
-                                         wc, wc - (dr + dt)))
-                return;
+            else {
+                w.parked = true;                   // must recheck
+                if (w.eventCount != v) {
+                    w.parked = false;
+                    return true;
+                }
+                LockSupport.park(this);
+                rescanned = w.parked = false;
+            }
         }
     }
 
     /**
-     * Tries decrementing active count; fails on contention.
-     * Called when workers cannot find tasks to run.
+     * If inactivating worker w has caused the pool to become
+     * quiescent, check for pool termination, and wait for an event
+     * for up to SHRINK_RATE nanosecs (rescans are unnecessary in
+     * this case because quiescence reflects consensus about lack
+     * of work). On timeout, if ctl has not changed, terminate the
+     * worker. Upon its termination (see deregisterWorker), it may
+     * wake up another worker to possibly repeat this process.
+     *
+     * @param w the calling worker
+     * @param currentCtl the ctl value after enqueuing w
+     * @param prevCtl the ctl value to restore if w terminates
+     * @param v the eventCount whose change w awaits
      */
-    final boolean tryDecrementActiveCount() {
-        int c;
-        return UNSAFE.compareAndSwapInt(this, runStateOffset,
-                                        c = runState, c - 1);
+    private void idleAwaitWork(ForkJoinWorkerThread w, long currentCtl,
+                               long prevCtl, int v) {
+        if (w.eventCount == v) {
+            if (shutdown)
+                tryTerminate(false);
+            ForkJoinTask.helpExpungeStaleExceptions(); // help clean weak refs
+            while (ctl == currentCtl) {
+                long startTime = System.nanoTime();
+                w.parked = true;
+                if (w.eventCount == v)             // must recheck
+                    LockSupport.parkNanos(this, SHRINK_RATE);
+                w.parked = false;
+                if (w.eventCount != v)
+                    break;
+                else if (System.nanoTime() - startTime < SHRINK_RATE)
+                    Thread.interrupted();          // spurious wakeup
+                else if (UNSAFE.compareAndSwapLong(this, ctlOffset,
+                                                   currentCtl, prevCtl)) {
+                    w.terminate = true;            // restore previous
+                    w.eventCount = ((int)currentCtl + EC_UNIT) & E_MASK;
+                    break;
+                }
+            }
+        }
     }
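+
+    /*
+     * For illustration, a minimal standalone sketch of the timed-park
+     * idiom used above: park for a bounded interval, treat an early
+     * return as spurious unless the awaited state changed, and act
+     * only on a genuine timeout. SHRINK_RATE and the AtomicBoolean
+     * condition are stand-ins for this class's fields.
+     *
+     *   import java.util.concurrent.atomic.AtomicBoolean;
+     *   import java.util.concurrent.locks.LockSupport;
+     *
+     *   final class TimedParkSketch {
+     *       static final long SHRINK_RATE = 1000L * 1000L * 1000L; // ~1s
+     *
+     *       // returns true if flag was set, false on a full timeout
+     *       static boolean awaitSet(AtomicBoolean flag) {
+     *           long start = System.nanoTime();
+     *           for (;;) {
+     *               if (flag.get())
+     *                   return true;
+     *               LockSupport.parkNanos(
+     *                   SHRINK_RATE - (System.nanoTime() - start));
+     *               if (flag.get())
+     *                   return true;              // recheck after park
+     *               if (System.nanoTime() - start >= SHRINK_RATE)
+     *                   return false;             // real timeout
+     *               Thread.interrupted();         // clear; spurious wakeup
+     *           }
+     *       }
+     *   }
+     */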
 
+    // Submissions
+
     /**
-     * Advances to at least the given level. Returns true if not
-     * already in at least the given level.
+     * Enqueues the given task in the submissionQueue.  Same idea as
+     * ForkJoinWorkerThread.pushTask except for use of submissionLock.
+     *
+     * @param t the task
      */
-    private boolean advanceRunLevel(int level) {
-        for (;;) {
-            int s = runState;
-            if ((s & level) != 0)
-                return false;
-            if (UNSAFE.compareAndSwapInt(this, runStateOffset, s, s | level))
-                return true;
+    private void addSubmission(ForkJoinTask<?> t) {
+        final ReentrantLock lock = this.submissionLock;
+        lock.lock();
+        try {
+            ForkJoinTask<?>[] q; int s, m;
+            if ((q = submissionQueue) != null) {    // ignore if queue removed
+                long u = (((s = queueTop) & (m = q.length-1)) << ASHIFT)+ABASE;
+                UNSAFE.putOrderedObject(q, u, t);
+                queueTop = s + 1;
+                if (s - queueBase == m)
+                    growSubmissionQueue();
+            }
+        } finally {
+            lock.unlock();
+        }
+        signalWork();
+    }
+
+    //  (pollSubmission is defined below with exported methods)
+
+    /**
+     * Creates or doubles submissionQueue array.
+     * Basically identical to ForkJoinWorkerThread version.
+     */
+    private void growSubmissionQueue() {
+        ForkJoinTask<?>[] oldQ = submissionQueue;
+        int size = oldQ != null ? oldQ.length << 1 : INITIAL_QUEUE_CAPACITY;
+        if (size > MAXIMUM_QUEUE_CAPACITY)
+            throw new RejectedExecutionException("Queue capacity exceeded");
+        if (size < INITIAL_QUEUE_CAPACITY)
+            size = INITIAL_QUEUE_CAPACITY;
+        ForkJoinTask<?>[] q = submissionQueue = new ForkJoinTask<?>[size];
+        int mask = size - 1;
+        int top = queueTop;
+        int oldMask;
+        if (oldQ != null && (oldMask = oldQ.length - 1) >= 0) {
+            for (int b = queueBase; b != top; ++b) {
+                long u = ((b & oldMask) << ASHIFT) + ABASE;
+                Object x = UNSAFE.getObjectVolatile(oldQ, u);
+                if (x != null && UNSAFE.compareAndSwapObject(oldQ, u, x, null))
+                    UNSAFE.putObjectVolatile
+                        (q, ((b & mask) << ASHIFT) + ABASE, x);
+            }
         }
     }
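+
+    /*
+     * For illustration, a minimal sketch of the base/top circular-array
+     * scheme behind addSubmission and growSubmissionQueue, using
+     * AtomicReferenceArray in place of Unsafe. Indices increase without
+     * bound and are masked by (length - 1), so doubling only requires
+     * re-masking each element. Names here are hypothetical.
+     *
+     *   import java.util.concurrent.atomic.AtomicReferenceArray;
+     *
+     *   final class CircularQueueSketch<E> {
+     *       AtomicReferenceArray<E> array =
+     *           new AtomicReferenceArray<E>(8);  // power-of-two size
+     *       volatile int base;  // next slot to poll
+     *       int top;            // next slot to push (lock-guarded)
+     *
+     *       void push(E e) {    // caller holds the submission lock
+     *           int s = top, m = array.length() - 1;
+     *           array.lazySet(s & m, e);         // like putOrderedObject
+     *           top = s + 1;
+     *           if (s - base == m)               // one free slot left
+     *               grow();
+     *       }
+     *
+     *       void grow() {
+     *           AtomicReferenceArray<E> oldA = array;
+     *           AtomicReferenceArray<E> a =
+     *               new AtomicReferenceArray<E>(oldA.length() << 1);
+     *           int oldMask = oldA.length() - 1, mask = a.length() - 1;
+     *           for (int b = base; b != top; ++b) {
+     *               E x = oldA.get(b & oldMask);
+     *               if (x != null &&
+     *                   oldA.compareAndSet(b & oldMask, x, null))
+     *                   a.set(b & mask, x);
+     *           }
+     *           array = a;
+     *       }
+     *   }
+     */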
 
-    // workers array maintenance
+    // Blocking support
 
     /**
-     * Records and returns a workers array index for new worker.
+     * Tries to increment blockedCount, decrement active count
+     * (sometimes implicitly) and possibly release or create a
+     * compensating worker in preparation for blocking. Fails
+     * on contention or termination.
+     *
+     * @return true if the caller can block, else should recheck and retry
      */
-    private int recordWorker(ForkJoinWorkerThread w) {
-        // Try using slot totalCount-1. If not available, scan and/or resize
-        int k = (workerCounts >>> TOTAL_COUNT_SHIFT) - 1;
-        final ReentrantLock lock = this.workerLock;
-        lock.lock();
-        try {
-            ForkJoinWorkerThread[] ws = workers;
-            int n = ws.length;
-            if (k < 0 || k >= n || ws[k] != null) {
-                for (k = 0; k < n && ws[k] != null; ++k)
-                    ;
-                if (k == n)
-                    ws = workers = Arrays.copyOf(ws, n << 1);
+    private boolean tryPreBlock() {
+        int b = blockedCount;
+        if (UNSAFE.compareAndSwapInt(this, blockedCountOffset, b, b + 1)) {
+            int pc = parallelism;
+            do {
+                ForkJoinWorkerThread[] ws; ForkJoinWorkerThread w;
+                int e, ac, tc, rc, i;
+                long c = ctl;
+                int u = (int)(c >>> 32);
+                if ((e = (int)c) < 0) {
+                                                 // skip -- terminating
+                }
+                else if ((ac = (u >> UAC_SHIFT)) <= 0 && e != 0 &&
+                         (ws = workers) != null &&
+                         (i = ~e & SMASK) < ws.length &&
+                         (w = ws[i]) != null) {
+                    long nc = ((long)(w.nextWait & E_MASK) |
+                               (c & (AC_MASK|TC_MASK)));
+                    if (w.eventCount == e &&
+                        UNSAFE.compareAndSwapLong(this, ctlOffset, c, nc)) {
+                        w.eventCount = (e + EC_UNIT) & E_MASK;
+                        if (w.parked)
+                            UNSAFE.unpark(w);
+                        return true;             // release an idle worker
+                    }
+                }
+                else if ((tc = (short)(u >>> UTC_SHIFT)) >= 0 && ac + pc > 1) {
+                    long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK);
+                    if (UNSAFE.compareAndSwapLong(this, ctlOffset, c, nc))
+                        return true;             // no compensation needed
+                }
+                else if (tc + pc < MAX_ID) {
+                    long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
+                    if (UNSAFE.compareAndSwapLong(this, ctlOffset, c, nc)) {
+                        addWorker();
+                        return true;            // create a replacement
+                    }
+                }
+                // try to back out on any failure and let caller retry
+            } while (!UNSAFE.compareAndSwapInt(this, blockedCountOffset,
+                                               b = blockedCount, b - 1));
+        }
+        return false;
+    }
+
+    /**
+     * Decrements blockedCount and increments active count.
+     */
+    private void postBlock() {
+        long c;
+        do {} while (!UNSAFE.compareAndSwapLong(this, ctlOffset,  // no mask
+                                                c = ctl, c + AC_UNIT));
+        int b;
+        do {} while (!UNSAFE.compareAndSwapInt(this, blockedCountOffset,
+                                               b = blockedCount, b - 1));
+    }
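+
+    /*
+     * The empty-bodied "do {} while (!...CAS...)" loops above are the
+     * standard unconditional-retry idiom: re-read the field, attempt
+     * the CAS, repeat until it succeeds. A minimal sketch of the same
+     * shape with a plain AtomicLong (AC_UNIT's value is assumed):
+     *
+     *   import java.util.concurrent.atomic.AtomicLong;
+     *
+     *   final class CasLoopSketch {
+     *       static final long AC_UNIT = 1L << 48; // assumed packing unit
+     *       final AtomicLong ctl = new AtomicLong();
+     *       void incrementActiveCount() {
+     *           long c;
+     *           do {} while (!ctl.compareAndSet(c = ctl.get(),
+     *                                           c + AC_UNIT));
+     *       }
+     *   }
+     */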
+
+    /**
+     * Possibly blocks waiting for the given task to complete, or
+     * cancels the task if terminating.  Fails to wait if contended.
+     *
+     * @param joinMe the task
+     */
+    final void tryAwaitJoin(ForkJoinTask<?> joinMe) {
+        int s;
+        Thread.interrupted(); // clear interrupts before checking termination
+        if (joinMe.status >= 0) {
+            if (tryPreBlock()) {
+                joinMe.tryAwaitDone(0L);
+                postBlock();
             }
-            ws[k] = w;
-            int c = eventCount; // advance event count to ensure visibility
-            UNSAFE.compareAndSwapInt(this, eventCountOffset, c, c+1);
-        } finally {
-            lock.unlock();
+            else if ((ctl & STOP_BIT) != 0L)
+                joinMe.cancelIgnoringExceptions();
         }
-        return k;
     }
 
     /**
-     * Nulls out record of worker in workers array.
+     * Possibly blocks the given worker waiting for joinMe to
+     * complete or time out.
+     *
+     * @param joinMe the task
+     * @param nanos the maximum wait time, in nanoseconds, for the
+     * underlying Object.wait
+     */
+    final void timedAwaitJoin(ForkJoinTask<?> joinMe, long nanos) {
+        while (joinMe.status >= 0) {
+            Thread.interrupted();
+            if ((ctl & STOP_BIT) != 0L) {
+                joinMe.cancelIgnoringExceptions();
+                break;
+            }
+            if (tryPreBlock()) {
+                long last = System.nanoTime();
+                while (joinMe.status >= 0) {
+                    long millis = TimeUnit.NANOSECONDS.toMillis(nanos);
+                    if (millis <= 0)
+                        break;
+                    joinMe.tryAwaitDone(millis);
+                    if (joinMe.status < 0)
+                        break;
+                    if ((ctl & STOP_BIT) != 0L) {
+                        joinMe.cancelIgnoringExceptions();
+                        break;
+                    }
+                    long now = System.nanoTime();
+                    nanos -= now - last;
+                    last = now;
+                }
+                postBlock();
+                break;
+            }
+        }
+    }
+
+    /**
+     * If necessary, compensates for blocker, and blocks
+     */
+    private void awaitBlocker(ManagedBlocker blocker)
+        throws InterruptedException {
+        while (!blocker.isReleasable()) {
+            if (tryPreBlock()) {
+                try {
+                    do {} while (!blocker.isReleasable() && !blocker.block());
+                } finally {
+                    postBlock();
+                }
+                break;
+            }
+        }
+    }
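+
+    /*
+     * awaitBlocker is reached via the public ForkJoinPool.managedBlock
+     * entry point. For example, a blocking queue take can be wrapped
+     * as a ManagedBlocker so the pool may add a compensating worker
+     * while the caller blocks; a sketch (the QueueTaker name is
+     * illustrative):
+     *
+     *   import java.util.concurrent.BlockingQueue;
+     *
+     *   class QueueTaker<E> implements ForkJoinPool.ManagedBlocker {
+     *       final BlockingQueue<E> queue;
+     *       volatile E item = null;
+     *       QueueTaker(BlockingQueue<E> q) { queue = q; }
+     *       public boolean block() throws InterruptedException {
+     *           if (item == null)
+     *               item = queue.take();
+     *           return true;
+     *       }
+     *       public boolean isReleasable() {
+     *           return item != null || (item = queue.poll()) != null;
+     *       }
+     *       E getItem() { return item; }
+     *   }
+     *
+     *   // usage: ForkJoinPool.managedBlock(new QueueTaker<T>(q));
+     */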
+
+    // Creating, registering and deregistering workers
+
+    /**
+     * Tries to create and start a worker; minimally rolls back counts
+     * on failure.
      */
-    private void forgetWorker(ForkJoinWorkerThread w) {
-        int idx = w.poolIndex;
-        // Locking helps method recordWorker avoid unnecessary expansion
-        final ReentrantLock lock = this.workerLock;
-        lock.lock();
+    private void addWorker() {
+        Throwable ex = null;
+        ForkJoinWorkerThread t = null;
         try {
-            ForkJoinWorkerThread[] ws = workers;
-            if (idx >= 0 && idx < ws.length && ws[idx] == w) // verify
-                ws[idx] = null;
-        } finally {
-            lock.unlock();
+            t = factory.newThread(this);
+        } catch (Throwable e) {
+            ex = e;
+        }
+        if (t == null) {  // null or exceptional factory return
+            long c;       // adjust counts
+            do {} while (!UNSAFE.compareAndSwapLong
+                         (this, ctlOffset, c = ctl,
+                          (((c - AC_UNIT) & AC_MASK) |
+                           ((c - TC_UNIT) & TC_MASK) |
+                           (c & ~(AC_MASK|TC_MASK)))));
+            // Propagate exception if originating from an external caller
+            if (!tryTerminate(false) && ex != null &&
+                !(Thread.currentThread() instanceof ForkJoinWorkerThread))
+                UNSAFE.throwException(ex);
+        }
+        else
+            t.start();
+    }
+
+    /**
+     * Callback from ForkJoinWorkerThread constructor to assign a
+     * public name.
+     */
+    final String nextWorkerName() {
+        for (int n;;) {
+            if (UNSAFE.compareAndSwapInt(this, nextWorkerNumberOffset,
+                                         n = nextWorkerNumber, ++n))
+                return workerNamePrefix + n;
+        }
+    }
+
+    /**
+     * Callback from ForkJoinWorkerThread constructor to
+     * determine its poolIndex and record in workers array.
+     *
+     * @param w the worker
+     * @return the worker's pool index
+     */
+    final int registerWorker(ForkJoinWorkerThread w) {
+        /*
+         * In the typical case, a new worker acquires the lock, uses
+         * next available index and returns quickly.  Since we should
+         * not block callers (ultimately from signalWork or
+         * tryPreBlock) waiting for the lock needed to do this, we
+         * instead help release other workers while waiting for the
+         * lock.
+         */
+        for (int g;;) {
+            ForkJoinWorkerThread[] ws;
+            if (((g = scanGuard) & SG_UNIT) == 0 &&
+                UNSAFE.compareAndSwapInt(this, scanGuardOffset,
+                                         g, g | SG_UNIT)) {
+                int k = nextWorkerIndex;
+                try {
+                    if ((ws = workers) != null) { // ignore on shutdown
+                        int n = ws.length;
+                        if (k < 0 || k >= n || ws[k] != null) {
+                            for (k = 0; k < n && ws[k] != null; ++k)
+                                ;
+                            if (k == n)
+                                ws = workers = Arrays.copyOf(ws, n << 1);
+                        }
+                        ws[k] = w;
+                        nextWorkerIndex = k + 1;
+                        int m = g & SMASK;
+                        g = k >= m ? ((m << 1) + 1) & SMASK : g + (SG_UNIT << 1);
+                    }
+                } finally {
+                    scanGuard = g;
+                }
+                return k;
+            }
+            else if ((ws = workers) != null) { // help release others
+                for (ForkJoinWorkerThread u : ws) {
+                    if (u != null && u.queueBase != u.queueTop) {
+                        if (tryReleaseWaiter())
+                            break;
+                    }
+                }
+            }
         }
     }
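+
+    /*
+     * The SG_UNIT bit of scanGuard acts as a tiny spin try-lock: CAS
+     * the bit on to acquire, write it off to release. A minimal sketch
+     * with AtomicInteger (SG_UNIT's value is assumed):
+     *
+     *   import java.util.concurrent.atomic.AtomicInteger;
+     *
+     *   final class GuardBitSketch {
+     *       static final int SG_UNIT = 1 << 16; // assumed lock bit
+     *       final AtomicInteger guard = new AtomicInteger();
+     *       boolean tryLock() {
+     *           int g = guard.get();
+     *           return (g & SG_UNIT) == 0 &&
+     *                  guard.compareAndSet(g, g | SG_UNIT);
+     *       }
+     *       void unlock() {                     // single owner clears
+     *           guard.set(guard.get() & ~SG_UNIT);
+     *       }
+     *   }
+     */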
 
@@ -743,415 +1200,46 @@
      *
      * @param w the worker
      */
-    final void workerTerminated(ForkJoinWorkerThread w) {
-        forgetWorker(w);
-        decrementWorkerCounts(w.isTrimmed() ? 0 : ONE_RUNNING, ONE_TOTAL);
-        while (w.stealCount != 0) // collect final count
-            tryAccumulateStealCount(w);
-        tryTerminate(false);
-    }
-
-    // Waiting for and signalling events
-
-    /**
-     * Releases workers blocked on a count not equal to current count.
-     * Normally called after precheck that eventWaiters isn't zero to
-     * avoid wasted array checks. Gives up upon a change in count or
-     * upon releasing four workers, letting others take over.
-     */
-    private void releaseEventWaiters() {
-        ForkJoinWorkerThread[] ws = workers;
-        int n = ws.length;
-        long h = eventWaiters;
-        int ec = eventCount;
-        int releases = 4;
-        ForkJoinWorkerThread w; int id;
-        while ((id = (((int)h) & WAITER_ID_MASK) - 1) >= 0 &&
-               (int)(h >>> EVENT_COUNT_SHIFT) != ec &&
-               id < n && (w = ws[id]) != null) {
-            if (UNSAFE.compareAndSwapLong(this, eventWaitersOffset,
-                                          h,  w.nextWaiter)) {
-                LockSupport.unpark(w);
-                if (--releases == 0)
-                    break;
-            }
-            if (eventCount != ec)
-                break;
-            h = eventWaiters;
-        }
-    }
-
-    /**
-     * Tries to advance eventCount and releases waiters. Called only
-     * from workers.
-     */
-    final void signalWork() {
-        int c; // try to increment event count -- CAS failure OK
-        UNSAFE.compareAndSwapInt(this, eventCountOffset, c = eventCount, c+1);
-        if (eventWaiters != 0L)
-            releaseEventWaiters();
-    }
-
-    /**
-     * Adds the given worker to event queue and blocks until
-     * terminating or event count advances from the given value
-     *
-     * @param w the calling worker thread
-     * @param ec the count
-     */
-    private void eventSync(ForkJoinWorkerThread w, int ec) {
-        long nh = (((long)ec) << EVENT_COUNT_SHIFT) | ((long)(w.poolIndex+1));
-        long h;
-        while ((runState < SHUTDOWN || !tryTerminate(false)) &&
-               (((int)(h = eventWaiters) & WAITER_ID_MASK) == 0 ||
-                (int)(h >>> EVENT_COUNT_SHIFT) == ec) &&
-               eventCount == ec) {
-            if (UNSAFE.compareAndSwapLong(this, eventWaitersOffset,
-                                          w.nextWaiter = h, nh)) {
-                awaitEvent(w, ec);
-                break;
+    final void deregisterWorker(ForkJoinWorkerThread w, Throwable ex) {
+        int idx = w.poolIndex;
+        int sc = w.stealCount;
+        int steps = 0;
+        // Remove from array, adjust worker counts and collect steal count.
+        // We can intermix failed removes or adjusts with steal updates.
+        do {
+            long s, c;
+            int g;
+            if (steps == 0 && ((g = scanGuard) & SG_UNIT) == 0 &&
+                UNSAFE.compareAndSwapInt(this, scanGuardOffset,
+                                         g, g |= SG_UNIT)) {
+                ForkJoinWorkerThread[] ws = workers;
+                if (ws != null && idx >= 0 &&
+                    idx < ws.length && ws[idx] == w)
+                    ws[idx] = null;    // verify
+                nextWorkerIndex = idx;
+                scanGuard = g + SG_UNIT;
+                steps = 1;
             }
-        }
-    }
-
-    /**
-     * Blocks the given worker (that has already been entered as an
-     * event waiter) until terminating or event count advances from
-     * the given value. The oldest (first) waiter uses a timed wait to
-     * occasionally one-by-one shrink the number of workers (to a
-     * minimum of one) if the pool has not been used for extended
-     * periods.
-     *
-     * @param w the calling worker thread
-     * @param ec the count
-     */
-    private void awaitEvent(ForkJoinWorkerThread w, int ec) {
-        while (eventCount == ec) {
-            if (tryAccumulateStealCount(w)) { // transfer while idle
-                boolean untimed = (w.nextWaiter != 0L ||
-                                   (workerCounts & RUNNING_COUNT_MASK) <= 1);
-                long startTime = untimed ? 0 : System.nanoTime();
-                Thread.interrupted();         // clear/ignore interrupt
-                if (w.isTerminating() || eventCount != ec)
-                    break;                    // recheck after clear
-                if (untimed)
-                    LockSupport.park(w);
-                else {
-                    LockSupport.parkNanos(w, SHRINK_RATE_NANOS);
-                    if (eventCount != ec || w.isTerminating())
-                        break;
-                    if (System.nanoTime() - startTime >= SHRINK_RATE_NANOS)
-                        tryShutdownUnusedWorker(ec);
-                }
-            }
-        }
-    }
-
-    // Maintaining parallelism
-
-    /**
-     * Pushes worker onto the spare stack.
-     */
-    final void pushSpare(ForkJoinWorkerThread w) {
-        int ns = (++w.spareCount << SPARE_COUNT_SHIFT) | (w.poolIndex + 1);
-        do {} while (!UNSAFE.compareAndSwapInt(this, spareWaitersOffset,
-                                               w.nextSpare = spareWaiters,ns));
-    }
-
-    /**
-     * Tries (once) to resume a spare if the number of running
-     * threads is less than target.
-     */
-    private void tryResumeSpare() {
-        int sw, id;
-        ForkJoinWorkerThread[] ws = workers;
-        int n = ws.length;
-        ForkJoinWorkerThread w;
-        if ((sw = spareWaiters) != 0 &&
-            (id = (sw & SPARE_ID_MASK) - 1) >= 0 &&
-            id < n && (w = ws[id]) != null &&
-            (runState >= TERMINATING ||
-             (workerCounts & RUNNING_COUNT_MASK) < parallelism) &&
-            spareWaiters == sw &&
-            UNSAFE.compareAndSwapInt(this, spareWaitersOffset,
-                                     sw, w.nextSpare)) {
-            int c; // increment running count before resume
-            do {} while (!UNSAFE.compareAndSwapInt
-                         (this, workerCountsOffset,
-                          c = workerCounts, c + ONE_RUNNING));
-            if (w.tryUnsuspend())
-                LockSupport.unpark(w);
-            else   // back out if w was shutdown
-                decrementWorkerCounts(ONE_RUNNING, 0);
+            if (steps == 1 &&
+                UNSAFE.compareAndSwapLong(this, ctlOffset, c = ctl,
+                                          (((c - AC_UNIT) & AC_MASK) |
+                                           ((c - TC_UNIT) & TC_MASK) |
+                                           (c & ~(AC_MASK|TC_MASK)))))
+                steps = 2;
+            if (sc != 0 &&
+                UNSAFE.compareAndSwapLong(this, stealCountOffset,
+                                          s = stealCount, s + sc))
+                sc = 0;
+        } while (steps != 2 || sc != 0);
+        if (!tryTerminate(false)) {
+            if (ex != null)   // possibly replace if died abnormally
+                signalWork();
+            else
+                tryReleaseWaiter();
         }
     }
 
-    /**
-     * Tries to increase the number of running workers if below target
-     * parallelism: If a spare exists tries to resume it via
-     * tryResumeSpare.  Otherwise, if not enough total workers or all
-     * existing workers are busy, adds a new worker. In all cases also
-     * helps wake up releasable workers waiting for work.
-     */
-    private void helpMaintainParallelism() {
-        int pc = parallelism;
-        int wc, rs, tc;
-        while (((wc = workerCounts) & RUNNING_COUNT_MASK) < pc &&
-               (rs = runState) < TERMINATING) {
-            if (spareWaiters != 0)
-                tryResumeSpare();
-            else if ((tc = wc >>> TOTAL_COUNT_SHIFT) >= MAX_WORKERS ||
-                     (tc >= pc && (rs & ACTIVE_COUNT_MASK) != tc))
-                break;   // enough total
-            else if (runState == rs && workerCounts == wc &&
-                     UNSAFE.compareAndSwapInt(this, workerCountsOffset, wc,
-                                              wc + (ONE_RUNNING|ONE_TOTAL))) {
-                ForkJoinWorkerThread w = null;
-                Throwable fail = null;
-                try {
-                    w = factory.newThread(this);
-                } catch (Throwable ex) {
-                    fail = ex;
-                }
-                if (w == null) { // null or exceptional factory return
-                    decrementWorkerCounts(ONE_RUNNING, ONE_TOTAL);
-                    tryTerminate(false); // handle failure during shutdown
-                    // If originating from an external caller,
-                    // propagate exception, else ignore
-                    if (fail != null && runState < TERMINATING &&
-                        !(Thread.currentThread() instanceof
-                          ForkJoinWorkerThread))
-                        UNSAFE.throwException(fail);
-                    break;
-                }
-                w.start(recordWorker(w), ueh);
-                if ((workerCounts >>> TOTAL_COUNT_SHIFT) >= pc)
-                    break; // add at most one unless total below target
-            }
-        }
-        if (eventWaiters != 0L)
-            releaseEventWaiters();
-    }
-
-    /**
-     * Callback from the oldest waiter in awaitEvent waking up after a
-     * period of non-use. If all workers are idle, tries (once) to
-     * shutdown an event waiter or a spare, if one exists. Note that
-     * we don't need CAS or locks here because the method is called
-     * only from one thread occasionally waking (and even misfires are
-     * OK). Note that until the shutdown worker fully terminates,
-     * workerCounts will overestimate total count, which is tolerable.
-     *
-     * @param ec the event count waited on by caller (to abort
-     * attempt if count has since changed).
-     */
-    private void tryShutdownUnusedWorker(int ec) {
-        if (runState == 0 && eventCount == ec) { // only trigger if all idle
-            ForkJoinWorkerThread[] ws = workers;
-            int n = ws.length;
-            ForkJoinWorkerThread w = null;
-            boolean shutdown = false;
-            int sw;
-            long h;
-            if ((sw = spareWaiters) != 0) { // prefer killing spares
-                int id = (sw & SPARE_ID_MASK) - 1;
-                if (id >= 0 && id < n && (w = ws[id]) != null &&
-                    UNSAFE.compareAndSwapInt(this, spareWaitersOffset,
-                                             sw, w.nextSpare))
-                    shutdown = true;
-            }
-            else if ((h = eventWaiters) != 0L) {
-                long nh;
-                int id = (((int)h) & WAITER_ID_MASK) - 1;
-                if (id >= 0 && id < n && (w = ws[id]) != null &&
-                    (nh = w.nextWaiter) != 0L && // keep at least one worker
-                    UNSAFE.compareAndSwapLong(this, eventWaitersOffset, h, nh))
-                    shutdown = true;
-            }
-            if (w != null && shutdown) {
-                w.shutdown();
-                LockSupport.unpark(w);
-            }
-        }
-        releaseEventWaiters(); // in case of interference
-    }
-
-    /**
-     * Callback from workers invoked upon each top-level action (i.e.,
-     * stealing a task or taking a submission and running it).
-     * Performs one or more of the following:
-     *
-     * 1. If the worker is active and either did not run a task
-     *    or there are too many workers, try to set its active status
-     *    to inactive and update activeCount. On contention, we may
-     *    try again in this or a subsequent call.
-     *
-     * 2. If not enough total workers, help create some.
-     *
-     * 3. If there are too many running workers, suspend this worker
-     *    (first forcing inactive if necessary).  If it is not needed,
-     *    it may be shutdown while suspended (via
-     *    tryShutdownUnusedWorker).  Otherwise, upon resume it
-     *    rechecks running thread count and need for event sync.
-     *
-     * 4. If worker did not run a task, await the next task event via
-     *    eventSync if necessary (first forcing inactivation), upon
-     *    which the worker may be shutdown via
-     *    tryShutdownUnusedWorker.  Otherwise, help release any
-     *    existing event waiters that are now releasable,
-     *
-     * @param w the worker
-     * @param ran true if worker ran a task since last call to this method
-     */
-    final void preStep(ForkJoinWorkerThread w, boolean ran) {
-        int wec = w.lastEventCount;
-        boolean active = w.active;
-        boolean inactivate = false;
-        int pc = parallelism;
-        while (w.runState == 0) {
-            int rs = runState;
-            if (rs >= TERMINATING) {           // propagate shutdown
-                w.shutdown();
-                break;
-            }
-            if ((inactivate || (active && (rs & ACTIVE_COUNT_MASK) >= pc)) &&
-                UNSAFE.compareAndSwapInt(this, runStateOffset, rs, --rs)) {
-                inactivate = active = w.active = false;
-                if (rs == SHUTDOWN) {          // all inactive and shut down
-                    tryTerminate(false);
-                    continue;
-                }
-            }
-            int wc = workerCounts;             // try to suspend as spare
-            if ((wc & RUNNING_COUNT_MASK) > pc) {
-                if (!(inactivate |= active) && // must inactivate to suspend
-                    workerCounts == wc &&
-                    UNSAFE.compareAndSwapInt(this, workerCountsOffset,
-                                             wc, wc - ONE_RUNNING))
-                    w.suspendAsSpare();
-            }
-            else if ((wc >>> TOTAL_COUNT_SHIFT) < pc)
-                helpMaintainParallelism();     // not enough workers
-            else if (ran)
-                break;
-            else {
-                long h = eventWaiters;
-                int ec = eventCount;
-                if (h != 0L && (int)(h >>> EVENT_COUNT_SHIFT) != ec)
-                    releaseEventWaiters();     // release others before waiting
-                else if (ec != wec) {
-                    w.lastEventCount = ec;     // no need to wait
-                    break;
-                }
-                else if (!(inactivate |= active))
-                    eventSync(w, wec);         // must inactivate before sync
-            }
-        }
-    }
-
-    /**
-     * Helps and/or blocks awaiting join of the given task.
-     * See above for explanation.
-     *
-     * @param joinMe the task to join
-     * @param worker the current worker thread
-     * @param timed true if wait should time out
-     * @param nanos timeout value if timed
-     */
-    final void awaitJoin(ForkJoinTask<?> joinMe, ForkJoinWorkerThread worker,
-                         boolean timed, long nanos) {
-        long startTime = timed ? System.nanoTime() : 0L;
-        int retries = 2 + (parallelism >> 2); // #helpJoins before blocking
-        boolean running = true;               // false when count decremented
-        while (joinMe.status >= 0) {
-            if (runState >= TERMINATING) {
-                joinMe.cancelIgnoringExceptions();
-                break;
-            }
-            running = worker.helpJoinTask(joinMe, running);
-            if (joinMe.status < 0)
-                break;
-            if (retries > 0) {
-                --retries;
-                continue;
-            }
-            int wc = workerCounts;
-            if ((wc & RUNNING_COUNT_MASK) != 0) {
-                if (running) {
-                    if (!UNSAFE.compareAndSwapInt(this, workerCountsOffset,
-                                                  wc, wc - ONE_RUNNING))
-                        continue;
-                    running = false;
-                }
-                long h = eventWaiters;
-                if (h != 0L && (int)(h >>> EVENT_COUNT_SHIFT) != eventCount)
-                    releaseEventWaiters();
-                if ((workerCounts & RUNNING_COUNT_MASK) != 0) {
-                    long ms; int ns;
-                    if (!timed) {
-                        ms = JOIN_TIMEOUT_MILLIS;
-                        ns = 0;
-                    }
-                    else { // at most JOIN_TIMEOUT_MILLIS per wait
-                        long nt = nanos - (System.nanoTime() - startTime);
-                        if (nt <= 0L)
-                            break;
-                        ms = nt / 1000000;
-                        if (ms > JOIN_TIMEOUT_MILLIS) {
-                            ms = JOIN_TIMEOUT_MILLIS;
-                            ns = 0;
-                        }
-                        else
-                            ns = (int) (nt % 1000000);
-                    }
-                    joinMe.internalAwaitDone(ms, ns);
-                }
-                if (joinMe.status < 0)
-                    break;
-            }
-            helpMaintainParallelism();
-        }
-        if (!running) {
-            int c;
-            do {} while (!UNSAFE.compareAndSwapInt
-                         (this, workerCountsOffset,
-                          c = workerCounts, c + ONE_RUNNING));
-        }
-    }
-
-    /**
-     * Same idea as awaitJoin, but no helping, retries, or timeouts.
-     */
-    final void awaitBlocker(ManagedBlocker blocker)
-        throws InterruptedException {
-        while (!blocker.isReleasable()) {
-            int wc = workerCounts;
-            if ((wc & RUNNING_COUNT_MASK) == 0)
-                helpMaintainParallelism();
-            else if (UNSAFE.compareAndSwapInt(this, workerCountsOffset,
-                                              wc, wc - ONE_RUNNING)) {
-                try {
-                    while (!blocker.isReleasable()) {
-                        long h = eventWaiters;
-                        if (h != 0L &&
-                            (int)(h >>> EVENT_COUNT_SHIFT) != eventCount)
-                            releaseEventWaiters();
-                        else if ((workerCounts & RUNNING_COUNT_MASK) == 0 &&
-                                 runState < TERMINATING)
-                            helpMaintainParallelism();
-                        else if (blocker.block())
-                            break;
-                    }
-                } finally {
-                    int c;
-                    do {} while (!UNSAFE.compareAndSwapInt
-                                 (this, workerCountsOffset,
-                                  c = workerCounts, c + ONE_RUNNING));
-                }
-                break;
-            }
-        }
-    }
+    // Shutdown and termination
 
     /**
      * Possibly initiates and/or completes termination.
@@ -1161,97 +1249,132 @@
      * @return true if now terminating or terminated
      */
     private boolean tryTerminate(boolean now) {
-        if (now)
-            advanceRunLevel(SHUTDOWN); // ensure at least SHUTDOWN
-        else if (runState < SHUTDOWN ||
-                 !submissionQueue.isEmpty() ||
-                 (runState & ACTIVE_COUNT_MASK) != 0)
-            return false;
-
-        if (advanceRunLevel(TERMINATING))
-            startTerminating();
-
-        // Finish now if all threads terminated; else in some subsequent call
-        if ((workerCounts >>> TOTAL_COUNT_SHIFT) == 0) {
-            advanceRunLevel(TERMINATED);
-            termination.forceTermination();
+        long c;
+        while (((c = ctl) & STOP_BIT) == 0) {
+            if (!now) {
+                if ((int)(c >> AC_SHIFT) != -parallelism)
+                    return false;
+                if (!shutdown || blockedCount != 0 || quiescerCount != 0 ||
+                    queueBase != queueTop) {
+                    if (ctl == c) // staleness check
+                        return false;
+                    continue;
+                }
+            }
+            if (UNSAFE.compareAndSwapLong(this, ctlOffset, c, c | STOP_BIT))
+                startTerminating();
+        }
+        if ((short)(c >>> TC_SHIFT) == -parallelism) { // signal when 0 workers
+            final ReentrantLock lock = this.submissionLock;
+            lock.lock();
+            try {
+                termination.signalAll();
+            } finally {
+                lock.unlock();
+            }
         }
         return true;
     }
 
     /**
-     * Actions on transition to TERMINATING
-     *
-     * Runs up to four passes through workers: (0) shutting down each
-     * (without waking up if parked) to quickly spread notifications
-     * without unnecessary bouncing around event queues etc (1) wake
-     * up and help cancel tasks (2) interrupt (3) mop up races with
-     * interrupted workers
+     * Runs up to three passes through workers: (0) Setting
+     * termination status for each worker, followed by wakeups of
+     * queued workers; (1) helping cancel tasks; (2) interrupting
+     * lagging threads (likely in external tasks, but possibly also
+     * blocked in joins).  Each pass repeats previous steps because of
+     * potential lagging thread creation.
      */
     private void startTerminating() {
         cancelSubmissions();
-        for (int passes = 0; passes < 4 && workerCounts != 0; ++passes) {
-            int c; // advance event count
-            UNSAFE.compareAndSwapInt(this, eventCountOffset,
-                                     c = eventCount, c+1);
-            eventWaiters = 0L; // clobber lists
-            spareWaiters = 0;
-            for (ForkJoinWorkerThread w : workers) {
-                if (w != null) {
-                    w.shutdown();
-                    if (passes > 0 && !w.isTerminated()) {
-                        w.cancelTasks();
-                        LockSupport.unpark(w);
-                        if (passes > 1 && !w.isInterrupted()) {
-                            try {
-                                w.interrupt();
-                            } catch (SecurityException ignore) {
+        for (int pass = 0; pass < 3; ++pass) {
+            ForkJoinWorkerThread[] ws = workers;
+            if (ws != null) {
+                for (ForkJoinWorkerThread w : ws) {
+                    if (w != null) {
+                        w.terminate = true;
+                        if (pass > 0) {
+                            w.cancelTasks();
+                            if (pass > 1 && !w.isInterrupted()) {
+                                try {
+                                    w.interrupt();
+                                } catch (SecurityException ignore) {
+                                }
                             }
                         }
                     }
                 }
+                terminateWaiters();
+            }
+        }
+    }
+
+    /**
+     * Polls and cancels all submissions. Called only during termination.
+     */
+    private void cancelSubmissions() {
+        while (queueBase != queueTop) {
+            ForkJoinTask<?> task = pollSubmission();
+            if (task != null) {
+                try {
+                    task.cancel(false);
+                } catch (Throwable ignore) {
+                }
             }
         }
     }
 
     /**
-     * Clears out and cancels submissions, ignoring exceptions.
+     * Tries to set the termination status of waiting workers, and
+     * then wakes them up (after which they will terminate).
      */
-    private void cancelSubmissions() {
-        ForkJoinTask<?> task;
-        while ((task = submissionQueue.poll()) != null) {
-            try {
-                task.cancel(false);
-            } catch (Throwable ignore) {
+    private void terminateWaiters() {
+        ForkJoinWorkerThread[] ws = workers;
+        if (ws != null) {
+            ForkJoinWorkerThread w; long c; int i, e;
+            int n = ws.length;
+            while ((i = ~(e = (int)(c = ctl)) & SMASK) < n &&
+                   (w = ws[i]) != null && w.eventCount == (e & E_MASK)) {
+                if (UNSAFE.compareAndSwapLong(this, ctlOffset, c,
+                                              (long)(w.nextWait & E_MASK) |
+                                              ((c + AC_UNIT) & AC_MASK) |
+                                              (c & (TC_MASK|STOP_BIT)))) {
+                    w.terminate = true;
+                    w.eventCount = e + EC_UNIT;
+                    if (w.parked)
+                        UNSAFE.unpark(w);
+                }
             }
         }
     }
 
-    // misc support for ForkJoinWorkerThread
+    // misc ForkJoinWorkerThread support
 
     /**
-     * Returns pool number.
+     * Increment or decrement quiescerCount. Needed only to prevent
+     * triggering shutdown if a worker is transiently inactive while
+     * checking quiescence.
+     *
+     * @param delta 1 for increment, -1 for decrement
      */
-    final int getPoolNumber() {
-        return poolNumber;
+    final void addQuiescerCount(int delta) {
+        int c;
+        do {} while (!UNSAFE.compareAndSwapInt(this, quiescerCountOffset,
+                                               c = quiescerCount, c + delta));
     }
 
     /**
-     * Tries to accumulate steal count from a worker, clearing
-     * the worker's value if successful.
+     * Directly increment or decrement active count without
+     * queuing. This method is used to transiently assert inactivation
+     * while checking quiescence.
      *
-     * @return true if worker steal count now zero
+     * @param delta 1 for increment, -1 for decrement
      */
-    final boolean tryAccumulateStealCount(ForkJoinWorkerThread w) {
-        int sc = w.stealCount;
-        long c = stealCount;
-        // CAS even if zero, for fence effects
-        if (UNSAFE.compareAndSwapLong(this, stealCountOffset, c, c + sc)) {
-            if (sc != 0)
-                w.stealCount = 0;
-            return true;
-        }
-        return sc == 0;
+    final void addActiveCount(int delta) {
+        long d = delta < 0 ? -AC_UNIT : AC_UNIT;
+        long c;
+        do {} while (!UNSAFE.compareAndSwapLong(this, ctlOffset, c = ctl,
+                                                ((c + d) & AC_MASK) |
+                                                (c & ~AC_MASK)));
     }
 
     /**
@@ -1259,16 +1382,17 @@
      * active thread.
      */
     final int idlePerActive() {
-        int pc = parallelism; // use parallelism, not rc
-        int ac = runState;    // no mask -- artificially boosts during shutdown
-        // Use exact results for small values, saturate past 4
-        return ((pc <= ac) ? 0 :
-                (pc >>> 1 <= ac) ? 1 :
-                (pc >>> 2 <= ac) ? 3 :
-                pc >>> 3);
+        // Approximate at powers of two for small values, saturate past 4
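+        // (e.g., with parallelism 8: 5..8 active -> 0, 3..4 -> 1,
+        //  2 -> 2, 1 -> 4, and 0 active -> 8)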
+        int p = parallelism;
+        int a = p + (int)(ctl >> AC_SHIFT);
+        return (a > (p >>>= 1) ? 0 :
+                a > (p >>>= 1) ? 1 :
+                a > (p >>>= 1) ? 2 :
+                a > (p >>>= 1) ? 4 :
+                8);
     }
 
-    // Public and protected methods
+    // Exported methods
 
     // Constructors
 
@@ -1337,49 +1461,42 @@
         checkPermission();
         if (factory == null)
             throw new NullPointerException();
-        if (parallelism <= 0 || parallelism > MAX_WORKERS)
+        if (parallelism <= 0 || parallelism > MAX_ID)
             throw new IllegalArgumentException();
         this.parallelism = parallelism;
         this.factory = factory;
         this.ueh = handler;
         this.locallyFifo = asyncMode;
-        int arraySize = initialArraySizeFor(parallelism);
-        this.workers = new ForkJoinWorkerThread[arraySize];
-        this.submissionQueue = new LinkedTransferQueue<ForkJoinTask<?>>();
-        this.workerLock = new ReentrantLock();
-        this.termination = new Phaser(1);
-        this.poolNumber = poolNumberGenerator.incrementAndGet();
-    }
-
-    /**
-     * Returns initial power of two size for workers array.
-     * @param pc the initial parallelism level
-     */
-    private static int initialArraySizeFor(int pc) {
-        // If possible, initially allocate enough space for one spare
-        int size = pc < MAX_WORKERS ? pc + 1 : MAX_WORKERS;
-        // See Hackers Delight, sec 3.2. We know MAX_WORKERS < (1 >>> 16)
-        size |= size >>> 1;
-        size |= size >>> 2;
-        size |= size >>> 4;
-        size |= size >>> 8;
-        return size + 1;
+        long np = (long)(-parallelism); // offset ctl counts
+        this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
+        this.submissionQueue = new ForkJoinTask<?>[INITIAL_QUEUE_CAPACITY];
+        // initialize workers array with room for 2*parallelism if possible
+        int n = parallelism << 1;
+        if (n >= MAX_ID)
+            n = MAX_ID;
+        else { // See Hackers Delight, sec 3.2, where n < (1 << 16)
+            n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8;
+        }
+        workers = new ForkJoinWorkerThread[n + 1];
+        this.submissionLock = new ReentrantLock();
+        this.termination = submissionLock.newCondition();
+        StringBuilder sb = new StringBuilder("ForkJoinPool-");
+        sb.append(poolNumberGenerator.incrementAndGet());
+        sb.append("-worker-");
+        this.workerNamePrefix = sb.toString();
     }
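+
+    /*
+     * The OR-shift cascade above "smears" the highest set bit of n
+     * downward, so n + 1 becomes the smallest power of two greater
+     * than the original n (Hacker's Delight, sec 3.2). Standalone
+     * equivalent, valid for 0 < n < (1 << 16):
+     *
+     *   static int ceilingPowerOfTwo(int n) {
+     *       n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8;
+     *       return n + 1;   // e.g. 12 -> 16, 16 -> 32
+     *   }
+     */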
 
     // Execution methods
 
     /**
-     * Submits task and creates, starts, or resumes some workers if necessary
-     */
-    private <T> void doSubmit(ForkJoinTask<T> task) {
-        submissionQueue.offer(task);
-        int c; // try to increment event count -- CAS failure OK
-        UNSAFE.compareAndSwapInt(this, eventCountOffset, c = eventCount, c+1);
-        helpMaintainParallelism();
-    }
-
-    /**
      * Performs the given task, returning its result upon completion.
+     * If the computation encounters an unchecked Exception or Error,
+     * it is rethrown as the outcome of this invocation.  Rethrown
+     * exceptions behave in the same way as regular exceptions, but,
+     * when possible, contain stack traces (as displayed for example
+     * using {@code ex.printStackTrace()}) of both the current thread
+     * as well as the thread actually encountering the exception;
+     * minimally only the latter.
      *
      * @param task the task
      * @return the task's result
@@ -1388,16 +1505,16 @@
      *         scheduled for execution
      */
     public <T> T invoke(ForkJoinTask<T> task) {
+        Thread t = Thread.currentThread();
         if (task == null)
             throw new NullPointerException();
-        if (runState >= SHUTDOWN)
+        if (shutdown)
             throw new RejectedExecutionException();
-        Thread t = Thread.currentThread();
         if ((t instanceof ForkJoinWorkerThread) &&
             ((ForkJoinWorkerThread)t).pool == this)
             return task.invoke();  // bypass submit if in same pool
         else {
-            doSubmit(task);
+            addSubmission(task);
             return task.join();
         }
     }
@@ -1407,14 +1524,15 @@
      * computation in the current pool, else submits as external task.
      */
     private <T> void forkOrSubmit(ForkJoinTask<T> task) {
-        if (runState >= SHUTDOWN)
+        ForkJoinWorkerThread w;
+        Thread t = Thread.currentThread();
+        if (shutdown)
             throw new RejectedExecutionException();
-        Thread t = Thread.currentThread();
         if ((t instanceof ForkJoinWorkerThread) &&
-            ((ForkJoinWorkerThread)t).pool == this)
-            task.fork();
+            (w = (ForkJoinWorkerThread)t).pool == this)
+            w.pushTask(task);
         else
-            doSubmit(task);
+            addSubmission(task);
     }
 
     /**
@@ -1571,7 +1689,7 @@
      * @return the number of worker threads
      */
     public int getPoolSize() {
-        return workerCounts >>> TOTAL_COUNT_SHIFT;
+        return parallelism + (short)(ctl >>> TC_SHIFT);
     }
 
     /**
@@ -1593,7 +1711,8 @@
      * @return the number of worker threads
      */
     public int getRunningThreadCount() {
-        return workerCounts & RUNNING_COUNT_MASK;
+        int r = parallelism + (int)(ctl >> AC_SHIFT);
+        return r <= 0 ? 0 : r; // suppress momentarily negative values
     }
 
     /**
@@ -1604,7 +1723,8 @@
      * @return the number of active threads
      */
     public int getActiveThreadCount() {
-        return runState & ACTIVE_COUNT_MASK;
+        int r = parallelism + (int)(ctl >> AC_SHIFT) + blockedCount;
+        return r <= 0 ? 0 : r; // suppress momentarily negative values
     }
 
     /**
@@ -1619,7 +1739,7 @@
      * @return {@code true} if all threads are currently idle
      */
     public boolean isQuiescent() {
-        return (runState & ACTIVE_COUNT_MASK) == 0;
+        return parallelism + (int)(ctl >> AC_SHIFT) + blockedCount == 0;
     }
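+
+    /*
+     * A note on the decodings above: ctl packs the active and total
+     * worker counts into 16-bit fields, each stored offset by
+     * -parallelism so that zero live workers reads as -parallelism
+     * and a full complement reads as zero. Sketch of the arithmetic,
+     * with assumed shift positions:
+     *
+     *   static final int AC_SHIFT = 48, TC_SHIFT = 32; // assumed layout
+     *
+     *   static int runningThreads(long ctl, int parallelism) {
+     *       int r = parallelism + (int)(ctl >> AC_SHIFT); // sign-extends
+     *       return r <= 0 ? 0 : r;  // clip transient negatives
+     *   }
+     *   static int totalThreads(long ctl, int parallelism) {
+     *       return parallelism + (short)(ctl >>> TC_SHIFT);
+     *   }
+     */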
 
     /**
@@ -1649,21 +1769,25 @@
      */
     public long getQueuedTaskCount() {
         long count = 0;
-        for (ForkJoinWorkerThread w : workers)
-            if (w != null)
-                count += w.getQueueSize();
+        ForkJoinWorkerThread[] ws;
+        if ((short)(ctl >>> TC_SHIFT) > -parallelism &&
+            (ws = workers) != null) {
+            for (ForkJoinWorkerThread w : ws)
+                if (w != null)
+                    count -= w.queueBase - w.queueTop; // must read base first
+        }
         return count;
     }
 
     /**
      * Returns an estimate of the number of tasks submitted to this
-     * pool that have not yet begun executing.  This method takes time
-     * proportional to the number of submissions.
+     * pool that have not yet begun executing.  This method may take
+     * time proportional to the number of submissions.
      *
      * @return the number of queued submissions
      */
     public int getQueuedSubmissionCount() {
-        return submissionQueue.size();
+        return -queueBase + queueTop;
     }
 
     /**
@@ -1673,7 +1797,7 @@
      * @return {@code true} if there are any queued submissions
      */
     public boolean hasQueuedSubmissions() {
-        return !submissionQueue.isEmpty();
+        return queueBase != queueTop;
     }
 
     /**
@@ -1684,7 +1808,19 @@
      * @return the next submission, or {@code null} if none
      */
     protected ForkJoinTask<?> pollSubmission() {
-        return submissionQueue.poll();
+        ForkJoinTask<?> t; ForkJoinTask<?>[] q; int b, i;
+        while ((b = queueBase) != queueTop &&
+               (q = submissionQueue) != null &&
+               (i = (q.length - 1) & b) >= 0) {
+            long u = (i << ASHIFT) + ABASE;
+            if ((t = q[i]) != null &&
+                queueBase == b &&
+                UNSAFE.compareAndSwapObject(q, u, t, null)) {
+                queueBase = b + 1;
+                return t;
+            }
+        }
+        return null;
     }
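+
+    /*
+     * The consumer side of the circular array claims a slot by CASing
+     * it to null before advancing queueBase, so racing pollers can
+     * never return the same task twice. A sketch, with base held in an
+     * AtomicInteger (a variant of the push sketch earlier; names are
+     * hypothetical):
+     *
+     *   import java.util.concurrent.atomic.AtomicInteger;
+     *   import java.util.concurrent.atomic.AtomicReferenceArray;
+     *
+     *   final class PollSketch<E> {
+     *       final AtomicReferenceArray<E> array =
+     *           new AtomicReferenceArray<E>(8);
+     *       final AtomicInteger base = new AtomicInteger();
+     *       volatile int top;
+     *
+     *       E poll() {
+     *           int b; E t;
+     *           while ((b = base.get()) != top) {
+     *               int i = b & (array.length() - 1);
+     *               if ((t = array.get(i)) != null &&
+     *                   base.get() == b &&           // staleness check
+     *                   array.compareAndSet(i, t, null)) {
+     *                   base.set(b + 1);
+     *                   return t;
+     *               }
+     *           }
+     *           return null;
+     *       }
+     *   }
+     */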
 
     /**
@@ -1705,10 +1841,21 @@
      * @return the number of elements transferred
      */
     protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
-        int count = submissionQueue.drainTo(c);
-        for (ForkJoinWorkerThread w : workers)
-            if (w != null)
-                count += w.drainTasksTo(c);
+        int count = 0;
+        while (queueBase != queueTop) {
+            ForkJoinTask<?> t = pollSubmission();
+            if (t != null) {
+                c.add(t);
+                ++count;
+            }
+        }
+        ForkJoinWorkerThread[] ws;
+        if ((short)(ctl >>> TC_SHIFT) > -parallelism &&
+            (ws = workers) != null) {
+            for (ForkJoinWorkerThread w : ws)
+                if (w != null)
+                    count += w.drainTasksTo(c);
+        }
         return count;
     }
 
@@ -1723,14 +1870,20 @@
         long st = getStealCount();
         long qt = getQueuedTaskCount();
         long qs = getQueuedSubmissionCount();
-        int wc = workerCounts;
-        int tc = wc >>> TOTAL_COUNT_SHIFT;
-        int rc = wc & RUNNING_COUNT_MASK;
         int pc = parallelism;
-        int rs = runState;
-        int ac = rs & ACTIVE_COUNT_MASK;
+        long c = ctl;
+        int tc = pc + (short)(c >>> TC_SHIFT);
+        int rc = pc + (int)(c >> AC_SHIFT);
+        if (rc < 0) // ignore transient negative
+            rc = 0;
+        int ac = rc + blockedCount;
+        String level;
+        if ((c & STOP_BIT) != 0)
+            level = (tc == 0) ? "Terminated" : "Terminating";
+        else
+            level = shutdown ? "Shutting down" : "Running";
         return super.toString() +
-            "[" + runLevelToString(rs) +
+            "[" + level +
             ", parallelism = " + pc +
             ", size = " + tc +
             ", active = " + ac +
@@ -1741,13 +1894,6 @@
             "]";
     }
 
-    private static String runLevelToString(int s) {
-        return ((s & TERMINATED) != 0 ? "Terminated" :
-                ((s & TERMINATING) != 0 ? "Terminating" :
-                 ((s & SHUTDOWN) != 0 ? "Shutting down" :
-                  "Running")));
-    }
-
     /**
      * Initiates an orderly shutdown in which previously submitted
      * tasks are executed, but no new tasks will be accepted.
@@ -1762,7 +1908,7 @@
      */
     public void shutdown() {
         checkPermission();
-        advanceRunLevel(SHUTDOWN);
+        shutdown = true;
         tryTerminate(false);
     }
 
@@ -1784,6 +1930,7 @@
      */
     public List<Runnable> shutdownNow() {
         checkPermission();
+        shutdown = true;
         tryTerminate(true);
         return Collections.emptyList();
     }
@@ -1794,7 +1941,9 @@
      * @return {@code true} if all tasks have completed following shut down
      */
     public boolean isTerminated() {
-        return runState >= TERMINATED;
+        long c = ctl;
+        return ((c & STOP_BIT) != 0L &&
+                (short)(c >>> TC_SHIFT) == -parallelism);
     }
 
     /**
@@ -1811,14 +1960,16 @@
      * @return {@code true} if terminating but not yet terminated
      */
     public boolean isTerminating() {
-        return (runState & (TERMINATING|TERMINATED)) == TERMINATING;
+        long c = ctl;
+        return ((c & STOP_BIT) != 0L &&
+                (short)(c >>> TC_SHIFT) != -parallelism);
     }
 
     /**
      * Returns true if terminating or terminated. Used by ForkJoinWorkerThread.
      */
     final boolean isAtLeastTerminating() {
-        return runState >= TERMINATING;
+        return (ctl & STOP_BIT) != 0L;
     }
 
     /**
@@ -1827,7 +1978,7 @@
      * @return {@code true} if this pool has been shut down
      */
     public boolean isShutdown() {
-        return runState >= SHUTDOWN;
+        return shutdown;
     }
 
     /**
@@ -1843,12 +1994,20 @@
      */
     public boolean awaitTermination(long timeout, TimeUnit unit)
         throws InterruptedException {
+        long nanos = unit.toNanos(timeout);
+        final ReentrantLock lock = this.submissionLock;
+        lock.lock();
         try {
-            termination.awaitAdvanceInterruptibly(0, timeout, unit);
-        } catch (TimeoutException ex) {
-            return false;
+            for (;;) {
+                if (isTerminated())
+                    return true;
+                if (nanos <= 0)
+                    return false;
+                nanos = termination.awaitNanos(nanos);
+            }
+        } finally {
+            lock.unlock();
         }
-        return true;
     }
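+
+    /*
+     * A typical client-side shutdown sequence using the methods
+     * above: initiate an orderly shutdown, bound the wait for
+     * completion, then force cancellation if tasks lag.
+     *
+     *   pool.shutdown();
+     *   if (!pool.awaitTermination(60, TimeUnit.SECONDS))
+     *       pool.shutdownNow();
+     */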
 
     /**
@@ -1859,13 +2018,15 @@
      * {@code isReleasable} must return {@code true} if blocking is
      * not necessary. Method {@code block} blocks the current thread
      * if necessary (perhaps internally invoking {@code isReleasable}
-     * before actually blocking). The unusual methods in this API
-     * accommodate synchronizers that may, but don't usually, block
-     * for long periods. Similarly, they allow more efficient internal
-     * handling of cases in which additional workers may be, but
-     * usually are not, needed to ensure sufficient parallelism.
-     * Toward this end, implementations of method {@code isReleasable}
-     * must be amenable to repeated invocation.
+     * before actually blocking). These actions are performed by any
+     * thread invoking {@link ForkJoinPool#managedBlock}.  The
+     * unusual methods in this API accommodate synchronizers that may,
+     * but don't usually, block for long periods. Similarly, they
+     * allow more efficient internal handling of cases in which
+     * additional workers may be, but usually are not, needed to
+     * ensure sufficient parallelism.  Toward this end,
+     * implementations of method {@code isReleasable} must be amenable
+     * to repeated invocation.
      *
      * <p>For example, here is a ManagedBlocker based on a
      * ReentrantLock:
@@ -1967,29 +2128,47 @@
     }
 
     // Unsafe mechanics
+    private static final sun.misc.Unsafe UNSAFE;
+    private static final long ctlOffset;
+    private static final long stealCountOffset;
+    private static final long blockedCountOffset;
+    private static final long quiescerCountOffset;
+    private static final long scanGuardOffset;
+    private static final long nextWorkerNumberOffset;
+    private static final long ABASE;
+    private static final int ASHIFT;
 
-    private static final sun.misc.Unsafe UNSAFE = sun.misc.Unsafe.getUnsafe();
-    private static final long workerCountsOffset =
-        objectFieldOffset("workerCounts", ForkJoinPool.class);
-    private static final long runStateOffset =
-        objectFieldOffset("runState", ForkJoinPool.class);
-    private static final long eventCountOffset =
-        objectFieldOffset("eventCount", ForkJoinPool.class);
-    private static final long eventWaitersOffset =
-        objectFieldOffset("eventWaiters", ForkJoinPool.class);
-    private static final long stealCountOffset =
-        objectFieldOffset("stealCount", ForkJoinPool.class);
-    private static final long spareWaitersOffset =
-        objectFieldOffset("spareWaiters", ForkJoinPool.class);
+    static {
+        poolNumberGenerator = new AtomicInteger();
+        workerSeedGenerator = new Random();
+        modifyThreadPermission = new RuntimePermission("modifyThread");
+        defaultForkJoinWorkerThreadFactory =
+            new DefaultForkJoinWorkerThreadFactory();
+        int s;
+        try {
+            UNSAFE = sun.misc.Unsafe.getUnsafe();
+            Class k = ForkJoinPool.class;
+            ctlOffset = UNSAFE.objectFieldOffset
+                (k.getDeclaredField("ctl"));
+            stealCountOffset = UNSAFE.objectFieldOffset
+                (k.getDeclaredField("stealCount"));
+            blockedCountOffset = UNSAFE.objectFieldOffset
+                (k.getDeclaredField("blockedCount"));
+            quiescerCountOffset = UNSAFE.objectFieldOffset
+                (k.getDeclaredField("quiescerCount"));
+            scanGuardOffset = UNSAFE.objectFieldOffset
+                (k.getDeclaredField("scanGuard"));
+            nextWorkerNumberOffset = UNSAFE.objectFieldOffset
+                (k.getDeclaredField("nextWorkerNumber"));
+            Class a = ForkJoinTask[].class;
+            ABASE = UNSAFE.arrayBaseOffset(a);
+            s = UNSAFE.arrayIndexScale(a);
+        } catch (Exception e) {
+            throw new Error(e);
+        }
+        if ((s & (s-1)) != 0)
+            throw new Error("data type scale not a power of two");
+        ASHIFT = 31 - Integer.numberOfLeadingZeros(s);
+    }
 
-    private static long objectFieldOffset(String field, Class<?> klazz) {
-        try {
-            return UNSAFE.objectFieldOffset(klazz.getDeclaredField(field));
-        } catch (NoSuchFieldException e) {
-            // Convert Exception to corresponding Error
-            NoSuchFieldError error = new NoSuchFieldError(field);
-            error.initCause(e);
-            throw error;
-        }
-    }
 }
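
The power-of-two check and shift above let array slot offsets be computed
with a shift instead of a multiply. A standalone sketch of the arithmetic
(the scale value is illustrative; the real one is JVM-dependent):

    public class OffsetDemo {
        public static void main(String[] args) {
            int scale = 4;  // e.g. Unsafe.arrayIndexScale for an Object[] with compressed refs
            int shift = 31 - Integer.numberOfLeadingZeros(scale);  // 2, since scale == 1 << 2
            for (int i = 0; i < 4; i++)
                // ABASE + (i << shift) addresses slot i, same as ABASE + i * scale
                System.out.println((i << shift) == i * scale);     // always true
        }
    }

ForkJoinWorkerThread uses exactly this form, (i << ASHIFT) + ABASE, in its
queue-slot CASes further below.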
--- a/jdk/src/share/classes/java/util/concurrent/ForkJoinTask.java	Tue Mar 08 15:09:49 2011 -0800
+++ b/jdk/src/share/classes/java/util/concurrent/ForkJoinTask.java	Tue Mar 08 15:10:48 2011 -0800
@@ -41,7 +41,8 @@
 import java.util.List;
 import java.util.RandomAccess;
 import java.util.Map;
-import java.util.WeakHashMap;
+import java.lang.ref.WeakReference;
+import java.lang.ref.ReferenceQueue;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CancellationException;
 import java.util.concurrent.ExecutionException;
@@ -52,6 +53,8 @@
 import java.util.concurrent.RunnableFuture;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.locks.ReentrantLock;
+import java.lang.reflect.Constructor;
 
 /**
  * Abstract base class for tasks that run within a {@link ForkJoinPool}.
@@ -95,7 +98,11 @@
  * rethrown to callers attempting to join them. These exceptions may
  * additionally include {@link RejectedExecutionException} stemming
  * from internal resource exhaustion, such as failure to allocate
- * internal task queues.
+ * internal task queues. Rethrown exceptions behave in the same way as
+ * regular exceptions, but, when possible, contain stack traces (as
+ * displayed for example using {@code ex.printStackTrace()}) of both
+ * the thread that initiated the computation and the thread

+ * actually encountering the exception; minimally only the latter.
  *
  * <p>The primary method for awaiting completion and extracting
  * results of a task is {@link #join}, but there are several variants:
@@ -192,8 +199,7 @@
      * status maintenance (2) execution and awaiting completion (3)
      * user-level methods that additionally report results. This is
      * sometimes hard to see because this file orders exported methods
-     * in a way that flows well in javadocs. In particular, most
-     * join mechanics are in method quietlyJoin, below.
+     * in a way that flows well in javadocs.
      */
 
     /*
@@ -215,91 +221,67 @@
 
     /** The run status of this task */
     volatile int status; // accessed directly by pool and workers
-
     private static final int NORMAL      = -1;
     private static final int CANCELLED   = -2;
     private static final int EXCEPTIONAL = -3;
     private static final int SIGNAL      =  1;
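 
     The ordering of these values is load-bearing: all completions are
     negative, and abnormal completions sort below NORMAL, so single
     comparisons classify a status snapshot. Illustrative helpers only;
     the file inlines these comparisons rather than defining such methods:
 
         static boolean isDone(int s)              { return s < 0;  }  // any completion
         static boolean completedNormally(int s)   { return s == -1; } // NORMAL
         static boolean completedAbnormally(int s) { return s < -1;  } // CANCELLED or EXCEPTIONAL
         static boolean signalRequested(int s)     { return s == 1;  } // SIGNAL: a joiner is waiting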
 
     /**
-     * Table of exceptions thrown by tasks, to enable reporting by
-     * callers. Because exceptions are rare, we don't directly keep
-     * them with task objects, but instead use a weak ref table.  Note
-     * that cancellation exceptions don't appear in the table, but are
-     * instead recorded as status values.
-     * TODO: Use ConcurrentReferenceHashMap
-     */
-    static final Map<ForkJoinTask<?>, Throwable> exceptionMap =
-        Collections.synchronizedMap
-        (new WeakHashMap<ForkJoinTask<?>, Throwable>());
-
-    // Maintaining completion status
-
-    /**
      * Marks completion and wakes up threads waiting to join this task,
      * also clearing signal request bits.
      *
      * @param completion one of NORMAL, CANCELLED, EXCEPTIONAL
+     * @return completion status on exit
      */
-    private void setCompletion(int completion) {
-        int s;
-        while ((s = status) >= 0) {
+    private int setCompletion(int completion) {
+        for (int s;;) {
+            if ((s = status) < 0)
+                return s;
             if (UNSAFE.compareAndSwapInt(this, statusOffset, s, completion)) {
                 if (s != 0)
                     synchronized (this) { notifyAll(); }
-                break;
+                return completion;
             }
         }
     }
 
     /**
-     * Records exception and sets exceptional completion.
+     * Tries to block a worker thread until completed or timed out.
+     * Uses Object.wait time argument conventions.
+     * May fail on contention or interrupt.
      *
-     * @return status on exit
+     * @param millis if > 0, the wait time in milliseconds; 0 means wait indefinitely.
      */
-    private void setExceptionalCompletion(Throwable rex) {
-        exceptionMap.put(this, rex);
-        setCompletion(EXCEPTIONAL);
-    }
-
-    /**
-     * Blocks a worker thread until completed or timed out.  Called
-     * only by pool.
-     */
-    final void internalAwaitDone(long millis, int nanos) {
-        int s = status;
-        if ((s == 0 &&
-             UNSAFE.compareAndSwapInt(this, statusOffset, 0, SIGNAL)) ||
-            s > 0)  {
-            try {     // the odd construction reduces lock bias effects
+    final void tryAwaitDone(long millis) {
+        int s;
+        try {
+            if (((s = status) > 0 ||
+                 (s == 0 &&
+                  UNSAFE.compareAndSwapInt(this, statusOffset, 0, SIGNAL))) &&
+                status > 0) {
                 synchronized (this) {
                     if (status > 0)
-                        wait(millis, nanos);
-                    else
-                        notifyAll();
+                        wait(millis);
                 }
-            } catch (InterruptedException ie) {
-                cancelIfTerminating();
             }
+        } catch (InterruptedException ie) {
+            // caller must check termination
         }
     }
 
     /**
      * Blocks a non-worker-thread until completion.
+     * @return status upon completion
      */
-    private void externalAwaitDone() {
-        if (status >= 0) {
+    private int externalAwaitDone() {
+        int s;
+        if ((s = status) >= 0) {
             boolean interrupted = false;
             synchronized (this) {
-                for (;;) {
-                    int s = status;
+                while ((s = status) >= 0) {
                     if (s == 0)
                         UNSAFE.compareAndSwapInt(this, statusOffset,
                                                  0, SIGNAL);
-                    else if (s < 0) {
-                        notifyAll();
-                        break;
-                    }
                     else {
                         try {
                             wait();
@@ -312,53 +294,308 @@
             if (interrupted)
                 Thread.currentThread().interrupt();
         }
+        return s;
     }
 
     /**
      * Blocks a non-worker-thread until completion or interruption or timeout.
      */
-    private void externalInterruptibleAwaitDone(boolean timed, long nanos)
+    private int externalInterruptibleAwaitDone(long millis)
         throws InterruptedException {
+        int s;
         if (Thread.interrupted())
             throw new InterruptedException();
-        if (status >= 0) {
-            long startTime = timed ? System.nanoTime() : 0L;
+        if ((s = status) >= 0) {
             synchronized (this) {
-                for (;;) {
-                    long nt;
-                    int s = status;
+                while ((s = status) >= 0) {
                     if (s == 0)
                         UNSAFE.compareAndSwapInt(this, statusOffset,
                                                  0, SIGNAL);
-                    else if (s < 0) {
-                        notifyAll();
+                    else {
+                        wait(millis);
+                        if (millis > 0L)
+                            break;
+                    }
+                }
+            }
+        }
+        return s;
+    }
+
+    /**
+     * Primary execution method for stolen tasks. Unless done, calls
+     * exec and records status if completed, but doesn't wait for
+     * completion otherwise.
+     */
+    final void doExec() {
+        if (status >= 0) {
+            boolean completed;
+            try {
+                completed = exec();
+            } catch (Throwable rex) {
+                setExceptionalCompletion(rex);
+                return;
+            }
+            if (completed)
+                setCompletion(NORMAL); // must be outside try block
+        }
+    }
+
+    /**
+     * Primary mechanics for join, get, quietlyJoin.
+     * @return status upon completion
+     */
+    private int doJoin() {
+        Thread t; ForkJoinWorkerThread w; int s; boolean completed;
+        if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) {
+            if ((s = status) < 0)
+                return s;
+            if ((w = (ForkJoinWorkerThread)t).unpushTask(this)) {
+                try {
+                    completed = exec();
+                } catch (Throwable rex) {
+                    return setExceptionalCompletion(rex);
+                }
+                if (completed)
+                    return setCompletion(NORMAL);
+            }
+            return w.joinTask(this);
+        }
+        else
+            return externalAwaitDone();
+    }
+
+    /**
+     * Primary mechanics for invoke, quietlyInvoke.
+     * @return status upon completion
+     */
+    private int doInvoke() {
+        int s; boolean completed;
+        if ((s = status) < 0)
+            return s;
+        try {
+            completed = exec();
+        } catch (Throwable rex) {
+            return setExceptionalCompletion(rex);
+        }
+        if (completed)
+            return setCompletion(NORMAL);
+        else
+            return doJoin();
+    }
+
+    // Exception table support
+
+    /**
+     * Table of exceptions thrown by tasks, to enable reporting by
+     * callers. Because exceptions are rare, we don't directly keep
+     * them with task objects, but instead use a weak ref table.  Note
+     * that cancellation exceptions don't appear in the table, but are
+     * instead recorded as status values.
+     *
+     * Note: These statics are initialized in the static block below.
+     */
+    private static final ExceptionNode[] exceptionTable;
+    private static final ReentrantLock exceptionTableLock;
+    private static final ReferenceQueue<Object> exceptionTableRefQueue;
+
+    /**
+     * Fixed capacity for exceptionTable.
+     */
+    private static final int EXCEPTION_MAP_CAPACITY = 32;
+
+    /**
+     * Key-value nodes for exception table.  The chained hash table
+     * uses identity comparisons, full locking, and weak references
+     * for keys. The table has a fixed capacity because it only
+     * maintains task exceptions long enough for joiners to access
+     * them, so should never become very large for sustained
+     * periods. However, since we do not know when the last joiner
+     * completes, we must use weak references and expunge them. We do
+     * so on each operation (hence full locking). Also, some thread in
+     * any ForkJoinPool will call helpExpungeStaleExceptions when its
+     * pool becomes quiescent (see ForkJoinPool.isQuiescent).
+     */
+    static final class ExceptionNode extends WeakReference<ForkJoinTask<?>> {
+        final Throwable ex;
+        ExceptionNode next;
+        final long thrower;  // use id not ref to avoid weak cycles
+        ExceptionNode(ForkJoinTask<?> task, Throwable ex, ExceptionNode next) {
+            super(task, exceptionTableRefQueue);
+            this.ex = ex;
+            this.next = next;
+            this.thrower = Thread.currentThread().getId();
+        }
+    }
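+
+    // A worked example of the bucket indexing the methods below rely on,
+    // given the fixed capacity of 32 (the hash value is hypothetical):
+    //     int h = System.identityHashCode(task);  // e.g. 0x9E3779B9
+    //     int index = h & (32 - 1);               // low 5 bits; 0x...B9 & 31 == 25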
+
+    /**
+     * Records exception and sets exceptional completion.
+     *
+     * @return status on exit
+     */
+    private int setExceptionalCompletion(Throwable ex) {
+        int h = System.identityHashCode(this);
+        final ReentrantLock lock = exceptionTableLock;
+        lock.lock();
+        try {
+            expungeStaleExceptions();
+            ExceptionNode[] t = exceptionTable;
+            int i = h & (t.length - 1);
+            for (ExceptionNode e = t[i]; ; e = e.next) {
+                if (e == null) {
+                    t[i] = new ExceptionNode(this, ex, t[i]);
+                    break;
+                }
+                if (e.get() == this) // already present
+                    break;
+            }
+        } finally {
+            lock.unlock();
+        }
+        return setCompletion(EXCEPTIONAL);
+    }
+
+    /**
+     * Removes exception node and clears status.
+     */
+    private void clearExceptionalCompletion() {
+        int h = System.identityHashCode(this);
+        final ReentrantLock lock = exceptionTableLock;
+        lock.lock();
+        try {
+            ExceptionNode[] t = exceptionTable;
+            int i = h & (t.length - 1);
+            ExceptionNode e = t[i];
+            ExceptionNode pred = null;
+            while (e != null) {
+                ExceptionNode next = e.next;
+                if (e.get() == this) {
+                    if (pred == null)
+                        t[i] = next;
+                    else
+                        pred.next = next;
+                    break;
+                }
+                pred = e;
+                e = next;
+            }
+            expungeStaleExceptions();
+            status = 0;
+        } finally {
+            lock.unlock();
+        }
+    }
+
+    /**
+     * Returns a rethrowable exception for the given task, if
+     * available. To provide accurate stack traces, if the exception
+     * was not thrown by the current thread, we try to create a new
+     * exception of the same type as the one thrown, but with the
+     * recorded exception as its cause. If there is no such
+     * constructor, we instead try to use a no-arg constructor,
+     * followed by initCause, to the same effect. If none of these
+     * apply, or any of them fail due to other exceptions, we return the
+     * recorded exception, which is still correct, although it may
+     * contain a misleading stack trace.
+     *
+     * @return the exception, or null if none
+     */
+    private Throwable getThrowableException() {
+        if (status != EXCEPTIONAL)
+            return null;
+        int h = System.identityHashCode(this);
+        ExceptionNode e;
+        final ReentrantLock lock = exceptionTableLock;
+        lock.lock();
+        try {
+            expungeStaleExceptions();
+            ExceptionNode[] t = exceptionTable;
+            e = t[h & (t.length - 1)];
+            while (e != null && e.get() != this)
+                e = e.next;
+        } finally {
+            lock.unlock();
+        }
+        Throwable ex;
+        if (e == null || (ex = e.ex) == null)
+            return null;
+        if (e.thrower != Thread.currentThread().getId()) {
+            Class ec = ex.getClass();
+            try {
+                Constructor<?> noArgCtor = null;
+                Constructor<?>[] cs = ec.getConstructors(); // public ctors only
+                for (int i = 0; i < cs.length; ++i) {
+                    Constructor<?> c = cs[i];
+                    Class<?>[] ps = c.getParameterTypes();
+                    if (ps.length == 0)
+                        noArgCtor = c;
+                    else if (ps.length == 1 && ps[0] == Throwable.class)
+                        return (Throwable)(c.newInstance(ex));
+                }
+                if (noArgCtor != null) {
+                    Throwable wx = (Throwable)(noArgCtor.newInstance());
+                    wx.initCause(ex);
+                    return wx;
+                }
+            } catch (Exception ignore) {
+            }
+        }
+        return ex;
+    }
+
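+    // A hypothetical example of the reconstruction just described: because
+    // AppException exposes a public (Throwable) constructor, a thread joining
+    // a failed task receives a *new* AppException, carrying its own stack
+    // trace, whose cause is the exception recorded by the thrower:
+    //
+    //     class AppException extends RuntimeException {
+    //         public AppException(Throwable cause) { super(cause); }  // enables re-wrapping
+    //     }
+    //
+    // Exception types offering neither a public (Throwable) constructor nor a
+    // public no-arg constructor are rethrown as recorded, so their stack
+    // traces show only the throwing thread.
+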
+    /**
+     * Poll stale refs and remove them. Call only while holding lock.
+     */
+    private static void expungeStaleExceptions() {
+        for (Object x; (x = exceptionTableRefQueue.poll()) != null;) {
+            if (x instanceof ExceptionNode) {
+                ForkJoinTask<?> key = ((ExceptionNode)x).get();
+                ExceptionNode[] t = exceptionTable;
+                int i = System.identityHashCode(key) & (t.length - 1);
+                ExceptionNode e = t[i];
+                ExceptionNode pred = null;
+                while (e != null) {
+                    ExceptionNode next = e.next;
+                    if (e == x) {
+                        if (pred == null)
+                            t[i] = next;
+                        else
+                            pred.next = next;
                         break;
                     }
-                    else if (!timed)
-                        wait();
-                    else if ((nt = nanos - (System.nanoTime()-startTime)) > 0L)
-                        wait(nt / 1000000, (int)(nt % 1000000));
-                    else
-                        break;
+                    pred = e;
+                    e = next;
                 }
             }
         }
     }
 
     /**
-     * Unless done, calls exec and records status if completed, but
-     * doesn't wait for completion otherwise. Primary execution method
-     * for ForkJoinWorkerThread.
+     * If lock is available, poll stale refs and remove them.
+     * Called from ForkJoinPool when pools become quiescent.
      */
-    final void quietlyExec() {
-        try {
-            if (status < 0 || !exec())
-                return;
-        } catch (Throwable rex) {
-            setExceptionalCompletion(rex);
-            return;
+    static final void helpExpungeStaleExceptions() {
+        final ReentrantLock lock = exceptionTableLock;
+        if (lock.tryLock()) {
+            try {
+                expungeStaleExceptions();
+            } finally {
+                lock.unlock();
+            }
         }
-        setCompletion(NORMAL); // must be outside try block
+    }
+
+    /**
+     * Reports the result of invoke or join; called only upon
+     * non-normal return of internal versions.
+     */
+    private V reportResult() {
+        int s; Throwable ex;
+        if ((s = status) == CANCELLED)
+            throw new CancellationException();
+        if (s == EXCEPTIONAL && (ex = getThrowableException()) != null)
+            UNSAFE.throwException(ex);
+        return getRawResult();
     }
 
     // public methods
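 
     A brief usage sketch (hypothetical, not part of this changeset) of the
     public methods that follow; join() rethrows task failures via the
     doJoin/reportResult path above:
 
         class Fib extends RecursiveTask<Integer> {
             final int n;
             Fib(int n) { this.n = n; }
             protected Integer compute() {
                 if (n <= 1)
                     return n;
                 Fib f1 = new Fib(n - 1);
                 f1.fork();                                   // push onto this worker's queue
                 return new Fib(n - 2).compute() + f1.join(); // join may run or await f1
             }
         }
         // new ForkJoinPool().invoke(new Fib(20)) evaluates to 6765.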
@@ -399,11 +636,10 @@
      * @return the computed result
      */
     public final V join() {
-        quietlyJoin();
-        Throwable ex;
-        if (status < NORMAL && (ex = getException()) != null)
-            UNSAFE.throwException(ex);
-        return getRawResult();
+        if (doJoin() != NORMAL)
+            return reportResult();
+        else
+            return getRawResult();
     }
 
     /**
@@ -415,11 +651,10 @@
      * @return the computed result
      */
     public final V invoke() {
-        quietlyInvoke();
-        Throwable ex;
-        if (status < NORMAL && (ex = getException()) != null)
-            UNSAFE.throwException(ex);
-        return getRawResult();
+        if (doInvoke() != NORMAL)
+            return reportResult();
+        else
+            return getRawResult();
     }
 
     /**
@@ -483,22 +718,16 @@
             }
             else if (i != 0)
                 t.fork();
-            else {
-                t.quietlyInvoke();
-                if (ex == null && t.status < NORMAL)
-                    ex = t.getException();
-            }
+            else if (t.doInvoke() < NORMAL && ex == null)
+                ex = t.getException();
         }
         for (int i = 1; i <= last; ++i) {
             ForkJoinTask<?> t = tasks[i];
             if (t != null) {
                 if (ex != null)
                     t.cancel(false);
-                else {
-                    t.quietlyJoin();
-                    if (ex == null && t.status < NORMAL)
-                        ex = t.getException();
-                }
+                else if (t.doJoin() < NORMAL && ex == null)
+                    ex = t.getException();
             }
         }
         if (ex != null)
@@ -546,22 +775,16 @@
             }
             else if (i != 0)
                 t.fork();
-            else {
-                t.quietlyInvoke();
-                if (ex == null && t.status < NORMAL)
-                    ex = t.getException();
-            }
+            else if (t.doInvoke() < NORMAL && ex == null)
+                ex = t.getException();
         }
         for (int i = 1; i <= last; ++i) {
             ForkJoinTask<?> t = ts.get(i);
             if (t != null) {
                 if (ex != null)
                     t.cancel(false);
-                else {
-                    t.quietlyJoin();
-                    if (ex == null && t.status < NORMAL)
-                        ex = t.getException();
-                }
+                else if (t.doJoin() < NORMAL && ex == null)
+                    ex = t.getException();
             }
         }
         if (ex != null)
@@ -597,8 +820,7 @@
      * @return {@code true} if this task is now cancelled
      */
     public boolean cancel(boolean mayInterruptIfRunning) {
-        setCompletion(CANCELLED);
-        return status == CANCELLED;
+        return setCompletion(CANCELLED) == CANCELLED;
     }
 
     /**
@@ -614,21 +836,6 @@
         }
     }
 
-    /**
-     * Cancels if current thread is a terminating worker thread,
-     * ignoring any exceptions thrown by cancel.
-     */
-    final void cancelIfTerminating() {
-        Thread t = Thread.currentThread();
-        if ((t instanceof ForkJoinWorkerThread) &&
-            ((ForkJoinWorkerThread) t).isTerminating()) {
-            try {
-                cancel(false);
-            } catch (Throwable ignore) {
-            }
-        }
-    }
-
     public final boolean isDone() {
         return status < 0;
     }
@@ -668,7 +875,7 @@
         int s = status;
         return ((s >= NORMAL)    ? null :
                 (s == CANCELLED) ? new CancellationException() :
-                exceptionMap.get(this));
+                getThrowableException());
     }
 
     /**
@@ -726,19 +933,13 @@
      * member of a ForkJoinPool and was interrupted while waiting
      */
     public final V get() throws InterruptedException, ExecutionException {
-        Thread t = Thread.currentThread();
-        if (t instanceof ForkJoinWorkerThread)
-            quietlyJoin();
-        else
-            externalInterruptibleAwaitDone(false, 0L);
-        int s = status;
-        if (s != NORMAL) {
-            Throwable ex;
-            if (s == CANCELLED)
-                throw new CancellationException();
-            if (s == EXCEPTIONAL && (ex = exceptionMap.get(this)) != null)
-                throw new ExecutionException(ex);
-        }
+        int s = (Thread.currentThread() instanceof ForkJoinWorkerThread) ?
+            doJoin() : externalInterruptibleAwaitDone(0L);
+        Throwable ex;
+        if (s == CANCELLED)
+            throw new CancellationException();
+        if (s == EXCEPTIONAL && (ex = getThrowableException()) != null)
+            throw new ExecutionException(ex);
         return getRawResult();
     }
 
@@ -758,20 +959,39 @@
      */
     public final V get(long timeout, TimeUnit unit)
         throws InterruptedException, ExecutionException, TimeoutException {
-        long nanos = unit.toNanos(timeout);
         Thread t = Thread.currentThread();
-        if (t instanceof ForkJoinWorkerThread)
-            ((ForkJoinWorkerThread)t).joinTask(this, true, nanos);
-        else
-            externalInterruptibleAwaitDone(true, nanos);
+        if (t instanceof ForkJoinWorkerThread) {
+            ForkJoinWorkerThread w = (ForkJoinWorkerThread) t;
+            long nanos = unit.toNanos(timeout);
+            if (status >= 0) {
+                boolean completed = false;
+                if (w.unpushTask(this)) {
+                    try {
+                        completed = exec();
+                    } catch (Throwable rex) {
+                        setExceptionalCompletion(rex);
+                    }
+                }
+                if (completed)
+                    setCompletion(NORMAL);
+                else if (status >= 0 && nanos > 0)
+                    w.pool.timedAwaitJoin(this, nanos);
+            }
+        }
+        else {
+            long millis = unit.toMillis(timeout);
+            if (millis > 0)
+                externalInterruptibleAwaitDone(millis);
+        }
         int s = status;
         if (s != NORMAL) {
             Throwable ex;
             if (s == CANCELLED)
                 throw new CancellationException();
-            if (s == EXCEPTIONAL && (ex = exceptionMap.get(this)) != null)
+            if (s != EXCEPTIONAL)
+                throw new TimeoutException();
+            if ((ex = getThrowableException()) != null)
                 throw new ExecutionException(ex);
-            throw new TimeoutException();
         }
         return getRawResult();
     }
@@ -783,28 +1003,7 @@
      * known to have aborted.
      */
     public final void quietlyJoin() {
-        Thread t;
-        if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) {
-            ForkJoinWorkerThread w = (ForkJoinWorkerThread) t;
-            if (status >= 0) {
-                if (w.unpushTask(this)) {
-                    boolean completed;
-                    try {
-                        completed = exec();
-                    } catch (Throwable rex) {
-                        setExceptionalCompletion(rex);
-                        return;
-                    }
-                    if (completed) {
-                        setCompletion(NORMAL);
-                        return;
-                    }
-                }
-                w.joinTask(this, false, 0L);
-            }
-        }
-        else
-            externalAwaitDone();
+        doJoin();
     }
 
     /**
@@ -813,19 +1012,7 @@
      * exception.
      */
     public final void quietlyInvoke() {
-        if (status >= 0) {
-            boolean completed;
-            try {
-                completed = exec();
-            } catch (Throwable rex) {
-                setExceptionalCompletion(rex);
-                return;
-            }
-            if (completed)
-                setCompletion(NORMAL);
-            else
-                quietlyJoin();
-        }
+        doInvoke();
     }
 
     /**
@@ -864,8 +1051,9 @@
      */
     public void reinitialize() {
         if (status == EXCEPTIONAL)
-            exceptionMap.remove(this);
-        status = 0;
+            clearExceptionalCompletion();
+        else
+            status = 0;
     }
 
     /**
@@ -1176,23 +1364,23 @@
         s.defaultReadObject();
         Object ex = s.readObject();
         if (ex != null)
-            setExceptionalCompletion((Throwable) ex);
+            setExceptionalCompletion((Throwable)ex);
     }
 
     // Unsafe mechanics
-
-    private static final sun.misc.Unsafe UNSAFE = sun.misc.Unsafe.getUnsafe();
-    private static final long statusOffset =
-        objectFieldOffset("status", ForkJoinTask.class);
-
-    private static long objectFieldOffset(String field, Class<?> klazz) {
+    private static final sun.misc.Unsafe UNSAFE;
+    private static final long statusOffset;
+    static {
+        exceptionTableLock = new ReentrantLock();
+        exceptionTableRefQueue = new ReferenceQueue<Object>();
+        exceptionTable = new ExceptionNode[EXCEPTION_MAP_CAPACITY];
         try {
-            return UNSAFE.objectFieldOffset(klazz.getDeclaredField(field));
-        } catch (NoSuchFieldException e) {
-            // Convert Exception to corresponding Error
-            NoSuchFieldError error = new NoSuchFieldError(field);
-            error.initCause(e);
-            throw error;
+            UNSAFE = sun.misc.Unsafe.getUnsafe();
+            statusOffset = UNSAFE.objectFieldOffset
+                (ForkJoinTask.class.getDeclaredField("status"));
+        } catch (Exception e) {
+            throw new Error(e);
         }
     }
+
 }
--- a/jdk/src/share/classes/java/util/concurrent/ForkJoinWorkerThread.java	Tue Mar 08 15:09:49 2011 -0800
+++ b/jdk/src/share/classes/java/util/concurrent/ForkJoinWorkerThread.java	Tue Mar 08 15:10:48 2011 -0800
@@ -35,9 +35,7 @@
 
 package java.util.concurrent;
 
-import java.util.Random;
 import java.util.Collection;
-import java.util.concurrent.locks.LockSupport;
 import java.util.concurrent.RejectedExecutionException;
 
 /**
@@ -84,33 +82,38 @@
      * a footprint as possible even in programs generating huge
      * numbers of tasks. To accomplish this, we shift the CAS
      * arbitrating pop vs deq (steal) from being on the indices
-     * ("base" and "sp") to the slots themselves (mainly via method
-     * "casSlotNull()"). So, both a successful pop and deq mainly
-     * entail a CAS of a slot from non-null to null.  Because we rely
-     * on CASes of references, we do not need tag bits on base or sp.
-     * They are simple ints as used in any circular array-based queue
-     * (see for example ArrayDeque).  Updates to the indices must
-     * still be ordered in a way that guarantees that sp == base means
-     * the queue is empty, but otherwise may err on the side of
-     * possibly making the queue appear nonempty when a push, pop, or
-     * deq have not fully committed. Note that this means that the deq
-     * operation, considered individually, is not wait-free. One thief
-     * cannot successfully continue until another in-progress one (or,
-     * if previously empty, a push) completes.  However, in the
+     * ("queueBase" and "queueTop") to the slots themselves (mainly
+     * via method "casSlotNull()"). So, both a successful pop and deq
+     * mainly entail a CAS of a slot from non-null to null.  Because
+     * we rely on CASes of references, we do not need tag bits on
+     * queueBase or queueTop.  They are simple ints as used in any
+     * circular array-based queue (see for example ArrayDeque).
+     * Updates to the indices must still be ordered in a way that
+     * guarantees that queueTop == queueBase means the queue is empty,
+     * but otherwise may err on the side of possibly making the queue
+     * appear nonempty when a push, pop, or deq have not fully
+     * committed. Note that this means that the deq operation,
+     * considered individually, is not wait-free. One thief cannot
+     * successfully continue until another in-progress one (or, if
+     * previously empty, a push) completes.  However, in the
      * aggregate, we ensure at least probabilistic non-blockingness.
      * If an attempted steal fails, a thief always chooses a different
      * random victim target to try next. So, in order for one thief to
      * progress, it suffices for any in-progress deq or new push on
-     * any empty queue to complete. One reason this works well here is
-     * that apparently-nonempty often means soon-to-be-stealable,
-     * which gives threads a chance to set activation status if
-     * necessary before stealing.
+     * any empty queue to complete.
      *
      * This approach also enables support for "async mode" where local
      * task processing is in FIFO, not LIFO order; simply by using a
      * version of deq rather than pop when locallyFifo is true (as set
      * by the ForkJoinPool).  This allows use in message-passing
-     * frameworks in which tasks are never joined.
+     * frameworks in which tasks are never joined.  However, neither
+     * mode considers affinities, loads, cache localities, etc., so
+     * rarely provides the best possible performance on a given
+     * machine, but portably provides good throughput by averaging over
+     * these factors.  (Further, even if we did try to use such
+     * information, we do not usually have a basis for exploiting
+     * it. For example, some sets of tasks profit from cache
+     * affinities, but others are harmed by cache pollution effects.)
      *
      * When a worker would otherwise be blocked waiting to join a
      * task, it first tries a form of linear helping: Each worker
@@ -137,29 +140,26 @@
      * miss links in the chain during long-lived tasks, GC stalls etc
      * (which is OK since blocking in such cases is usually a good
      * idea).  (4) We bound the number of attempts to find work (see
-     * MAX_HELP_DEPTH) and fall back to suspending the worker and if
-     * necessary replacing it with a spare (see
-     * ForkJoinPool.awaitJoin).
+     * MAX_HELP) and fall back to suspending the worker and if
+     * necessary replacing it with another.
      *
      * Efficient implementation of these algorithms currently relies
      * on an uncomfortable amount of "Unsafe" mechanics. To maintain
-     * correct orderings, reads and writes of variable base require
-     * volatile ordering.  Variable sp does not require volatile
-     * writes but still needs store-ordering, which we accomplish by
-     * pre-incrementing sp before filling the slot with an ordered
-     * store.  (Pre-incrementing also enables backouts used in
-     * joinTask.)  Because they are protected by volatile base reads,
-     * reads of the queue array and its slots by other threads do not
-     * need volatile load semantics, but writes (in push) require
-     * store order and CASes (in pop and deq) require (volatile) CAS
-     * semantics.  (Michael, Saraswat, and Vechev's algorithm has
-     * similar properties, but without support for nulling slots.)
-     * Since these combinations aren't supported using ordinary
-     * volatiles, the only way to accomplish these efficiently is to
-     * use direct Unsafe calls. (Using external AtomicIntegers and
-     * AtomicReferenceArrays for the indices and array is
-     * significantly slower because of memory locality and indirection
-     * effects.)
+     * correct orderings, reads and writes of variable queueBase
+     * require volatile ordering.  Variable queueTop need not be
+     * volatile because non-local reads always follow those of
+     * queueBase.  Similarly, because they are protected by volatile
+     * queueBase reads, reads of the queue array and its slots by
+     * other threads do not need volatile load semantics, but writes
+     * (in push) require store order and CASes (in pop and deq)
+     * require (volatile) CAS semantics.  (Michael, Saraswat, and
+     * Vechev's algorithm has similar properties, but without support
+     * for nulling slots.)  Since these combinations aren't supported
+     * using ordinary volatiles, the only way to accomplish these
+     * efficiently is to use direct Unsafe calls. (Using external
+     * AtomicIntegers and AtomicReferenceArrays for the indices and
+     * array is significantly slower because of memory locality and
+     * indirection effects.)
      *
      * Further, performance on most platforms is very sensitive to
      * placement and sizing of the (resizable) queue array.  Even
@@ -167,30 +167,13 @@
      * initial size must be large enough to counteract cache
      * contention effects across multiple queues (especially in the
      * presence of GC cardmarking). Also, to improve thread-locality,
-     * queues are initialized after starting.  All together, these
-     * low-level implementation choices produce as much as a factor of
-     * 4 performance improvement compared to naive implementations,
-     * and enable the processing of billions of tasks per second,
-     * sometimes at the expense of ugliness.
+     * queues are initialized after starting.
      */
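 
     To make the slot-CAS arbitration described above concrete, a toy
     sketch using AtomicReferenceArray in place of the Unsafe operations;
     the real versions are deqTask and popTask below (growth, helping, and
     memory-fence subtleties omitted):
 
         import java.util.concurrent.atomic.AtomicReferenceArray;
 
         class TinyDeque<T> {
             final AtomicReferenceArray<T> slots = new AtomicReferenceArray<>(8192);
             volatile int base;  // analogous to queueBase: next slot to steal
             int top;            // analogous to queueTop: owner-only
 
             void push(T t) {                       // owner thread only; no growth
                 slots.set(top & 8191, t);
                 ++top;
             }
 
             T steal() {                            // any thread; may fail under contention
                 int b = base;                      // volatile read orders what follows
                 if (top == b)
                     return null;                   // appears empty
                 T t = slots.get(b & 8191);
                 if (t != null && slots.compareAndSet(b & 8191, t, null)) {
                     base = b + 1;                  // commit only after winning the slot
                     return t;
                 }
                 return null;                       // lost the race; try another victim
             }
         }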
 
     /**
-     * Generator for initial random seeds for random victim
-     * selection. This is used only to create initial seeds. Random
-     * steals use a cheaper xorshift generator per steal attempt. We
-     * expect only rare contention on seedGenerator, so just use a
-     * plain Random.
+     * Mask for pool indices encoded as shorts
      */
-    private static final Random seedGenerator = new Random();
-
-    /**
-     * The maximum stolen->joining link depth allowed in helpJoinTask.
-     * Depths for legitimate chains are unbounded, but we use a fixed
-     * constant to avoid (otherwise unchecked) cycles and bound
-     * staleness of traversal parameters at the expense of sometimes
-     * blocking when we could be helping.
-     */
-    private static final int MAX_HELP_DEPTH = 8;
+    private static final int  SMASK  = 0xffff;
 
     /**
      * Capacity of work-stealing queue array upon initialization.
@@ -200,12 +183,19 @@
     private static final int INITIAL_QUEUE_CAPACITY = 1 << 13;
 
     /**
-     * Maximum work-stealing queue array size.  Must be less than or
-     * equal to 1 << (31 - width of array entry) to ensure lack of
-     * index wraparound. The value is set in the static block
-     * at the end of this file after obtaining width.
+     * Maximum size for queue array. Must be a power of two
+     * less than or equal to 1 << (31 - width of array entry) to
+     * ensure lack of index wraparound, but is capped at a lower
+     * value to help users trap runaway computations.
      */
-    private static final int MAXIMUM_QUEUE_CAPACITY;
+    private static final int MAXIMUM_QUEUE_CAPACITY = 1 << 24; // 16M
+
+    /**
+     * The work-stealing queue array. Size must be a power of two.
+     * Initialized when started (as opposed to when constructed), to
+     * improve memory locality.
+     */
+    ForkJoinTask<?>[] queue;
 
     /**
      * The pool this thread works in. Accessed directly by ForkJoinTask.
@@ -213,25 +203,19 @@
     final ForkJoinPool pool;
 
     /**
-     * The work-stealing queue array. Size must be a power of two.
-     * Initialized in onStart, to improve memory locality.
+     * Index (mod queue.length) of next queue slot to push to or pop
+     * from. It is written only by owner thread, and accessed by other
+     * threads only after reading (volatile) queueBase.  Both queueTop
+     * and queueBase are allowed to wrap around on overflow, but
+     * (queueTop - queueBase) still estimates size.
      */
-    private ForkJoinTask<?>[] queue;
+    int queueTop;
 
     /**
      * Index (mod queue.length) of least valid queue slot, which is
      * always the next position to steal from if nonempty.
      */
-    private volatile int base;
-
-    /**
-     * Index (mod queue.length) of next queue slot to push to or pop
-     * from. It is written only by owner thread, and accessed by other
-     * threads only after reading (volatile) base.  Both sp and base
-     * are allowed to wrap around on overflow, but (sp - base) still
-     * estimates size.
-     */
-    private int sp;
+    volatile int queueBase;
 
     /**
      * The index of most recent stealer, used as a hint to avoid
@@ -240,92 +224,68 @@
      * of them (usually the most current). Declared non-volatile,
      * relying on other prevailing sync to keep reasonably current.
      */
-    private int stealHint;
-
-    /**
-     * Run state of this worker. In addition to the usual run levels,
-     * tracks if this worker is suspended as a spare, and if it was
-     * killed (trimmed) while suspended. However, "active" status is
-     * maintained separately and modified only in conjunction with
-     * CASes of the pool's runState (which are currently sadly
-     * manually inlined for performance.)  Accessed directly by pool
-     * to simplify checks for normal (zero) status.
-     */
-    volatile int runState;
-
-    private static final int TERMINATING = 0x01;
-    private static final int TERMINATED  = 0x02;
-    private static final int SUSPENDED   = 0x04; // inactive spare
-    private static final int TRIMMED     = 0x08; // killed while suspended
-
-    /**
-     * Number of steals. Directly accessed (and reset) by
-     * pool.tryAccumulateStealCount when idle.
-     */
-    int stealCount;
-
-    /**
-     * Seed for random number generator for choosing steal victims.
-     * Uses Marsaglia xorshift. Must be initialized as nonzero.
-     */
-    private int seed;
-
-    /**
-     * Activity status. When true, this worker is considered active.
-     * Accessed directly by pool.  Must be false upon construction.
-     */
-    boolean active;
-
-    /**
-     * True if use local fifo, not default lifo, for local polling.
-     * Shadows value from ForkJoinPool.
-     */
-    private final boolean locallyFifo;
+    int stealHint;
 
     /**
      * Index of this worker in pool array. Set once by pool before
      * running, and accessed directly by pool to locate this worker in
      * its workers array.
      */
-    int poolIndex;
+    final int poolIndex;
+
+    /**
+     * Encoded record for pool task waits. Usages are always
+     * surrounded by volatile reads/writes
+     */
+    int nextWait;
 
     /**
-     * The last pool event waited for. Accessed only by pool in
-     * callback methods invoked within this thread.
+     * Complement of poolIndex, offset by count of entries of task
+     * waits. Accessed by ForkJoinPool to manage event waiters.
      */
-    int lastEventCount;
+    volatile int eventCount;
+
+    /**
+     * Seed for random number generator for choosing steal victims.
+     * Uses Marsaglia xorshift. Must be initialized as nonzero.
+     */
+    int seed;
 
     /**
-     * Encoded index and event count of next event waiter. Accessed
-     * only by ForkJoinPool for managing event waiters.
+     * Number of steals. Directly accessed (and reset) by pool when
+     * idle.
      */
-    volatile long nextWaiter;
+    int stealCount;
+
+    /**
+     * True if this worker should or did terminate
+     */
+    volatile boolean terminate;
 
     /**
-     * Number of times this thread suspended as spare. Accessed only
-     * by pool.
+     * Set to true before LockSupport.park; false on return
      */
-    int spareCount;
+    volatile boolean parked;
 
     /**
-     * Encoded index and count of next spare waiter. Accessed only
-     * by ForkJoinPool for managing spares.
+     * True if use local fifo, not default lifo, for local polling.
+     * Shadows value from ForkJoinPool.
      */
-    volatile int nextSpare;
+    final boolean locallyFifo;
+
+    /**
+     * The task most recently stolen from another worker (or
+     * submission queue).  All uses are surrounded by enough volatile
+     * reads/writes to maintain as non-volatile.
+     */
+    ForkJoinTask<?> currentSteal;
 
     /**
      * The task currently being joined, set only when actively trying
-     * to help other stealers in helpJoinTask. Written only by this
-     * thread, but read by others.
+     * to help other stealers in helpJoinTask. All uses are surrounded
+     * by enough volatile reads/writes to maintain as non-volatile.
      */
-    private volatile ForkJoinTask<?> currentJoin;
-
-    /**
-     * The task most recently stolen from another worker (or
-     * submission queue).  Written only by this thread, but read by
-     * others.
-     */
-    private volatile ForkJoinTask<?> currentSteal;
+    ForkJoinTask<?> currentJoin;
 
     /**
      * Creates a ForkJoinWorkerThread operating in the given pool.
@@ -334,24 +294,19 @@
      * @throws NullPointerException if pool is null
      */
     protected ForkJoinWorkerThread(ForkJoinPool pool) {
+        super(pool.nextWorkerName());
         this.pool = pool;
-        this.locallyFifo = pool.locallyFifo;
+        int k = pool.registerWorker(this);
+        poolIndex = k;
+        eventCount = ~k & SMASK; // clear wait count
+        locallyFifo = pool.locallyFifo;
+        Thread.UncaughtExceptionHandler ueh = pool.ueh;
+        if (ueh != null)
+            setUncaughtExceptionHandler(ueh);
         setDaemon(true);
-        // To avoid exposing construction details to subclasses,
-        // remaining initialization is in start() and onStart()
     }
 
-    /**
-     * Performs additional initialization and starts this thread.
-     */
-    final void start(int poolIndex, UncaughtExceptionHandler ueh) {
-        this.poolIndex = poolIndex;
-        if (ueh != null)
-            setUncaughtExceptionHandler(ueh);
-        start();
-    }
-
-    // Public/protected methods
+    // Public methods
 
     /**
      * Returns the pool hosting this thread.
@@ -375,6 +330,25 @@
         return poolIndex;
     }
 
+    // Randomization
+
+    /**
+     * Computes next value for random victim probes and backoffs.
+     * Scans don't require a very high quality generator, but also not
+     * a crummy one.  Marsaglia xor-shift is cheap and works well
+     * enough.  Note: This is manually inlined in FJP.scan() to avoid
+     * writes inside busy loops.
+     */
+    private int nextSeed() {
+        int r = seed;
+        r ^= r << 13;
+        r ^= r >>> 17;
+        r ^= r << 5;
+        return seed = r;
+    }
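+
+    // A standalone restatement of the step above, illustrating why the seed
+    // must be nonzero: zero is a fixed point of every xorshift (all shifts of
+    // 0 are 0), while any nonzero seed cycles through all 2^32 - 1 nonzero
+    // ints for this (13, 17, 5) triple (per Marsaglia):
+    //
+    //     static int step(int r) {
+    //         r ^= r << 13;
+    //         r ^= r >>> 17;
+    //         r ^= r << 5;
+    //         return r;  // step(0) == 0; step(1) == 270369; never 0 from nonzero input
+    //     }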
+
+    // Run State management
+
     /**
      * Initializes internal state after construction but before
      * processing any tasks. If you override this method, you must
@@ -385,15 +359,9 @@
      * processing tasks.
      */
     protected void onStart() {
-        int rs = seedGenerator.nextInt();
-        seed = (rs == 0) ? 1 : rs; // seed must be nonzero
-
-        // Allocate name string and arrays in this thread
-        String pid = Integer.toString(pool.getPoolNumber());
-        String wid = Integer.toString(poolIndex);
-        setName("ForkJoinPool-" + pid + "-worker-" + wid);
-
         queue = new ForkJoinTask<?>[INITIAL_QUEUE_CAPACITY];
+        int r = pool.workerSeedGenerator.nextInt();
+        seed = (r == 0) ? 1 : r; // must be nonzero
     }
 
     /**
@@ -406,16 +374,9 @@
      */
     protected void onTermination(Throwable exception) {
         try {
-            ForkJoinPool p = pool;
-            if (active) {
-                int a; // inline p.tryDecrementActiveCount
-                active = false;
-                do {} while (!UNSAFE.compareAndSwapInt
-                             (p, poolRunStateOffset, a = p.runState, a - 1));
-            }
+            terminate = true;
             cancelTasks();
-            setTerminated();
-            p.workerTerminated(this);
+            pool.deregisterWorker(this, exception);
         } catch (Throwable ex) {        // Shouldn't ever happen
             if (exception == null)      // but if so, at least rethrown
                 exception = ex;
@@ -434,7 +395,7 @@
         Throwable exception = null;
         try {
             onStart();
-            mainLoop();
+            pool.work(this);
         } catch (Throwable ex) {
             exception = ex;
         } finally {
@@ -442,81 +403,6 @@
         }
     }
 
-    // helpers for run()
-
-    /**
-     * Finds and executes tasks, and checks status while running.
-     */
-    private void mainLoop() {
-        boolean ran = false; // true if ran a task on last step
-        ForkJoinPool p = pool;
-        for (;;) {
-            p.preStep(this, ran);
-            if (runState != 0)
-                break;
-            ran = tryExecSteal() || tryExecSubmission();
-        }
-    }
-
-    /**
-     * Tries to steal a task and execute it.
-     *
-     * @return true if ran a task
-     */
-    private boolean tryExecSteal() {
-        ForkJoinTask<?> t;
-        if ((t = scan()) != null) {
-            t.quietlyExec();
-            UNSAFE.putOrderedObject(this, currentStealOffset, null);
-            if (sp != base)
-                execLocalTasks();
-            return true;
-        }
-        return false;
-    }
-
-    /**
-     * If a submission exists, try to activate and run it.
-     *
-     * @return true if ran a task
-     */
-    private boolean tryExecSubmission() {
-        ForkJoinPool p = pool;
-        // This loop is needed in case attempt to activate fails, in
-        // which case we only retry if there still appears to be a
-        // submission.
-        while (p.hasQueuedSubmissions()) {
-            ForkJoinTask<?> t; int a;
-            if (active || // inline p.tryIncrementActiveCount
-                (active = UNSAFE.compareAndSwapInt(p, poolRunStateOffset,
-                                                   a = p.runState, a + 1))) {
-                if ((t = p.pollSubmission()) != null) {
-                    UNSAFE.putOrderedObject(this, currentStealOffset, t);
-                    t.quietlyExec();
-                    UNSAFE.putOrderedObject(this, currentStealOffset, null);
-                    if (sp != base)
-                        execLocalTasks();
-                    return true;
-                }
-            }
-        }
-        return false;
-    }
-
-    /**
-     * Runs local tasks until queue is empty or shut down.  Call only
-     * while active.
-     */
-    private void execLocalTasks() {
-        while (runState == 0) {
-            ForkJoinTask<?> t = locallyFifo ? locallyDeqTask() : popTask();
-            if (t != null)
-                t.quietlyExec();
-            else if (sp == base)
-                break;
-        }
-    }
-
     /*
      * Intrinsics-based atomic writes for queue slots. These are
      * basically the same as methods in AtomicReferenceArray, but
@@ -528,10 +414,20 @@
      * because they are protected by other volatile reads and are
      * confirmed by CASes.
      *
-     * Most uses don't actually call these methods, but instead contain
-     * inlined forms that enable more predictable optimization.  We
-     * don't define the version of write used in pushTask at all, but
-     * instead inline there a store-fenced array slot write.
+     * Most uses don't actually call these methods, but instead
+     * contain inlined forms that enable more predictable
+     * optimization.  We don't define the version of write used in
+     * pushTask at all, but instead inline there a store-fenced array
+     * slot write.
+     *
+     * Also in most methods, as a performance (not correctness) issue,
+     * we'd like to encourage compilers not to arbitrarily postpone
+     * setting queueTop after writing slot.  Currently there is no
+     * intrinsic for arranging this, but using Unsafe putOrderedInt
+     * may be a preferable strategy on some compilers even though its
+     * main effect is a pre-, not post- fence. To simplify possible
+     * changes, the option is left in comments next to the associated
+     * assignments.
      */
 
     /**
@@ -540,7 +436,7 @@
      */
     private static final boolean casSlotNull(ForkJoinTask<?>[] q, int i,
                                              ForkJoinTask<?> t) {
-        return UNSAFE.compareAndSwapObject(q, (i << qShift) + qBase, t, null);
+        return UNSAFE.compareAndSwapObject(q, (i << ASHIFT) + ABASE, t, null);
     }
 
     /**
@@ -550,7 +446,7 @@
      */
     private static final void writeSlot(ForkJoinTask<?>[] q, int i,
                                         ForkJoinTask<?> t) {
-        UNSAFE.putObjectVolatile(q, (i << qShift) + qBase, t);
+        UNSAFE.putObjectVolatile(q, (i << ASHIFT) + ABASE, t);
     }
 
     // queue methods
@@ -561,14 +457,43 @@
      * @param t the task. Caller must ensure non-null.
      */
     final void pushTask(ForkJoinTask<?> t) {
-        ForkJoinTask<?>[] q = queue;
-        int mask = q.length - 1; // implicit assert q != null
-        int s = sp++;            // ok to increment sp before slot write
-        UNSAFE.putOrderedObject(q, ((s & mask) << qShift) + qBase, t);
-        if ((s -= base) == 0)
-            pool.signalWork();   // was empty
-        else if (s == mask)
-            growQueue();         // is full
+        ForkJoinTask<?>[] q; int s, m;
+        if ((q = queue) != null) {    // ignore if queue removed
+            long u = (((s = queueTop) & (m = q.length - 1)) << ASHIFT) + ABASE;
+            UNSAFE.putOrderedObject(q, u, t);
+            queueTop = s + 1;         // or use putOrderedInt
+            if ((s -= queueBase) <= 2)
+                pool.signalWork();
+            else if (s == m)
+                growQueue();
+        }
+    }
+
+    /**
+     * Creates or doubles queue array.  Transfers elements by
+     * emulating steals (deqs) from old array and placing, oldest
+     * first, into new array.
+     */
+    private void growQueue() {
+        ForkJoinTask<?>[] oldQ = queue;
+        int size = oldQ != null ? oldQ.length << 1 : INITIAL_QUEUE_CAPACITY;
+        if (size > MAXIMUM_QUEUE_CAPACITY)
+            throw new RejectedExecutionException("Queue capacity exceeded");
+        if (size < INITIAL_QUEUE_CAPACITY)
+            size = INITIAL_QUEUE_CAPACITY;
+        ForkJoinTask<?>[] q = queue = new ForkJoinTask<?>[size];
+        int mask = size - 1;
+        int top = queueTop;
+        int oldMask;
+        if (oldQ != null && (oldMask = oldQ.length - 1) >= 0) {
+            for (int b = queueBase; b != top; ++b) {
+                long u = ((b & oldMask) << ASHIFT) + ABASE;
+                Object x = UNSAFE.getObjectVolatile(oldQ, u);
+                if (x != null && UNSAFE.compareAndSwapObject(oldQ, u, x, null))
+                    UNSAFE.putObjectVolatile
+                        (q, ((b & mask) << ASHIFT) + ABASE, x);
+            }
+        }
     }
 
     /**
@@ -579,35 +504,34 @@
      * @return a task, or null if none or contended
      */
     final ForkJoinTask<?> deqTask() {
-        ForkJoinTask<?> t;
-        ForkJoinTask<?>[] q;
-        int b, i;
-        if (sp != (b = base) &&
+        ForkJoinTask<?> t; ForkJoinTask<?>[] q; int b, i;
+        if (queueTop != (b = queueBase) &&
             (q = queue) != null && // must read q after b
-            (t = q[i = (q.length - 1) & b]) != null && base == b &&
-            UNSAFE.compareAndSwapObject(q, (i << qShift) + qBase, t, null)) {
-            base = b + 1;
+            (i = (q.length - 1) & b) >= 0 &&
+            (t = q[i]) != null && queueBase == b &&
+            UNSAFE.compareAndSwapObject(q, (i << ASHIFT) + ABASE, t, null)) {
+            queueBase = b + 1;
             return t;
         }
         return null;
     }
 
     /**
-     * Tries to take a task from the base of own queue. Assumes active
-     * status.  Called only by this thread.
+     * Tries to take a task from the base of own queue.  Called only
+     * by this thread.
      *
      * @return a task, or null if none
      */
     final ForkJoinTask<?> locallyDeqTask() {
+        ForkJoinTask<?> t; int m, b, i;
         ForkJoinTask<?>[] q = queue;
-        if (q != null) {
-            ForkJoinTask<?> t;
-            int b, i;
-            while (sp != (b = base)) {
-                if ((t = q[i = (q.length - 1) & b]) != null && base == b &&
-                    UNSAFE.compareAndSwapObject(q, (i << qShift) + qBase,
+        if (q != null && (m = q.length - 1) >= 0) {
+            while (queueTop != (b = queueBase)) {
+                if ((t = q[i = m & b]) != null &&
+                    queueBase == b &&
+                    UNSAFE.compareAndSwapObject(q, (i << ASHIFT) + ABASE,
                                                 t, null)) {
-                    base = b + 1;
+                    queueBase = b + 1;
                     return t;
                 }
             }
@@ -616,35 +540,21 @@
     }
 
     /**
-     * Returns a popped task, or null if empty. Assumes active status.
+     * Returns a popped task, or null if empty.
      * Called only by this thread.
      */
     private ForkJoinTask<?> popTask() {
+        int m;
         ForkJoinTask<?>[] q = queue;
-        if (q != null) {
-            int s;
-            while ((s = sp) != base) {
-                int i = (q.length - 1) & --s;
-                long u = (i << qShift) + qBase; // raw offset
+        if (q != null && (m = q.length - 1) >= 0) {
+            for (int s; (s = queueTop) != queueBase;) {
+                int i = m & --s;
+                long u = (i << ASHIFT) + ABASE; // raw offset
                 ForkJoinTask<?> t = q[i];
                 if (t == null)   // lost to stealer
                     break;
                 if (UNSAFE.compareAndSwapObject(q, u, t, null)) {
-                    /*
-                     * Note: here and in related methods, as a
-                     * performance (not correctness) issue, we'd like
-                     * to encourage compiler not to arbitrarily
-                     * postpone setting sp after successful CAS.
-                     * Currently there is no intrinsic for arranging
-                     * this, but using Unsafe putOrderedInt may be a
-                     * preferable strategy on some compilers even
-                     * though its main effect is a pre-, not post-
-                     * fence. To simplify possible changes, the option
-                     * is left in comments next to the associated
-                     * assignments.
-                     */
-                    sp = s; // putOrderedInt may encourage more timely write
-                    // UNSAFE.putOrderedInt(this, spOffset, s);
+                    queueTop = s; // or putOrderedInt
                     return t;
                 }
             }
@@ -654,18 +564,17 @@
 
     /**
      * Specialized version of popTask to pop only if topmost element
-     * is the given task. Called only by this thread while active.
+     * is the given task. Called only by this thread.
      *
      * @param t the task. Caller must ensure non-null.
      */
     final boolean unpushTask(ForkJoinTask<?> t) {
+        ForkJoinTask<?>[] q;
         int s;
-        ForkJoinTask<?>[] q = queue;
-        if ((s = sp) != base && q != null &&
+        if ((q = queue) != null && (s = queueTop) != queueBase &&
             UNSAFE.compareAndSwapObject
-            (q, (((q.length - 1) & --s) << qShift) + qBase, t, null)) {
-            sp = s; // putOrderedInt may encourage more timely write
-            // UNSAFE.putOrderedInt(this, spOffset, s);
+            (q, (((q.length - 1) & --s) << ASHIFT) + ABASE, t, null)) {
+            queueTop = s; // or putOrderedInt
             return true;
         }
         return false;
@@ -675,222 +584,30 @@
      * Returns next task, or null if empty or contended.
      */
     final ForkJoinTask<?> peekTask() {
+        int m;
         ForkJoinTask<?>[] q = queue;
-        if (q == null)
+        if (q == null || (m = q.length - 1) < 0)
             return null;
-        int mask = q.length - 1;
-        int i = locallyFifo ? base : (sp - 1);
-        return q[i & mask];
+        int i = locallyFifo ? queueBase : (queueTop - 1);
+        return q[i & m];
     }
 
-    /**
-     * Doubles queue array size. Transfers elements by emulating
-     * steals (deqs) from old array and placing, oldest first, into
-     * new array.
-     */
-    private void growQueue() {
-        ForkJoinTask<?>[] oldQ = queue;
-        int oldSize = oldQ.length;
-        int newSize = oldSize << 1;
-        if (newSize > MAXIMUM_QUEUE_CAPACITY)
-            throw new RejectedExecutionException("Queue capacity exceeded");
-        ForkJoinTask<?>[] newQ = queue = new ForkJoinTask<?>[newSize];
-
-        int b = base;
-        int bf = b + oldSize;
-        int oldMask = oldSize - 1;
-        int newMask = newSize - 1;
-        do {
-            int oldIndex = b & oldMask;
-            ForkJoinTask<?> t = oldQ[oldIndex];
-            if (t != null && !casSlotNull(oldQ, oldIndex, t))
-                t = null;
-            writeSlot(newQ, b & newMask, t);
-        } while (++b != bf);
-        pool.signalWork();
-    }
-
-    /**
-     * Computes next value for random victim probe in scan().  Scans
-     * don't require a very high quality generator, but also not a
-     * crummy one.  Marsaglia xor-shift is cheap and works well enough.
-     * Note: This is manually inlined in scan().
-     */
-    private static final int xorShift(int r) {
-        r ^= r << 13;
-        r ^= r >>> 17;
-        return r ^ (r << 5);
-    }
+    // Support methods for ForkJoinPool
 
     /**
-     * Tries to steal a task from another worker. Starts at a random
-     * index of workers array, and probes workers until finding one
-     * with non-empty queue or finding that all are empty.  It
-     * randomly selects the first n probes. If these are empty, it
-     * resorts to a circular sweep, which is necessary to accurately
-     * set active status. (The circular sweep uses steps of
-     * approximately half the array size plus 1, to avoid bias
-     * stemming from leftmost packing of the array in ForkJoinPool.)
-     *
-     * This method must be both fast and quiet -- usually avoiding
-     * memory accesses that could disrupt cache sharing etc other than
-     * those needed to check for and take tasks (or to activate if not
-     * already active). This accounts for, among other things,
-     * updating random seed in place without storing it until exit.
-     *
-     * @return a task, or null if none found
+     * Runs the given task, plus any local tasks, until the queue is empty.
      */
-    private ForkJoinTask<?> scan() {
-        ForkJoinPool p = pool;
-        ForkJoinWorkerThread[] ws;        // worker array
-        int n;                            // upper bound of #workers
-        if ((ws = p.workers) != null && (n = ws.length) > 1) {
-            boolean canSteal = active;    // shadow active status
-            int r = seed;                 // extract seed once
-            int mask = n - 1;
-            int j = -n;                   // loop counter
-            int k = r;                    // worker index, random if j < 0
-            for (;;) {
-                ForkJoinWorkerThread v = ws[k & mask];
-                r ^= r << 13; r ^= r >>> 17; r ^= r << 5; // inline xorshift
-                ForkJoinTask<?>[] q; ForkJoinTask<?> t; int b, a;
-                if (v != null && (b = v.base) != v.sp &&
-                    (q = v.queue) != null) {
-                    int i = (q.length - 1) & b;
-                    long u = (i << qShift) + qBase; // raw offset
-                    int pid = poolIndex;
-                    if ((t = q[i]) != null) {
-                        if (!canSteal &&  // inline p.tryIncrementActiveCount
-                            UNSAFE.compareAndSwapInt(p, poolRunStateOffset,
-                                                     a = p.runState, a + 1))
-                            canSteal = active = true;
-                        if (canSteal && v.base == b++ &&
-                            UNSAFE.compareAndSwapObject(q, u, t, null)) {
-                            v.base = b;
-                            v.stealHint = pid;
-                            UNSAFE.putOrderedObject(this,
-                                                    currentStealOffset, t);
-                            seed = r;
-                            ++stealCount;
-                            return t;
-                        }
-                    }
-                    j = -n;
-                    k = r;                // restart on contention
-                }
-                else if (++j <= 0)
-                    k = r;
-                else if (j <= n)
-                    k += (n >>> 1) | 1;
-                else
-                    break;
-            }
-        }
-        return null;
-    }
-
-    // Run State management
-
-    // status check methods used mainly by ForkJoinPool
-    final boolean isRunning()    { return runState == 0; }
-    final boolean isTerminated() { return (runState & TERMINATED) != 0; }
-    final boolean isSuspended()  { return (runState & SUSPENDED) != 0; }
-    final boolean isTrimmed()    { return (runState & TRIMMED) != 0; }
-
-    final boolean isTerminating() {
-        if ((runState & TERMINATING) != 0)
-            return true;
-        if (pool.isAtLeastTerminating()) { // propagate pool state
-            shutdown();
-            return true;
+    final void execTask(ForkJoinTask<?> t) {
+        currentSteal = t;
+        for (;;) {
+            if (t != null)
+                t.doExec();
+            if (queueTop == queueBase)
+                break;
+            t = locallyFifo ? locallyDeqTask() : popTask();
         }
-        return false;
-    }
-
-    /**
-     * Sets state to TERMINATING. Does NOT unpark or interrupt
-     * to wake up if currently blocked. Callers must do so if desired.
-     */
-    final void shutdown() {
-        for (;;) {
-            int s = runState;
-            if ((s & (TERMINATING|TERMINATED)) != 0)
-                break;
-            if ((s & SUSPENDED) != 0) { // kill and wakeup if suspended
-                if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
-                                             (s & ~SUSPENDED) |
-                                             (TRIMMED|TERMINATING)))
-                    break;
-            }
-            else if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
-                                              s | TERMINATING))
-                break;
-        }
-    }
-
-    /**
-     * Sets state to TERMINATED. Called only by onTermination().
-     */
-    private void setTerminated() {
-        int s;
-        do {} while (!UNSAFE.compareAndSwapInt(this, runStateOffset,
-                                               s = runState,
-                                               s | (TERMINATING|TERMINATED)));
-    }
-
-    /**
-     * If suspended, tries to set status to unsuspended.
-     * Does NOT wake up if blocked.
-     *
-     * @return true if successful
-     */
-    final boolean tryUnsuspend() {
-        int s;
-        while (((s = runState) & SUSPENDED) != 0) {
-            if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
-                                         s & ~SUSPENDED))
-                return true;
-        }
-        return false;
-    }
-
-    /**
-     * Sets suspended status and blocks as spare until resumed
-     * or shutdown.
-     */
-    final void suspendAsSpare() {
-        for (;;) {                  // set suspended unless terminating
-            int s = runState;
-            if ((s & TERMINATING) != 0) { // must kill
-                if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
-                                             s | (TRIMMED | TERMINATING)))
-                    return;
-            }
-            else if (UNSAFE.compareAndSwapInt(this, runStateOffset, s,
-                                              s | SUSPENDED))
-                break;
-        }
-        ForkJoinPool p = pool;
-        p.pushSpare(this);
-        while ((runState & SUSPENDED) != 0) {
-            if (p.tryAccumulateStealCount(this)) {
-                interrupted();          // clear/ignore interrupts
-                if ((runState & SUSPENDED) == 0)
-                    break;
-                LockSupport.park(this);
-            }
-        }
-    }
-
-    // Misc support methods for ForkJoinPool
-
-    /**
-     * Returns an estimate of the number of tasks in the queue.  Also
-     * used by ForkJoinTask.
-     */
-    final int getQueueSize() {
-        int n; // external calls must read base first
-        return (n = -base + sp) <= 0 ? 0 : n;
+        ++stealCount;
+        currentSteal = null;
     }
 
     /**
@@ -899,17 +616,12 @@
      */
     final void cancelTasks() {
         ForkJoinTask<?> cj = currentJoin; // try to cancel ongoing tasks
-        if (cj != null && cj.status >= 0) {
+        if (cj != null && cj.status >= 0)
             cj.cancelIgnoringExceptions();
-            try {
-                this.interrupt(); // awaken wait
-            } catch (SecurityException ignore) {
-            }
-        }
         ForkJoinTask<?> cs = currentSteal;
         if (cs != null && cs.status >= 0)
             cs.cancelIgnoringExceptions();
-        while (base != sp) {
+        while (queueBase != queueTop) {
             ForkJoinTask<?> t = deqTask();
             if (t != null)
                 t.cancelIgnoringExceptions();
@@ -923,7 +635,7 @@
      */
     final int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
         int n = 0;
-        while (base != sp) {
+        while (queueBase != queueTop) {
             ForkJoinTask<?> t = deqTask();
             if (t != null) {
                 c.add(t);
@@ -936,20 +648,19 @@
     // Support methods for ForkJoinTask
 
     /**
+     * Returns an estimate of the number of tasks in the queue.
+     */
+    final int getQueueSize() {
+        return queueTop - queueBase;
+    }
+
+    /**
      * Gets and removes a local task.
      *
      * @return a task, if available
      */
     final ForkJoinTask<?> pollLocalTask() {
-        ForkJoinPool p = pool;
-        while (sp != base) {
-            int a; // inline p.tryIncrementActiveCount
-            if (active ||
-                (active = UNSAFE.compareAndSwapInt(p, poolRunStateOffset,
-                                                   a = p.runState, a + 1)))
-                return locallyFifo ? locallyDeqTask() : popTask();
-        }
-        return null;
+        return locallyFifo ? locallyDeqTask() : popTask();
     }
 
     /**
@@ -958,172 +669,205 @@
      * @return a task, if available
      */
     final ForkJoinTask<?> pollTask() {
+        ForkJoinWorkerThread[] ws;
         ForkJoinTask<?> t = pollLocalTask();
-        if (t == null) {
-            t = scan();
-            // cannot retain/track/help steal
-            UNSAFE.putOrderedObject(this, currentStealOffset, null);
+        if (t != null || (ws = pool.workers) == null)
+            return t;
+        int n = ws.length; // cheap version of FJP.scan
+        int steps = n << 1;
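+        // Make up to 2*n probes from a random start, restarting the
+        // scan whenever an apparently non-empty queue yields no task.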
+        int r = nextSeed();
+        int i = 0;
+        while (i < steps) {
+            ForkJoinWorkerThread w = ws[(i++ + r) & (n - 1)];
+            if (w != null && w.queueBase != w.queueTop && w.queue != null) {
+                if ((t = w.deqTask()) != null)
+                    return t;
+                i = 0;
+            }
         }
-        return t;
+        return null;
     }
 
     /**
-     * Possibly runs some tasks and/or blocks, until task is done.
+     * The maximum stolen->joining link depth allowed in helpJoinTask,
+     * as well as the maximum number of retries (allowing on average
+     * one staleness retry per level) before falling back to
+     * compensation.  Depths for legitimate chains are unbounded, but
+     * we use a fixed constant to avoid (otherwise unchecked) cycles
+     * and bound staleness of traversal parameters at the expense of
+     * sometimes blocking when we could be helping.
+     */
+    private static final int MAX_HELP = 16;
+
+    /**
+     * Possibly runs some tasks and/or blocks until joinMe is done.
      *
      * @param joinMe the task to join
-     * @param timed true if use timed wait
-     * @param nanos wait time if timed
+     * @return completion status on exit
      */
-    final void joinTask(ForkJoinTask<?> joinMe, boolean timed, long nanos) {
-        // currentJoin only written by this thread; only need ordered store
+    final int joinTask(ForkJoinTask<?> joinMe) {
         ForkJoinTask<?> prevJoin = currentJoin;
-        UNSAFE.putOrderedObject(this, currentJoinOffset, joinMe);
-        pool.awaitJoin(joinMe, this, timed, nanos);
-        UNSAFE.putOrderedObject(this, currentJoinOffset, prevJoin);
+        currentJoin = joinMe;
+        for (int s, retries = MAX_HELP;;) {
+            if ((s = joinMe.status) < 0) {
+                currentJoin = prevJoin;
+                return s;
+            }
+            if (retries > 0) {
+                if (queueTop != queueBase) {
+                    if (!localHelpJoinTask(joinMe))
+                        retries = 0;           // cannot help
+                }
+                else if (retries == MAX_HELP >>> 1) {
+                    --retries;                 // check uncommon case
+                    if (tryDeqAndExec(joinMe) >= 0)
+                        Thread.yield();        // for politeness
+                }
+                else
+                    retries = helpJoinTask(joinMe) ? MAX_HELP : retries - 1;
+            }
+            else {
+                retries = MAX_HELP;           // restart if not done
+                pool.tryAwaitJoin(joinMe);
+            }
+        }
+    }
+
+    /**
+     * If present, pops and executes the given task, or any other
+     * cancelled task.
+     *
+     * @return false if any other non-cancelled task exists in the local queue
+     */
+    private boolean localHelpJoinTask(ForkJoinTask<?> joinMe) {
+        int s, i; ForkJoinTask<?>[] q; ForkJoinTask<?> t;
+        if ((s = queueTop) != queueBase && (q = queue) != null &&
+            (i = (q.length - 1) & --s) >= 0 &&
+            (t = q[i]) != null) {
+            if (t != joinMe && t.status >= 0)
+                return false;
+            if (UNSAFE.compareAndSwapObject
+                (q, (i << ASHIFT) + ABASE, t, null)) {
+                queueTop = s;           // or putOrderedInt
+                t.doExec();
+            }
+        }
+        return true;
     }
 
     /**
-     * Tries to locate and help perform tasks for a stealer of the
-     * given task, or in turn one of its stealers.  Traces
+     * Tries to locate and execute tasks for a stealer of the given
+     * task, or in turn one of its stealers. Traces
      * currentSteal->currentJoin links looking for a thread working on
      * a descendant of the given task and with a non-empty queue to
-     * steal back and execute tasks from.
-     *
-     * The implementation is very branchy to cope with potential
-     * inconsistencies or loops encountering chains that are stale,
-     * unknown, or of length greater than MAX_HELP_DEPTH links.  All
-     * of these cases are dealt with by just returning back to the
-     * caller, who is expected to retry if other join mechanisms also
-     * don't work out.
+     * steal back and execute tasks from.  The implementation is very
+     * branchy to cope with potential inconsistencies or loops
+     * encountering chains that are stale, unknown, or of length
+     * greater than MAX_HELP links.  All of these cases are dealt with
+     * by simply having the caller retry.
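+     * For example, if this worker is joining task t stolen by worker
+     * v, and v is in turn joining a task stolen by worker w, helping
+     * may end up running tasks from w's queue (a chain of depth two).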
      *
      * @param joinMe the task to join
-     * @param running if false, then must update pool count upon
-     *  running a task
-     * @return value of running on exit
+     * @return true if this call ran a task
      */
-    final boolean helpJoinTask(ForkJoinTask<?> joinMe, boolean running) {
-        /*
-         * Initial checks to (1) abort if terminating; (2) clean out
-         * old cancelled tasks from local queue; (3) if joinMe is next
-         * task, run it; (4) omit scan if local queue nonempty (since
-         * it may contain non-descendents of joinMe).
-         */
-        ForkJoinPool p = pool;
-        for (;;) {
-            ForkJoinTask<?>[] q;
-            int s;
-            if (joinMe.status < 0)
-                return running;
-            else if ((runState & TERMINATING) != 0) {
-                joinMe.cancelIgnoringExceptions();
-                return running;
+    private boolean helpJoinTask(ForkJoinTask<?> joinMe) {
+        boolean helped = false;
+        int m = pool.scanGuard & SMASK;
+        ForkJoinWorkerThread[] ws = pool.workers;
+        if (ws != null && ws.length > m && joinMe.status >= 0) {
+            int levels = MAX_HELP;              // remaining chain length
+            ForkJoinTask<?> task = joinMe;      // base of chain
+            outer: for (ForkJoinWorkerThread thread = this;;) {
+                // Try to find v, the stealer of task, by first using hint
+                ForkJoinWorkerThread v = ws[thread.stealHint & m];
+                if (v == null || v.currentSteal != task) {
+                    for (int j = 0; ;) {        // search array
+                        if ((v = ws[j]) != null && v.currentSteal == task) {
+                            thread.stealHint = j;
+                            break;              // save hint for next time
+                        }
+                        if (++j > m)
+                            break outer;        // can't find stealer
+                    }
+                }
+                // Try to help v, using specialized form of deqTask
+                for (;;) {
+                    ForkJoinTask<?>[] q; int b, i;
+                    if (joinMe.status < 0)
+                        break outer;
+                    if ((b = v.queueBase) == v.queueTop ||
+                        (q = v.queue) == null ||
+                        (i = (q.length - 1) & b) < 0)
+                        break;                  // empty
+                    long u = (i << ASHIFT) + ABASE;
+                    ForkJoinTask<?> t = q[i];
+                    if (task.status < 0)
+                        break outer;            // stale
+                    if (t != null && v.queueBase == b &&
+                        UNSAFE.compareAndSwapObject(q, u, t, null)) {
+                        v.queueBase = b + 1;
+                        v.stealHint = poolIndex;
+                        ForkJoinTask<?> ps = currentSteal;
+                        currentSteal = t;
+                        t.doExec();
+                        currentSteal = ps;
+                        helped = true;
+                    }
+                }
+                // Try to descend to find v's stealer
+                ForkJoinTask<?> next = v.currentJoin;
+                if (--levels > 0 && task.status >= 0 &&
+                    next != null && next != task) {
+                    task = next;
+                    thread = v;
+                }
+                else
+                    break;  // max levels, stale, dead-end, or cyclic
             }
-            else if ((s = sp) == base || (q = queue) == null)
-                break;                            // queue empty
-            else {
-                int i = (q.length - 1) & --s;
-                long u = (i << qShift) + qBase;   // raw offset
-                ForkJoinTask<?> t = q[i];
-                if (t == null)
-                    break;                        // lost to a stealer
-                else if (t != joinMe && t.status >= 0)
-                    return running;               // cannot safely help
-                else if ((running ||
-                          (running = p.tryIncrementRunningCount())) &&
-                         UNSAFE.compareAndSwapObject(q, u, t, null)) {
-                    sp = s; // putOrderedInt may encourage more timely write
-                    // UNSAFE.putOrderedInt(this, spOffset, s);
-                    t.quietlyExec();
+        }
+        return helped;
+    }
+
+    /**
+     * Performs an uncommon case for joinTask: if task t is at the base
+     * of some worker's queue, steals and executes it.
+     *
+     * @param t the task
+     * @return t's status
+     */
+    private int tryDeqAndExec(ForkJoinTask<?> t) {
+        int m = pool.scanGuard & SMASK;
+        ForkJoinWorkerThread[] ws = pool.workers;
+        if (ws != null && ws.length > m && t.status >= 0) {
+            for (int j = 0; j <= m; ++j) {
+                ForkJoinTask<?>[] q; int b, i;
+                ForkJoinWorkerThread v = ws[j];
+                if (v != null &&
+                    (b = v.queueBase) != v.queueTop &&
+                    (q = v.queue) != null &&
+                    (i = (q.length - 1) & b) >= 0 &&
+                    q[i] == t) {
+                    long u = (i << ASHIFT) + ABASE;
+                    if (v.queueBase == b &&
+                        UNSAFE.compareAndSwapObject(q, u, t, null)) {
+                        v.queueBase = b + 1;
+                        v.stealHint = poolIndex;
+                        ForkJoinTask<?> ps = currentSteal;
+                        currentSteal = t;
+                        t.doExec();
+                        currentSteal = ps;
+                    }
+                    break;
                 }
             }
         }
-
-        int n;                                    // worker array size
-        ForkJoinWorkerThread[] ws = p.workers;
-        if (ws != null && (n = ws.length) > 1) {  // need at least 2 workers
-            ForkJoinTask<?> task = joinMe;        // base of chain
-            ForkJoinWorkerThread thread = this;   // thread with stolen task
-
-            outer:for (int d = 0; d < MAX_HELP_DEPTH; ++d) { // chain length
-                // Try to find v, the stealer of task, by first using hint
-                ForkJoinWorkerThread v = ws[thread.stealHint & (n - 1)];
-                if (v == null || v.currentSteal != task) {
-                    for (int j = 0; ; ++j) {      // search array
-                        if (j < n) {
-                            ForkJoinTask<?> vs;
-                            if ((v = ws[j]) != null &&
-                                (vs = v.currentSteal) != null) {
-                                if (joinMe.status < 0)
-                                    break outer;
-                                if (vs == task) {
-                                    if (task.status < 0)
-                                        break outer; // stale
-                                    thread.stealHint = j;
-                                    break;        // save hint for next time
-                                }
-                            }
-                        }
-                        else
-                            break outer;          // no stealer
-                    }
-                }
-
-                // Try to help v, using specialized form of deqTask
-                for (;;) {
-                    if (joinMe.status < 0)
-                        break outer;
-                    int b = v.base;
-                    ForkJoinTask<?>[] q = v.queue;
-                    if (b == v.sp || q == null)
-                        break;                    // empty
-                    int i = (q.length - 1) & b;
-                    long u = (i << qShift) + qBase;
-                    ForkJoinTask<?> t = q[i];
-                    if (task.status < 0)
-                        break outer;              // stale
-                    if (t != null &&
-                        (running ||
-                         (running = p.tryIncrementRunningCount())) &&
-                        v.base == b++ &&
-                        UNSAFE.compareAndSwapObject(q, u, t, null)) {
-                        if (t != joinMe && joinMe.status < 0) {
-                            UNSAFE.putObjectVolatile(q, u, t);
-                            break outer;          // joinMe cancelled; back out
-                        }
-                        v.base = b;
-                        if (t.status >= 0) {
-                            ForkJoinTask<?> ps = currentSteal;
-                            int pid = poolIndex;
-                            v.stealHint = pid;
-                            UNSAFE.putOrderedObject(this,
-                                                    currentStealOffset, t);
-                            t.quietlyExec();
-                            UNSAFE.putOrderedObject(this,
-                                                    currentStealOffset, ps);
-                        }
-                    }
-                    else if ((runState & TERMINATING) != 0) {
-                        joinMe.cancelIgnoringExceptions();
-                        break outer;
-                    }
-                }
-
-                // Try to descend to find v's stealer
-                ForkJoinTask<?> next = v.currentJoin;
-                if (task.status < 0 || next == null || next == task ||
-                    joinMe.status < 0)
-                    break;                 // done, stale, dead-end, or cyclic
-                task = next;
-                thread = v;
-            }
-        }
-        return running;
+        return t.status;
     }
 
     /**
-     * Implements ForkJoinTask.getSurplusQueuedTaskCount().
-     * Returns an estimate of the number of tasks, offset by a
-     * function of number of idle workers.
+     * Implements ForkJoinTask.getSurplusQueuedTaskCount().  Returns
+     * an estimate of the number of tasks, offset by a function of
+     * number of idle workers.
      *
      * This method provides a cheap heuristic guide for task
      * partitioning when programmers, frameworks, tools, or languages
@@ -1159,82 +903,96 @@
      * When all threads are active, it is on average OK to estimate
      * surplus strictly locally. In steady-state, if one thread is
      * maintaining say 2 surplus tasks, then so are others. So we can
-     * just use estimated queue length (although note that (sp - base)
-     * can be an overestimate because of stealers lagging increments
-     * of base).  However, this strategy alone leads to serious
-     * mis-estimates in some non-steady-state conditions (ramp-up,
-     * ramp-down, other stalls). We can detect many of these by
-     * further considering the number of "idle" threads, that are
+     * just use estimated queue length (although note that (queueTop -
+     * queueBase) can be an overestimate because of stealers lagging
+     * increments of queueBase).  However, this strategy alone leads
+     * to serious mis-estimates in some non-steady-state conditions
+     * (ramp-up, ramp-down, other stalls). We can detect many of these
+     * by further considering the number of "idle" threads, that are
      * known to have zero queued tasks, so compensate by a factor of
      * (#idle/#active) threads.
      */
     final int getEstimatedSurplusTaskCount() {
-        return sp - base - pool.idlePerActive();
+        return queueTop - queueBase - pool.idlePerActive();
     }
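
As a usage sketch for this heuristic: a divide-and-conquer task can consult
the estimate via ForkJoinTask.getSurplusQueuedTaskCount() and stop splitting
once a few surplus tasks are queued locally. The SumTask shape, the CUTOFF,
and the threshold of 3 below are illustrative assumptions, not code from this
changeset:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.RecursiveTask;

    class SumTask extends RecursiveTask<Long> {
        static final int CUTOFF = 1 << 10;   // illustrative sequential cutoff
        final long[] array; final int lo, hi;
        SumTask(long[] array, int lo, int hi) {
            this.array = array; this.lo = lo; this.hi = hi;
        }
        protected Long compute() {
            int l = lo, h = hi;
            List<SumTask> forked = new ArrayList<SumTask>();
            // Split only while little surplus work is queued locally.
            while (h - l > CUTOFF && getSurplusQueuedTaskCount() <= 3) {
                int mid = (l + h) >>> 1;
                SumTask right = new SumTask(array, mid, h);
                right.fork();
                forked.add(right);
                h = mid;
            }
            long sum = 0;
            for (int i = l; i < h; ++i)
                sum += array[i];
            for (int i = forked.size() - 1; i >= 0; --i)
                sum += forked.get(i).join(); // join forked halves, newest first
            return sum;
        }
        public static void main(String[] args) {
            long[] data = new long[1 << 16];
            java.util.Arrays.fill(data, 1L);
            long sum = new ForkJoinPool()
                .invoke(new SumTask(data, 0, data.length));
            System.out.println(sum); // 65536
        }
    }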
 
     /**
-     * Runs tasks until {@code pool.isQuiescent()}.
+     * Runs tasks until {@code pool.isQuiescent()}. We piggyback on
+     * the pool's active count (ctl) maintenance, but rather than
+     * blocking when tasks cannot be found, we rescan until all others
+     * cannot find tasks either. Bracketing the scan with quiescerCount
+     * updates suppresses the pool's auto-shutdown mechanics, which
+     * could otherwise prematurely terminate the pool because all
+     * threads appear to be inactive.
      */
     final void helpQuiescePool() {
+        boolean active = true;
         ForkJoinTask<?> ps = currentSteal; // to restore below
+        ForkJoinPool p = pool;
+        p.addQuiescerCount(1);
         for (;;) {
-            ForkJoinTask<?> t = pollLocalTask();
-            if (t != null || (t = scan()) != null)
-                t.quietlyExec();
+            ForkJoinWorkerThread[] ws = p.workers;
+            ForkJoinWorkerThread v = null;
+            int n;
+            if (queueTop != queueBase)
+                v = this;
+            else if (ws != null && (n = ws.length) > 1) {
+                ForkJoinWorkerThread w;
+                int r = nextSeed(); // cheap version of FJP.scan
+                int steps = n << 1;
+                for (int i = 0; i < steps; ++i) {
+                    if ((w = ws[(i + r) & (n - 1)]) != null &&
+                        w.queueBase != w.queueTop) {
+                        v = w;
+                        break;
+                    }
+                }
+            }
+            if (v != null) {
+                ForkJoinTask<?> t;
+                if (!active) {
+                    active = true;
+                    p.addActiveCount(1);
+                }
+                if ((t = (v != this) ? v.deqTask() :
+                     locallyFifo ? locallyDeqTask() : popTask()) != null) {
+                    currentSteal = t;
+                    t.doExec();
+                    currentSteal = ps;
+                }
+            }
             else {
-                ForkJoinPool p = pool;
-                int a; // to inline CASes
                 if (active) {
-                    if (!UNSAFE.compareAndSwapInt
-                        (p, poolRunStateOffset, a = p.runState, a - 1))
-                        continue;   // retry later
-                    active = false; // inactivate
-                    UNSAFE.putOrderedObject(this, currentStealOffset, ps);
+                    active = false;
+                    p.addActiveCount(-1);
                 }
                 if (p.isQuiescent()) {
-                    active = true; // re-activate
-                    do {} while (!UNSAFE.compareAndSwapInt
-                                 (p, poolRunStateOffset, a = p.runState, a+1));
-                    return;
+                    p.addActiveCount(1);
+                    p.addQuiescerCount(-1);
+                    break;
                 }
             }
         }
     }
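
From a worker thread this method is reached through the public
ForkJoinTask.helpQuiesce() entry point. A minimal sketch, with illustrative
names, of a task that forks subtasks and then helps drain the pool rather
than joining each subtask individually:

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.RecursiveAction;

    class FanOutTask extends RecursiveAction {
        protected void compute() {
            for (int i = 0; i < 8; ++i) {
                new RecursiveAction() {
                    protected void compute() { /* leaf work would go here */ }
                }.fork();
            }
            helpQuiesce(); // run local and stolen tasks until the pool is quiet
        }
        public static void main(String[] args) {
            new ForkJoinPool().invoke(new FanOutTask());
        }
    }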
 
     // Unsafe mechanics
-
-    private static final sun.misc.Unsafe UNSAFE = sun.misc.Unsafe.getUnsafe();
-    private static final long spOffset =
-        objectFieldOffset("sp", ForkJoinWorkerThread.class);
-    private static final long runStateOffset =
-        objectFieldOffset("runState", ForkJoinWorkerThread.class);
-    private static final long currentJoinOffset =
-        objectFieldOffset("currentJoin", ForkJoinWorkerThread.class);
-    private static final long currentStealOffset =
-        objectFieldOffset("currentSteal", ForkJoinWorkerThread.class);
-    private static final long qBase =
-        UNSAFE.arrayBaseOffset(ForkJoinTask[].class);
-    private static final long poolRunStateOffset = // to inline CAS
-        objectFieldOffset("runState", ForkJoinPool.class);
-
-    private static final int qShift;
+    private static final sun.misc.Unsafe UNSAFE;
+    private static final long ABASE;
+    private static final int ASHIFT;
 
     static {
-        int s = UNSAFE.arrayIndexScale(ForkJoinTask[].class);
+        int s;
+        try {
+            UNSAFE = sun.misc.Unsafe.getUnsafe();
+            Class<?> a = ForkJoinTask[].class;
+            ABASE = UNSAFE.arrayBaseOffset(a);
+            s = UNSAFE.arrayIndexScale(a);
+        } catch (Exception e) {
+            throw new Error(e);
+        }
         if ((s & (s-1)) != 0)
             throw new Error("data type scale not a power of two");
-        qShift = 31 - Integer.numberOfLeadingZeros(s);
-        MAXIMUM_QUEUE_CAPACITY = 1 << (31 - qShift);
+        ASHIFT = 31 - Integer.numberOfLeadingZeros(s);
     }
 
-    private static long objectFieldOffset(String field, Class<?> klazz) {
-        try {
-            return UNSAFE.objectFieldOffset(klazz.getDeclaredField(field));
-        } catch (NoSuchFieldException e) {
-            // Convert Exception to corresponding Error
-            NoSuchFieldError error = new NoSuchFieldError(field);
-            error.initCause(e);
-            throw error;
-        }
-    }
 }
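
For orientation, a small self-contained sketch of the (i << ASHIFT) + ABASE
slot-offset arithmetic used throughout the file above; the base and scale
values here are hypothetical, whereas the real ones are read from Unsafe
during class initialization:

    public class SlotOffsetDemo {
        static long slotOffset(int i, long abase, int scale) {
            int ashift = 31 - Integer.numberOfLeadingZeros(scale); // log2(scale)
            return ((long) i << ashift) + abase;                   // raw byte offset
        }
        public static void main(String[] args) {
            // With a 4-byte reference scale (e.g. compressed oops) and base 16:
            System.out.println(slotOffset(3, 16L, 4)); // 16 + (3 << 2) = 28
        }
    }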
--- a/jdk/test/java/nio/file/DirectoryStream/Basic.java	Tue Mar 08 15:09:49 2011 -0800
+++ b/jdk/test/java/nio/file/DirectoryStream/Basic.java	Tue Mar 08 15:10:48 2011 -0800
@@ -69,7 +69,7 @@
             throw new RuntimeException("entry not found");
 
         // check filtering: f* should match foo
-        DirectoryStream.Filter<Path> filter = new DirectoryStream.Filter<>() {
+        DirectoryStream.Filter<Path> filter = new DirectoryStream.Filter<Path>() {
             private PathMatcher matcher =
                 dir.getFileSystem().getPathMatcher("glob:f*");
             public boolean accept(Path file) {
--- a/jdk/test/java/util/PriorityQueue/NoNulls.java	Tue Mar 08 15:09:49 2011 -0800
+++ b/jdk/test/java/util/PriorityQueue/NoNulls.java	Tue Mar 08 15:10:48 2011 -0800
@@ -53,7 +53,7 @@
 public class NoNulls {
     void test(String[] args) throws Throwable {
         final Comparator<String> nullTolerantComparator
-            = new Comparator<>() {
+            = new Comparator<String>() {
             public int compare(String x, String y) {
                 return (x == null ? -1 :
                         y == null ? 1 :
@@ -65,7 +65,7 @@
         nullSortedSet.add(null);
 
         final PriorityQueue<String> nullPriorityQueue
-            = new PriorityQueue<>() {
+            = new PriorityQueue<String>() {
             public Object[] toArray() { return new Object[] { null };}};
 
         final Collection<String> nullCollection = new ArrayList<>();