1 /*
   2  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.  Oracle designates this
   7  * particular file as subject to the "Classpath" exception as provided
   8  * by Oracle in the LICENSE file that accompanied this code.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  */
  24 
  25 /*
  26  * This file is available under and governed by the GNU General Public
  27  * License version 2 only, as published by the Free Software Foundation.
  28  * However, the following notice accompanied the original version of this
  29  * file:
  30  *
  31  * Written by Doug Lea with assistance from members of JCP JSR-166
  32  * Expert Group and released to the public domain, as explained at
  33  * http://creativecommons.org/publicdomain/zero/1.0/
  34  */
  35 
  36 package java.util.concurrent;
  37 
  38 import java.lang.Thread.UncaughtExceptionHandler;
  39 import java.lang.invoke.MethodHandles;
  40 import java.lang.invoke.VarHandle;
  41 import java.security.AccessController;
  42 import java.security.AccessControlContext;
  43 import java.security.Permission;
  44 import java.security.Permissions;
  45 import java.security.PrivilegedAction;
  46 import java.security.ProtectionDomain;
  47 import java.util.ArrayList;
  48 import java.util.Collection;
  49 import java.util.Collections;
  50 import java.util.List;
  51 import java.util.function.Predicate;
  52 import java.util.concurrent.atomic.AtomicInteger;
  53 import java.util.concurrent.locks.LockSupport;
  54 import java.util.concurrent.locks.ReentrantLock;
  55 import java.util.concurrent.locks.Condition;


  56 
  57 /**
  58  * An {@link ExecutorService} for running {@link ForkJoinTask}s.
  59  * A {@code ForkJoinPool} provides the entry point for submissions
  60  * from non-{@code ForkJoinTask} clients, as well as management and
  61  * monitoring operations.
  62  *
  63  * <p>A {@code ForkJoinPool} differs from other kinds of {@link
  64  * ExecutorService} mainly by virtue of employing
  65  * <em>work-stealing</em>: all threads in the pool attempt to find and
  66  * execute tasks submitted to the pool and/or created by other active
  67  * tasks (eventually blocking waiting for work if none exist). This
  68  * enables efficient processing when most tasks spawn other subtasks
  69  * (as do most {@code ForkJoinTask}s), as well as when many small
  70  * tasks are submitted to the pool from external clients.  Especially
  71  * when setting <em>asyncMode</em> to true in constructors, {@code
  72  * ForkJoinPool}s may also be appropriate for use with event-style
  73  * tasks that are never joined. All worker threads are initialized
  74  * with {@link Thread#isDaemon} set {@code true}.
  75  *
  76  * <p>A static {@link #commonPool()} is available and appropriate for
  77  * most applications. The common pool is used by any ForkJoinTask that
  78  * is not explicitly submitted to a specified pool. Using the common
  79  * pool normally reduces resource usage (its threads are slowly
  80  * reclaimed during periods of non-use, and reinstated upon subsequent
  81  * use).
  82  *
  83  * <p>For applications that require separate or custom pools, a {@code
  84  * ForkJoinPool} may be constructed with a given target parallelism
  85  * level; by default, equal to the number of available processors.
  86  * The pool attempts to maintain enough active (or available) threads
  87  * by dynamically adding, suspending, or resuming internal worker
  88  * threads, even if some tasks are stalled waiting to join others.
  89  * However, no such adjustments are guaranteed in the face of blocked
  90  * I/O or other unmanaged synchronization. The nested {@link
  91  * ManagedBlocker} interface enables extension of the kinds of
  92  * synchronization accommodated. The default policies may be
  93  * overridden using a constructor with parameters corresponding to
  94  * those documented in class {@link ThreadPoolExecutor}.
  95  *
  96  * <p>In addition to execution and lifecycle control methods, this
  97  * class provides status check methods (for example
  98  * {@link #getStealCount}) that are intended to aid in developing,
  99  * tuning, and monitoring fork/join applications. Also, method
 100  * {@link #toString} returns indications of pool state in a
 101  * convenient form for informal monitoring.
 102  *
 103  * <p>As is the case with other ExecutorServices, there are three
 104  * main task execution methods summarized in the following table.
 105  * These are designed to be used primarily by clients not already
 106  * engaged in fork/join computations in the current pool.  The main
 107  * forms of these methods accept instances of {@code ForkJoinTask},
 108  * but overloaded forms also allow mixed execution of plain {@code
 109  * Runnable}- or {@code Callable}- based activities as well.  However,
 110  * tasks that are already executing in a pool should normally instead
 111  * use the within-computation forms listed in the table unless using
 112  * async event-style tasks that are not usually joined, in which case
 113  * there is little difference among choice of methods.
 114  *
 115  * <table class="plain">
 116  * <caption>Summary of task execution methods</caption>
 117  *  <tr>
 118  *    <td></td>
 119  *    <th scope="col"> Call from non-fork/join clients</th>
 120  *    <th scope="col"> Call from within fork/join computations</th>
 121  *  </tr>
 122  *  <tr>
 123  *    <th scope="row" style="text-align:left"> Arrange async execution</th>
 124  *    <td> {@link #execute(ForkJoinTask)}</td>
 125  *    <td> {@link ForkJoinTask#fork}</td>
 126  *  </tr>
 127  *  <tr>
 128  *    <th scope="row" style="text-align:left"> Await and obtain result</th>
 129  *    <td> {@link #invoke(ForkJoinTask)}</td>
 130  *    <td> {@link ForkJoinTask#invoke}</td>
 131  *  </tr>
 132  *  <tr>
 133  *    <th scope="row" style="text-align:left"> Arrange exec and obtain Future</th>
 134  *    <td> {@link #submit(ForkJoinTask)}</td>
 135  *    <td> {@link ForkJoinTask#fork} (ForkJoinTasks <em>are</em> Futures)</td>
 136  *  </tr>
 137  * </table>
 138  *
 139  * <p>The parameters used to construct the common pool may be controlled by
 140  * setting the following {@linkplain System#getProperty system properties}:
 141  * <ul>
 142  * <li>{@systemProperty java.util.concurrent.ForkJoinPool.common.parallelism}
 143  * - the parallelism level, a non-negative integer
 144  * <li>{@systemProperty java.util.concurrent.ForkJoinPool.common.threadFactory}
 145  * - the class name of a {@link ForkJoinWorkerThreadFactory}.
 146  * The {@linkplain ClassLoader#getSystemClassLoader() system class loader}
 147  * is used to load this class.
 148  * <li>{@systemProperty java.util.concurrent.ForkJoinPool.common.exceptionHandler}
 149  * - the class name of a {@link UncaughtExceptionHandler}.
 150  * The {@linkplain ClassLoader#getSystemClassLoader() system class loader}
 151  * is used to load this class.
 152  * <li>{@systemProperty java.util.concurrent.ForkJoinPool.common.maximumSpares}
 153  * - the maximum number of allowed extra threads to maintain target
 154  * parallelism (default 256).
 155  * </ul>
 156  * If no thread factory is supplied via a system property, then the
 157  * common pool uses a factory that uses the system class loader as the
 158  * {@linkplain Thread#getContextClassLoader() thread context class loader}.
 159  * In addition, if a {@link SecurityManager} is present, then
 160  * the common pool uses a factory supplying threads that have no
 161  * {@link Permissions} enabled.
 162  *
 163  * Upon any error in establishing these settings, default parameters
 164  * are used. It is possible to disable or limit the use of threads in
 165  * the common pool by setting the parallelism property to zero, and/or
 166  * using a factory that may return {@code null}. However doing so may
 167  * cause unjoined tasks to never be executed.
 168  *
 169  * <p><b>Implementation notes:</b> This implementation restricts the
 170  * maximum number of running threads to 32767. Attempts to create
 171  * pools with greater than the maximum number result in
 172  * {@code IllegalArgumentException}.
 173  *
 174  * <p>This implementation rejects submitted tasks (that is, by throwing
 175  * {@link RejectedExecutionException}) only when the pool is shut down
 176  * or internal resources have been exhausted.
 177  *
 178  * @since 1.7
 179  * @author Doug Lea
 180  */
 181 public class ForkJoinPool extends AbstractExecutorService {
 182 
 183     /*
 184      * Implementation Overview
 185      *
 186      * This class and its nested classes provide the main
 187      * functionality and control for a set of worker threads:
 188      * Submissions from non-FJ threads enter into submission queues.
 189      * Workers take these tasks and typically split them into subtasks
 190      * that may be stolen by other workers. Work-stealing based on
 191      * randomized scans generally leads to better throughput than
 192      * "work dealing" in which producers assign tasks to idle threads,
 193      * in part because threads that have finished other tasks before
 194      * the signalled thread wakes up (which can be a long time) can
 195      * take the task instead.  Preference rules give first priority to
 196      * processing tasks from their own queues (LIFO or FIFO, depending
 197      * on mode), then to randomized FIFO steals of tasks in other
  198      * queues.  This framework began as a vehicle for supporting
 199      * tree-structured parallelism using work-stealing.  Over time,
 200      * its scalability advantages led to extensions and changes to
 201      * better support more diverse usage contexts.  Because most
 202      * internal methods and nested classes are interrelated, their
 203      * main rationale and descriptions are presented here; individual
 204      * methods and nested classes contain only brief comments about
 205      * details.
 206      *
 207      * WorkQueues
 208      * ==========
 209      *
 210      * Most operations occur within work-stealing queues (in nested
 211      * class WorkQueue).  These are special forms of Deques that
 212      * support only three of the four possible end-operations -- push,
 213      * pop, and poll (aka steal), under the further constraints that
 214      * push and pop are called only from the owning thread (or, as
 215      * extended here, under a lock), while poll may be called from
 216      * other threads.  (If you are unfamiliar with them, you probably
 217      * want to read Herlihy and Shavit's book "The Art of
  218      * Multiprocessor Programming", chapter 16 describing these in
 219      * more detail before proceeding.)  The main work-stealing queue
 220      * design is roughly similar to those in the papers "Dynamic
 221      * Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005
 222      * (http://research.sun.com/scalable/pubs/index.html) and
 223      * "Idempotent work stealing" by Michael, Saraswat, and Vechev,
 224      * PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186).
 225      * The main differences ultimately stem from GC requirements that
 226      * we null out taken slots as soon as we can, to maintain as small
 227      * a footprint as possible even in programs generating huge
 228      * numbers of tasks. To accomplish this, we shift the CAS
 229      * arbitrating pop vs poll (steal) from being on the indices
 230      * ("base" and "top") to the slots themselves.
 231      *
 232      * Adding tasks then takes the form of a classic array push(task)
 233      * in a circular buffer:
 234      *    q.array[q.top++ % length] = task;
 235      *
 236      * The actual code needs to null-check and size-check the array,
 237      * uses masking, not mod, for indexing a power-of-two-sized array,
 238      * enforces memory ordering, supports resizing, and possibly
 239      * signals waiting workers to start scanning -- see below.
 240      *
 241      * The pop operation (always performed by owner) is of the form:
 242      *   if ((task = getAndSet(q.array, (q.top-1) % length, null)) != null)
 243      *        decrement top and return task;
 244      * If this fails, the queue is empty.
 245      *
 246      * The poll operation by another stealer thread is, basically:
 247      *   if (CAS nonnull task at q.array[q.base % length] to null)
 248      *       increment base and return task;
 249      *
 250      * This may fail due to contention, and may be retried.
 251      * Implementations must ensure a consistent snapshot of the base
 252      * index and the task (by looping or trying elsewhere) before
 253      * trying CAS.  There isn't actually a method of this form,
 254      * because failure due to inconsistency or contention is handled
 255      * in different ways in different contexts, normally by first
 256      * trying other queues. (For the most straightforward example, see
 257      * method pollScan.) There are further variants for cases
  258      * requiring inspection of elements before extracting them, and
  259      * so must interleave these with variants of this code.  Also, a more
 260      * efficient version (nextLocalTask) is used for polls by owners.
 261      * It avoids some overhead because the queue cannot be growing
 262      * during call.
 263      *
 264      * Memory ordering.  See "Correct and Efficient Work-Stealing for
 265      * Weak Memory Models" by Le, Pop, Cohen, and Nardelli, PPoPP 2013
 266      * (http://www.di.ens.fr/~zappa/readings/ppopp13.pdf) for an
 267      * analysis of memory ordering requirements in work-stealing
 268      * algorithms similar to the one used here.  Inserting and
 269      * extracting tasks in array slots via volatile or atomic accesses
 270      * or explicit fences provides primary synchronization.
 271      *
 272      * Operations on deque elements require reads and writes of both
 273      * indices and slots. When possible, we allow these to occur in
 274      * any order.  Because the base and top indices (along with other
 275      * pool or array fields accessed in many methods) only imprecisely
 276      * guide where to extract from, we let accesses other than the
 277      * element getAndSet/CAS/setVolatile appear in any order, using
 278      * plain mode. But we must still preface some methods (mainly
 279      * those that may be accessed externally) with an acquireFence to
 280      * avoid unbounded staleness. This is equivalent to acting as if
 281      * callers use an acquiring read of the reference to the pool or
 282      * queue when invoking the method, even when they do not. We use
 283      * explicit acquiring reads (getSlot) rather than plain array
 284      * access when acquire mode is required but not otherwise ensured
 285      * by context. To reduce stalls by other stealers, we encourage
 286      * timely writes to the base index by immediately following
 287      * updates with a write of a volatile field that must be updated
 288      * anyway, or an Opaque-mode write if there is no such
 289      * opportunity.
 290      *
 291      * Because indices and slot contents cannot always be consistent,
 292      * the emptiness check base == top is only quiescently accurate
 293      * (and so used where this suffices). Otherwise, it may err on the
 294      * side of possibly making the queue appear nonempty when a push,
 295      * pop, or poll have not fully committed, or making it appear
 296      * empty when an update of top or base has not yet been seen.
 297      * Similarly, the check in push for the queue array being full may
 298      * trigger when not completely full, causing a resize earlier than
 299      * required.
 300      *
 301      * Mainly because of these potential inconsistencies among slots
 302      * vs indices, the poll operation, considered individually, is not
 303      * wait-free. One thief cannot successfully continue until another
 304      * in-progress one (or, if previously empty, a push) visibly
 305      * completes.  This can stall threads when required to consume
 306      * from a given queue (which may spin).  However, in the
 307      * aggregate, we ensure probabilistic non-blockingness at least
 308      * until checking quiescence (which is intrinsically blocking):
 309      * If an attempted steal fails, a scanning thief chooses a
 310      * different victim target to try next. So, in order for one thief
 311      * to progress, it suffices for any in-progress poll or new push
 312      * on any empty queue to complete. The worst cases occur when many
 313      * threads are looking for tasks being produced by a stalled
 314      * producer.
 315      *
 316      * This approach also enables support of a user mode in which
 317      * local task processing is in FIFO, not LIFO order, simply by
 318      * using poll rather than pop.  This can be useful in
 319      * message-passing frameworks in which tasks are never joined,
 320      * although with increased contention among task producers and
 321      * consumers.
 322      *
 323      * WorkQueues are also used in a similar way for tasks submitted
 324      * to the pool. We cannot mix these tasks in the same queues used
 325      * by workers. Instead, we randomly associate submission queues
 326      * with submitting threads, using a form of hashing.  The
 327      * ThreadLocalRandom probe value serves as a hash code for
 328      * choosing existing queues, and may be randomly repositioned upon
 329      * contention with other submitters.  In essence, submitters act
 330      * like workers except that they are restricted to executing local
 331      * tasks that they submitted (or when known, subtasks thereof).
 332      * Insertion of tasks in shared mode requires a lock. We use only
 333      * a simple spinlock (using field "source"), because submitters
 334      * encountering a busy queue move to a different position to use
 335      * or create other queues. They block only when registering new
 336      * queues.
 337      *
 338      * Management
 339      * ==========
 340      *
 341      * The main throughput advantages of work-stealing stem from
 342      * decentralized control -- workers mostly take tasks from
 343      * themselves or each other, at rates that can exceed a billion
 344      * per second.  Most non-atomic control is performed by some form
 345      * of scanning across or within queues.  The pool itself creates,
 346      * activates (enables scanning for and running tasks),
 347      * deactivates, blocks, and terminates threads, all with minimal
 348      * central information.  There are only a few properties that we
 349      * can globally track or maintain, so we pack them into a small
 350      * number of variables, often maintaining atomicity without
 351      * blocking or locking.  Nearly all essentially atomic control
 352      * state is held in a few volatile variables that are by far most
 353      * often read (not written) as status and consistency checks. We
 354      * pack as much information into them as we can.
 355      *
 356      * Field "ctl" contains 64 bits holding information needed to
 357      * atomically decide to add, enqueue (on an event queue), and
 358      * dequeue and release workers.  To enable this packing, we
 359      * restrict maximum parallelism to (1<<15)-1 (which is far in
 360      * excess of normal operating range) to allow ids, counts, and
 361      * their negations (used for thresholding) to fit into 16bit
 362      * subfields.
 363      *
 364      * Field "mode" holds configuration parameters as well as lifetime
 365      * status, atomically and monotonically setting SHUTDOWN, STOP,
 366      * and finally TERMINATED bits. It is updated only via bitwise
 367      * atomics (getAndBitwiseOr).
 368      *
 369      * Array "queues" holds references to WorkQueues.  It is updated
 370      * (only during worker creation and termination) under the
 371      * registrationLock, but is otherwise concurrently readable, and
 372      * accessed directly (although always prefaced by acquireFences or
 373      * other acquiring reads). To simplify index-based operations, the
 374      * array size is always a power of two, and all readers must
 375      * tolerate null slots.  Worker queues are at odd indices. Worker
 376      * ids masked with SMASK match their index. Shared (submission)
 377      * queues are at even indices. Grouping them together in this way
 378      * simplifies and speeds up task scanning.
 379      *
 380      * All worker thread creation is on-demand, triggered by task
 381      * submissions, replacement of terminated workers, and/or
 382      * compensation for blocked workers. However, all other support
 383      * code is set up to work with other policies.  To ensure that we
 384      * do not hold on to worker or task references that would prevent
 385      * GC, all accesses to workQueues are via indices into the
 386      * queues array (which is one source of some of the messy code
 387      * constructions here). In essence, the queues array serves as
 388      * a weak reference mechanism. Thus for example the stack top
 389      * subfield of ctl stores indices, not references.
 390      *
 391      * Queuing Idle Workers. Unlike HPC work-stealing frameworks, we
 392      * cannot let workers spin indefinitely scanning for tasks when
 393      * none can be found immediately, and we cannot start/resume
 394      * workers unless there appear to be tasks available.  On the
 395      * other hand, we must quickly prod them into action when new
 396      * tasks are submitted or generated. These latencies are mainly a
 397      * function of JVM park/unpark (and underlying OS) performance,
 398      * which can be slow and variable.  In many usages, ramp-up time
 399      * is the main limiting factor in overall performance, which is
 400      * compounded at program start-up by JIT compilation and
 401      * allocation. On the other hand, throughput degrades when too
 402      * many threads poll for too few tasks.
 403      *
 404      * The "ctl" field atomically maintains total and "released"
 405      * worker counts, plus the head of the available worker queue
 406      * (actually stack, represented by the lower 32bit subfield of
 407      * ctl).  Released workers are those known to be scanning for
 408      * and/or running tasks. Unreleased ("available") workers are
 409      * recorded in the ctl stack. These workers are made available for
 410      * signalling by enqueuing in ctl (see method awaitWork).  The
 411      * "queue" is a form of Treiber stack. This is ideal for
 412      * activating threads in most-recently used order, and improves
 413      * performance and locality, outweighing the disadvantages of
 414      * being prone to contention and inability to release a worker
 415      * unless it is topmost on stack. The top stack state holds the
 416      * value of the "phase" field of the worker: its index and status,
 417      * plus a version counter that, in addition to the count subfields
 418      * (also serving as version stamps) provide protection against
 419      * Treiber stack ABA effects.
 420      *
 421      * Creating workers. To create a worker, we pre-increment counts
 422      * (serving as a reservation), and attempt to construct a
 423      * ForkJoinWorkerThread via its factory. On starting, the new
 424      * thread first invokes registerWorker, where it constructs a
 425      * WorkQueue and is assigned an index in the queues array
 426      * (expanding the array if necessary).  Upon any exception across
 427      * these steps, or null return from factory, deregisterWorker
 428      * adjusts counts and records accordingly.  If a null return, the
 429      * pool continues running with fewer than the target number
 430      * workers. If exceptional, the exception is propagated, generally
 431      * to some external caller.
 432      *
 433      * WorkQueue field "phase" is used by both workers and the pool to
 434      * manage and track whether a worker is UNSIGNALLED (possibly
 435      * blocked waiting for a signal).  When a worker is enqueued its
 436      * phase field is set negative. Note that phase field updates lag
 437      * queue CAS releases; seeing a negative phase does not guarantee
 438      * that the worker is available. When queued, the lower 16 bits of
 439      * its phase must hold its pool index. So we place the index there
 440      * upon initialization and never modify these bits.
 441      *
 442      * The ctl field also serves as the basis for memory
 443      * synchronization surrounding activation. This uses a more
 444      * efficient version of a Dekker-like rule that task producers and
 445      * consumers sync with each other by both writing/CASing ctl (even
 446      * if to its current value).  However, rather than CASing ctl to
 447      * its current value in the common case where no action is
 448      * required, we reduce write contention by ensuring that
 449      * signalWork invocations are prefaced with a full-volatile memory
 450      * access (which is usually needed anyway).
 451      *
 452      * Signalling. Signals (in signalWork) cause new or reactivated
 453      * workers to scan for tasks.  Method signalWork and its callers
 454      * try to approximate the unattainable goal of having the right
 455      * number of workers activated for the tasks at hand, but must err
 456      * on the side of too many workers vs too few to avoid stalls.  If
 457      * computations are purely tree structured, it suffices for every
 458      * worker to activate another when it pushes a task into an empty
 459      * queue, resulting in O(log(#threads)) steps to full activation.
 460      * If instead, tasks come in serially from only a single producer,
 461      * each worker taking its first (since the last quiescence) task
 462      * from a queue should signal another if there are more tasks in
 463      * that queue. This is equivalent to, but generally faster than,
  464      * arranging that the stealer take two tasks, re-pushing one on its own
 465      * queue, and signalling (because its queue is empty), also
 466      * resulting in logarithmic full activation time. Because we don't
 467      * know about usage patterns (or most commonly, mixtures), we use
 468      * both approaches.  We approximate the second rule by arranging
 469      * that workers in scan() do not repeat signals when repeatedly
 470      * taking tasks from any given queue, by remembering the previous
 471      * one. There are narrow windows in which both rules may apply,
 472      * leading to duplicate or unnecessary signals. Despite such
 473      * limitations, these rules usually avoid slowdowns that otherwise
 474      * occur when too many workers contend to take too few tasks, or
 475      * when producers waste most of their time resignalling.  However,
 476      * contention and overhead effects may still occur during ramp-up,
 477      * ramp-down, and small computations involving only a few workers.
 478      *
 479      * Scanning. Method scan performs top-level scanning for (and
 480      * execution of) tasks.  Scans by different workers and/or at
 481      * different times are unlikely to poll queues in the same
 482      * order. Each scan traverses and tries to poll from each queue in
 483      * a pseudorandom permutation order by starting at a random index,
 484      * and using a constant cyclically exhaustive stride; restarting
  485      * upon contention.  (Non-top-level scans, for example in
 486      * helpJoin, use simpler linear probes because they do not
 487      * systematically contend with top-level scans.)  The pseudorandom
 488      * generator need not have high-quality statistical properties in
 489      * the long term. We use Marsaglia XorShifts, seeded with the Weyl
 490      * sequence from ThreadLocalRandom probes, which are cheap and
 491      * suffice. Scans do not otherwise explicitly take into account
  492      * core affinities, loads, cache localities, etc. However, they do
 493      * exploit temporal locality (which usually approximates these) by
 494      * preferring to re-poll from the same queue after a successful
 495      * poll before trying others (see method topLevelExec).  This
 496      * reduces fairness, which is partially counteracted by using a
 497      * one-shot form of poll (tryPoll) that may lose to other workers.
 498      *
 499      * Deactivation. Method scan returns a sentinel when no tasks are
 500      * found, leading to deactivation (see awaitWork). The count
 501      * fields in ctl allow accurate discovery of quiescent states
 502      * (i.e., when all workers are idle) after deactivation. However,
 503      * this may also race with new (external) submissions, so a
 504      * recheck is also needed to determine quiescence. Upon apparently
 505      * triggering quiescence, awaitWork re-scans and self-signals if
 506      * it may have missed a signal. In other cases, a missed signal
 507      * may transiently lower parallelism because deactivation does not
  508      * necessarily mean that there is no more work, only that
  509      * there were no tasks not taken by other workers.  But more
 510      * signals are generated (see above) to eventually reactivate if
 511      * needed.
 512      *
 513      * Trimming workers. To release resources after periods of lack of
 514      * use, a worker starting to wait when the pool is quiescent will
 515      * time out and terminate if the pool has remained quiescent for
 516      * period given by field keepAlive.
 517      *
 518      * Shutdown and Termination. A call to shutdownNow invokes
 519      * tryTerminate to atomically set a mode bit. The calling thread,
 520      * as well as every other worker thereafter terminating, helps
 521      * terminate others by cancelling their unprocessed tasks, and
 522      * waking them up. Calls to non-abrupt shutdown() preface this by
 523      * checking isQuiescent before triggering the "STOP" phase of
 524      * termination. To conform to ExecutorService invoke, invokeAll,
 525      * and invokeAny specs, we must track pool status while waiting,
 526      * and interrupt interruptible callers on termination (see
 527      * ForkJoinTask.joinForPoolInvoke etc).
 528      *
 529      * Joining Tasks
 530      * =============
 531      *
 532      * Normally, the first option when joining a task that is not done
 533      * is to try to unfork it from local queue and run it.  Otherwise,
 534      * any of several actions may be taken when one worker is waiting
 535      * to join a task stolen (or always held) by another.  Because we
 536      * are multiplexing many tasks on to a pool of workers, we can't
 537      * always just let them block (as in Thread.join).  We also cannot
 538      * just reassign the joiner's run-time stack with another and
 539      * replace it later, which would be a form of "continuation", that
 540      * even if possible is not necessarily a good idea since we may
 541      * need both an unblocked task and its continuation to progress.
 542      * Instead we combine two tactics:
 543      *
 544      *   Helping: Arranging for the joiner to execute some task that it
 545      *      could be running if the steal had not occurred.
 546      *
 547      *   Compensating: Unless there are already enough live threads,
 548      *      method tryCompensate() may create or re-activate a spare
 549      *      thread to compensate for blocked joiners until they unblock.
 550      *
 551      * A third form (implemented via tryRemove) amounts to helping a
 552      * hypothetical compensator: If we can readily tell that a
 553      * possible action of a compensator is to steal and execute the
 554      * task being joined, the joining thread can do so directly,
 555      * without the need for a compensation thread; although with a
 556      * (rare) possibility of reduced parallelism because of a
 557      * transient gap in the queue array.
 558      *
 559      * Other intermediate forms available for specific task types (for
 560      * example helpAsyncBlocker) often avoid or postpone the need for
 561      * blocking or compensation.
 562      *
 563      * The ManagedBlocker extension API can't use helping so relies
 564      * only on compensation in method awaitBlocker.
 565      *
 566      * The algorithm in helpJoin entails a form of "linear helping".
 567      * Each worker records (in field "source") the id of the queue
 568      * from which it last stole a task.  The scan in method helpJoin
 569      * uses these markers to try to find a worker to help (i.e., steal
 570      * back a task from and execute it) that could hasten completion
 571      * of the actively joined task.  Thus, the joiner executes a task
 572      * that would be on its own local deque if the to-be-joined task
 573      * had not been stolen. This is a conservative variant of the
 574      * approach described in Wagner & Calder "Leapfrogging: a portable
 575      * technique for implementing efficient futures" SIGPLAN Notices,
 576      * 1993 (http://portal.acm.org/citation.cfm?id=155354). It differs
 577      * mainly in that we only record queue ids, not full dependency
 578      * links.  This requires a linear scan of the queues array to
 579      * locate stealers, but isolates cost to when it is needed, rather
 580      * than adding to per-task overhead. Also, searches are limited to
 581      * direct and at most two levels of indirect stealers, after which
 582      * there are rapidly diminishing returns on increased overhead.
 583      * Searches can fail to locate stealers when stalls delay
 584      * recording sources.  Further, even when accurately identified,
 585      * stealers might not ever produce a task that the joiner can in
 586      * turn help with. So, compensation is tried upon failure to find
 587      * tasks to run.
 588      *
 589      * Joining CountedCompleters (see helpComplete) differs from (and
 590      * is generally more efficient than) other cases because task
 591      * eligibility is determined by checking completion chains rather
 592      * than tracking stealers.
 593      *
 594      * Joining under timeouts (ForkJoinTask timed get) uses a
 595      * constrained mixture of helping and compensating in part because
 596      * pools (actually, only the common pool) may not have any
 597      * available threads: If the pool is saturated (all available
 598      * workers are busy), the caller tries to remove and otherwise
 599      * help; else it blocks under compensation so that it may time out
 600      * independently of any tasks.
 601      *
 602      * Compensation does not by default aim to keep exactly the target
 603      * parallelism number of unblocked threads running at any given
 604      * time. Some previous versions of this class employed immediate
 605      * compensations for any blocked join. However, in practice, the
 606      * vast majority of blockages are transient byproducts of GC and
 607      * other JVM or OS activities that are made worse by replacement
 608      * when they cause longer-term oversubscription.  Rather than
 609      * impose arbitrary policies, we allow users to override the
 610      * default of only adding threads upon apparent starvation.  The
 611      * compensation mechanism may also be bounded.  Bounds for the
 612      * commonPool (see COMMON_MAX_SPARES) better enable JVMs to cope
 613      * with programming errors and abuse before running out of
 614      * resources to do so.
 615      *
 616      * Common Pool
 617      * ===========
 618      *
 619      * The static common pool always exists after static
 620      * initialization.  Since it (or any other created pool) need
 621      * never be used, we minimize initial construction overhead and
 622      * footprint to the setup of about a dozen fields.
 623      *
 624      * When external threads submit to the common pool, they can
 625      * perform subtask processing (see helpComplete and related
 626      * methods) upon joins.  This caller-helps policy makes it
 627      * sensible to set common pool parallelism level to one (or more)
 628      * less than the total number of available cores, or even zero for
 629      * pure caller-runs.  We do not need to record whether external
 630      * submissions are to the common pool -- if not, external help
 631      * methods return quickly. These submitters would otherwise be
 632      * blocked waiting for completion, so the extra effort (with
 633      * liberally sprinkled task status checks) in inapplicable cases
 634      * amounts to an odd form of limited spin-wait before blocking in
 635      * ForkJoinTask.join.
 636      *
 637      * Guarantees for common pool parallelism zero are limited to
 638      * tasks that are joined by their callers in a tree-structured
 639      * fashion or use CountedCompleters (as is true for jdk
 640      * parallelStreams). Support infiltrates several methods,
 641      * including those that retry helping steps until we are sure that
 642      * none apply if there are no workers.
 643      *
 644      * As a more appropriate default in managed environments, unless
 645      * overridden by system properties, we use workers of subclass
 646      * InnocuousForkJoinWorkerThread when there is a SecurityManager
 647      * present. These workers have no permissions set, do not belong
 648      * to any user-defined ThreadGroup, and erase all ThreadLocals
 649      * after executing any top-level task.  The associated mechanics
 650      * may be JVM-dependent and must access particular Thread class
 651      * fields to achieve this effect.
 652      *
 653      * Interrupt handling
 654      * ==================
 655      *
 656      * The framework is designed to manage task cancellation
 657      * (ForkJoinTask.cancel) independently from the interrupt status
 658      * of threads running tasks. (See the public ForkJoinTask
 659      * documentation for rationale.)  Interrupts are issued only in
 660      * tryTerminate, when workers should be terminating and tasks
 661      * should be cancelled anyway. Interrupts are cleared only when
 662      * necessary to ensure that calls to LockSupport.park do not loop
 663      * indefinitely (park returns immediately if the current thread is
 664      * interrupted). If so, interruption is reinstated after blocking
 665      * if status could be visible during the scope of any task.  For
 666      * cases in which task bodies are specified or desired to
 667      * interrupt upon cancellation, ForkJoinTask.cancel can be
 668      * overridden to do so (as is done for invoke{Any,All}).
 669      *
 670      * Memory placement
 671      * ================
 672      *
 673      * Performance can be very sensitive to placement of instances of
 674      * ForkJoinPool and WorkQueues and their queue arrays. To reduce
 675      * false-sharing impact, the @Contended annotation isolates the
 676      * ForkJoinPool.ctl field as well as the most heavily written
 677      * WorkQueue fields. These mainly reduce cache traffic by scanners.
 678      * WorkQueue arrays are presized large enough to avoid resizing
 679      * (which transiently reduces throughput) in most tree-like
 680      * computations, although not in some streaming usages. Initial
 681      * sizes are not large enough to avoid secondary contention
 682      * effects (especially for GC cardmarks) when queues are placed
 683      * near each other in memory. This is common, but has different
 684      * impact in different collectors and remains incompletely
 685      * addressed.
 686      *
 687      * Style notes
 688      * ===========
 689      *
 690      * Memory ordering relies mainly on atomic operations (CAS,
 691      * getAndSet, getAndAdd) along with explicit fences.  This can be
 692      * awkward and ugly, but also reflects the need to control
 693      * outcomes across the unusual cases that arise in very racy code
 694      * with very few invariants. All fields are read into locals
 695      * before use, and null-checked if they are references, even if
 696      * they can never be null under current usages.  Array accesses
 697      * using masked indices include checks (that are always true) that
 698      * the array length is non-zero to avoid compilers inserting more
 699      * expensive traps.  This is usually done in a "C"-like style of
 700      * listing declarations at the heads of methods or blocks, and
 701      * using inline assignments on first encounter.  Nearly all
 702      * explicit checks lead to bypass/return, not exception throws,
 703      * because they may legitimately arise during shutdown.
 704      *
 705      * There is a lot of representation-level coupling among classes
 706      * ForkJoinPool, ForkJoinWorkerThread, and ForkJoinTask.  The
 707      * fields of WorkQueue maintain data structures managed by
 708      * ForkJoinPool, so are directly accessed.  There is little point
 709      * trying to reduce this, since any associated future changes in
 710      * representations will need to be accompanied by algorithmic
 711      * changes anyway. Several methods intrinsically sprawl because
 712      * they must accumulate sets of consistent reads of fields held in
 713      * local variables. Some others are artificially broken up to
 714      * reduce producer/consumer imbalances due to dynamic compilation.
 715      * There are also other coding oddities (including several
 716      * unnecessary-looking hoisted null checks) that help some methods
 717      * perform reasonably even when interpreted (not compiled).
 718      *
 719      * The order of declarations in this file is (with a few exceptions):
 720      * (1) Static utility functions
 721      * (2) Nested (static) classes
 722      * (3) Static fields
 723      * (4) Fields, along with constants used when unpacking some of them
 724      * (5) Internal control methods
 725      * (6) Callbacks and other support for ForkJoinTask methods
 726      * (7) Exported methods
 727      * (8) Static block initializing statics in minimally dependent order
 728      *
 729      * Revision notes
 730      * ==============
 731      *
 732      * The main sources of differences of January 2020 ForkJoin
 733      * classes from previous version are:
 734      *
 735      * * ForkJoinTask now uses field "aux" to support blocking joins
 736      *   and/or record exceptions, replacing reliance on builtin
 737      *   monitors and side tables.
 738      * * Scans probe slots (vs compare indices), along with related
 739      *   changes that reduce performance differences across most
 740      *   garbage collectors, and reduce contention.
 741      * * Refactoring for better integration of special task types and
 742      *   other capabilities that had been incrementally tacked on. Plus
 743      *   many minor reworkings to improve consistency.
 744      */
 745 
 746     // Static utilities
 747 
 748     /**
 749      * If there is a security manager, makes sure caller has
 750      * permission to modify threads.
 751      */
 752     private static void checkPermission() {
 753         @SuppressWarnings("removal")
 754         SecurityManager security = System.getSecurityManager();
 755         if (security != null)
 756             security.checkPermission(modifyThreadPermission);
 757     }
 758 
 759     @SuppressWarnings("removal")
 760     static AccessControlContext contextWithPermissions(Permission ... perms) {
 761         Permissions permissions = new Permissions();
 762         for (Permission perm : perms)
 763             permissions.add(perm);
 764         return new AccessControlContext(
 765             new ProtectionDomain[] { new ProtectionDomain(null, permissions) });
 766     }
 767 
 768     // Nested classes
 769 
 770     /**
 771      * Factory for creating new {@link ForkJoinWorkerThread}s.
 772      * A {@code ForkJoinWorkerThreadFactory} must be defined and used
 773      * for {@code ForkJoinWorkerThread} subclasses that extend base
 774      * functionality or initialize threads with different contexts.
 775      */
 776     public static interface ForkJoinWorkerThreadFactory {
 777         /**
 778          * Returns a new worker thread operating in the given pool.
 779          * Returning null or throwing an exception may result in tasks
 780          * never being executed.  If this method throws an exception,
 781          * it is relayed to the caller of the method (for example
 782          * {@code execute}) causing attempted thread creation. If this
 783          * method returns null or throws an exception, it is not
 784          * retried until the next attempted creation (for example
 785          * another call to {@code execute}).
 786          *
 787          * @param pool the pool this thread works in
 788          * @return the new worker thread, or {@code null} if the request
 789          *         to create a thread is rejected
 790          * @throws NullPointerException if the pool is null
 791          */
 792         public ForkJoinWorkerThread newThread(ForkJoinPool pool);
 793     }
 794 
 795     /**
 796      * Default ForkJoinWorkerThreadFactory implementation; creates a
 797      * new ForkJoinWorkerThread using the system class loader as the
 798      * thread context class loader.
 799      */
 800     static final class DefaultForkJoinWorkerThreadFactory
 801         implements ForkJoinWorkerThreadFactory {
 802         // ACC for access to the factory
 803         @SuppressWarnings("removal")
 804         private static final AccessControlContext ACC = contextWithPermissions(
 805             new RuntimePermission("getClassLoader"),
 806             new RuntimePermission("setContextClassLoader"));
 807         @SuppressWarnings("removal")
 808         public final ForkJoinWorkerThread newThread(ForkJoinPool pool) {
 809             return AccessController.doPrivileged(
 810                 new PrivilegedAction<>() {
 811                     public ForkJoinWorkerThread run() {
 812                         return new ForkJoinWorkerThread(null, pool, true, false);
 813                     }},
 814                 ACC);
 815         }
 816     }
 817 
 818     /**
 819      * Factory for CommonPool unless overridden by System property.
 820      * Creates InnocuousForkJoinWorkerThreads if a security manager is
 821      * present at time of invocation.  Support requires that we break
 822      * quite a lot of encapsulation (some via helper methods in
 823      * ThreadLocalRandom) to access and set Thread fields.
 824      */
 825     static final class DefaultCommonPoolForkJoinWorkerThreadFactory
 826         implements ForkJoinWorkerThreadFactory {
 827         @SuppressWarnings("removal")
 828         private static final AccessControlContext ACC = contextWithPermissions(
 829             modifyThreadPermission,
 830             new RuntimePermission("enableContextClassLoaderOverride"),
 831             new RuntimePermission("modifyThreadGroup"),
 832             new RuntimePermission("getClassLoader"),
 833             new RuntimePermission("setContextClassLoader"));
 834 
 835         @SuppressWarnings("removal")
 836         public final ForkJoinWorkerThread newThread(ForkJoinPool pool) {
 837             return AccessController.doPrivileged(
 838                  new PrivilegedAction<>() {
 839                      public ForkJoinWorkerThread run() {
 840                          return System.getSecurityManager() == null ?
 841                              new ForkJoinWorkerThread(null, pool, true, true):
 842                              new ForkJoinWorkerThread.
 843                              InnocuousForkJoinWorkerThread(pool); }},
 844                  ACC);
 845         }
 846     }
 847 
    // Constants shared across ForkJoinPool and WorkQueue

    // Bounds
    static final int SWIDTH       = 16;            // width of short
    static final int SMASK        = 0xffff;        // short bits == max index
    static final int MAX_CAP      = 0x7fff;        // max #workers - 1

    // Masks and units for WorkQueue.phase and ctl sp subfield
    static final int UNSIGNALLED  = 1 << 31;       // must be negative
    static final int SS_SEQ       = 1 << 16;       // version count

    // Mode bits and sentinels, some also used in WorkQueue fields
    static final int FIFO         = 1 << 16;       // fifo queue or access mode
    static final int SRC          = 1 << 17;       // set for valid queue ids
    static final int INNOCUOUS    = 1 << 18;       // set for Innocuous workers
    static final int QUIET        = 1 << 19;       // quiescing phase or source
    static final int SHUTDOWN     = 1 << 24;       // pool shutdown requested
    static final int TERMINATED   = 1 << 25;       // pool fully terminated
    static final int STOP         = 1 << 31;       // must be negative
    static final int UNCOMPENSATE = 1 << 16;       // tryCompensate return

    /**
     * Initial capacity of work-stealing queue array.  Must be a power
     * of two, at least 2. See above.
     */
    static final int INITIAL_QUEUE_CAPACITY = 1 << 8;
 874 
 875     /**
 876      * Queues supporting work-stealing as well as external task
 877      * submission. See above for descriptions and algorithms.
 878      */
 879     static final class WorkQueue {
        volatile int phase;        // versioned, negative if inactive
        int stackPred;             // pool stack (ctl) predecessor link
        int config;                // index, mode, ORed with SRC after init
        int base;                  // index of next slot for poll
        ForkJoinTask<?>[] array;   // the queued tasks; power of 2 size
        final ForkJoinWorkerThread owner; // owning thread or null if shared

        // segregate fields frequently updated but not read by scans or steals
        // (grouped into @Contended region "w" to limit false sharing with
        // the scanned fields above; see "Memory placement" notes above)
        @jdk.internal.vm.annotation.Contended("w")
        int top;                   // index of next slot for push
        @jdk.internal.vm.annotation.Contended("w")
        volatile int source;       // source queue id, lock, or sentinel
        @jdk.internal.vm.annotation.Contended("w")
        int nsteals;               // number of steals from other queues
 894 
        // Support for atomic operations
        // (blank finals; assigned in a static initializer elsewhere in
        // this file, as required for final statics)
        private static final VarHandle QA; // for array slots
        private static final VarHandle SOURCE; // for field source (lock word)
        private static final VarHandle BASE;   // for field base
        /** Acquire-mode read of array slot i. */
        static final ForkJoinTask<?> getSlot(ForkJoinTask<?>[] a, int i) {
            return (ForkJoinTask<?>)QA.getAcquire(a, i);
        }
        /** Atomically exchanges slot i with null, returning the old value. */
        static final ForkJoinTask<?> getAndClearSlot(ForkJoinTask<?>[] a,
                                                     int i) {
            return (ForkJoinTask<?>)QA.getAndSet(a, i, null);
        }
        /** Volatile-mode write of v into slot i (visible to stealers). */
        static final void setSlotVolatile(ForkJoinTask<?>[] a, int i,
                                          ForkJoinTask<?> v) {
            QA.setVolatile(a, i, v);
        }
        /** CASes slot i from c to null; true if this call claimed it. */
        static final boolean casSlotToNull(ForkJoinTask<?>[] a, int i,
                                          ForkJoinTask<?> c) {
            return QA.compareAndSet(a, i, c, null);
        }
        /** Tries to acquire the queue lock by CASing source from 0 to 1. */
        final boolean tryLock() {
            return SOURCE.compareAndSet(this, 0, 1);
        }
        /** Opaque-mode write of base (coherent, minimal ordering). */
        final void setBaseOpaque(int b) {
            BASE.setOpaque(this, b);
        }
 920 
 921         /**
 922          * Constructor used by ForkJoinWorkerThreads. Most fields
 923          * are initialized upon thread start, in pool.registerWorker.
 924          */
 925         WorkQueue(ForkJoinWorkerThread owner, boolean isInnocuous) {
 926             this.config = (isInnocuous) ? INNOCUOUS : 0;
 927             this.owner = owner;
 928         }
 929 
 930         /**
 931          * Constructor used for external queues.
 932          */
 933         WorkQueue(int config) {
 934             array = new ForkJoinTask<?>[INITIAL_QUEUE_CAPACITY];
 935             this.config = config;
 936             owner = null;
 937             phase = -1;
 938         }
 939 
 940         /**
 941          * Returns an exportable index (used by ForkJoinWorkerThread).
 942          */
 943         final int getPoolIndex() {
 944             return (config & 0xffff) >>> 1; // ignore odd/even tag bit
 945         }
 946 
 947         /**
 948          * Returns the approximate number of tasks in the queue.
 949          */
 950         final int queueSize() {
 951             VarHandle.acquireFence(); // ensure fresh reads by external callers
 952             int n = top - base;
 953             return (n < 0) ? 0 : n;   // ignore transient negative
 954         }
 955 
 956         /**
 957          * Provides a more conservative estimate of whether this queue
 958          * has any tasks than does queueSize.
 959          */
 960         final boolean isEmpty() {
 961             return !((source != 0 && owner == null) || top - base > 0);
 962         }
 963 
 964         /**
 965          * Pushes a task. Call only by owner in unshared queues.
 966          *
 967          * @param task the task. Caller must ensure non-null.
 968          * @param pool (no-op if null)

 969          * @throws RejectedExecutionException if array cannot be resized
 970          */
 971         final void push(ForkJoinTask<?> task, ForkJoinPool pool) {
 972             ForkJoinTask<?>[] a = array;
 973             int s = top++, d = s - base, cap, m; // skip insert if disabled
 974             if (a != null && pool != null && (cap = a.length) > 0) {
 975                 setSlotVolatile(a, (m = cap - 1) & s, task);
 976                 if (d == m)
 977                     growArray();
 978                 if (d == m || a[m & (s - 1)] == null)
 979                     pool.signalWork(); // signal if was empty or resized



 980             }
 981         }
 982 




 983         /**
 984          * Pushes task to a shared queue with lock already held, and unlocks.
 985          *
 986          * @return true if caller should signal work
 987          */
 988         final boolean lockedPush(ForkJoinTask<?> task) {
 989             ForkJoinTask<?>[] a = array;
 990             int s = top++, d = s - base, cap, m;
 991             if (a != null && (cap = a.length) > 0) {
 992                 a[(m = cap - 1) & s] = task;
 993                 if (d == m)
 994                     growArray();
 995                 source = 0; // unlock
 996                 if (d == m || a[m & (s - 1)] == null)
 997                     return true;
 998             }
 999             return false;
1000         }
1001 
1002         /**
1003          * Doubles the capacity of array. Called by owner or with lock
1004          * held after pre-incrementing top, which is reverted on
1005          * allocation failure.
1006          */
1007         final void growArray() {
1008             ForkJoinTask<?>[] oldArray = array, newArray;
1009             int s = top - 1, oldCap, newCap;
1010             if (oldArray != null && (oldCap = oldArray.length) > 0 &&
1011                 (newCap = oldCap << 1) > 0) { // skip if disabled
1012                 try {
1013                     newArray = new ForkJoinTask<?>[newCap];
1014                 } catch (Throwable ex) {
1015                     top = s;
1016                     if (owner == null)
1017                         source = 0; // unlock
1018                     throw new RejectedExecutionException(
1019                         "Queue capacity exceeded");
1020                 }
1021                 int newMask = newCap - 1, oldMask = oldCap - 1;
1022                 for (int k = oldCap; k > 0; --k, --s) {
1023                     ForkJoinTask<?> x;        // poll old, push to new
1024                     if ((x = getAndClearSlot(oldArray, s & oldMask)) == null)
1025                         break;                // others already taken
1026                     newArray[s & newMask] = x;
1027                 }
1028                 VarHandle.releaseFence();     // fill before publish
1029                 array = newArray;
1030             }
1031         }
1032 
1033         // Variants of pop
1034 
1035         /**
1036          * Pops and returns task, or null if empty. Called only by owner.
1037          */
1038         private ForkJoinTask<?> pop() {
1039             ForkJoinTask<?> t = null;
1040             int s = top, cap; ForkJoinTask<?>[] a;
1041             if ((a = array) != null && (cap = a.length) > 0 && base != s-- &&
1042                 (t = getAndClearSlot(a, (cap - 1) & s)) != null)
1043                 top = s;
1044             return t;
1045         }
1046 
1047         /**
1048          * Pops the given task for owner only if it is at the current top.
1049          */
1050         final boolean tryUnpush(ForkJoinTask<?> task) {
1051             int s = top, cap; ForkJoinTask<?>[] a;
1052             if ((a = array) != null && (cap = a.length) > 0 && base != s-- &&
1053                 casSlotToNull(a, (cap - 1) & s, task)) {
1054                 top = s;
1055                 return true;
1056             }
1057             return false;
1058         }
1059 
1060         /**
1061          * Locking version of tryUnpush.
1062          */
1063         final boolean externalTryUnpush(ForkJoinTask<?> task) {
1064             boolean taken = false;
1065             for (;;) {
1066                 int s = top, cap, k; ForkJoinTask<?>[] a;
1067                 if ((a = array) == null || (cap = a.length) <= 0 ||
1068                     a[k = (cap - 1) & (s - 1)] != task)
1069                     break;
1070                 if (tryLock()) {
1071                     if (top == s && array == a) {
1072                         if (taken = casSlotToNull(a, k, task)) {
1073                             top = s - 1;
1074                             source = 0;
1075                             break;
1076                         }
1077                     }
1078                     source = 0; // release lock for retry
1079                 }
1080                 Thread.yield(); // trylock failure
1081             }
1082             return taken;
1083         }
1084 
1085         /**
1086          * Deep form of tryUnpush: Traverses from top and removes task if
1087          * present, shifting others to fill gap.
1088          */
1089         final boolean tryRemove(ForkJoinTask<?> task, boolean owned) {
1090             boolean taken = false;
1091             int p = top, cap; ForkJoinTask<?>[] a; ForkJoinTask<?> t;
1092             if ((a = array) != null && task != null && (cap = a.length) > 0) {
1093                 int m = cap - 1, s = p - 1, d = p - base;
1094                 for (int i = s, k; d > 0; --i, --d) {
1095                     if ((t = a[k = i & m]) == task) {
1096                         if (owned || tryLock()) {
1097                             if ((owned || (array == a && top == p)) &&
1098                                 (taken = casSlotToNull(a, k, t))) {
1099                                 for (int j = i; j != s; ) // shift down
1100                                     a[j & m] = getAndClearSlot(a, ++j & m);
1101                                 top = s;
1102                             }
1103                             if (!owned)
1104                                 source = 0;
1105                         }
1106                         break;
1107                     }
1108                 }
1109             }
1110             return taken;
1111         }
1112 
1113         // variants of poll
1114 
1115         /**
1116          * Tries once to poll next task in FIFO order, failing on
1117          * inconsistency or contention.
1118          */
1119         final ForkJoinTask<?> tryPoll() {
1120             int cap, b, k; ForkJoinTask<?>[] a;
1121             if ((a = array) != null && (cap = a.length) > 0) {
1122                 ForkJoinTask<?> t = getSlot(a, k = (cap - 1) & (b = base));
1123                 if (base == b++ && t != null && casSlotToNull(a, k, t)) {
1124                     setBaseOpaque(b);
1125                     return t;
1126                 }
1127             }
1128             return null;
1129         }
1130 
1131         /**
1132          * Takes next task, if one exists, in order specified by mode.
1133          */
1134         final ForkJoinTask<?> nextLocalTask(int cfg) {
1135             ForkJoinTask<?> t = null;
1136             int s = top, cap; ForkJoinTask<?>[] a;
1137             if ((a = array) != null && (cap = a.length) > 0) {
1138                 for (int b, d;;) {
1139                     if ((d = s - (b = base)) <= 0)
1140                         break;
1141                     if (d == 1 || (cfg & FIFO) == 0) {
1142                         if ((t = getAndClearSlot(a, --s & (cap - 1))) != null)
1143                             top = s;
1144                         break;
1145                     }
1146                     if ((t = getAndClearSlot(a, b++ & (cap - 1))) != null) {
1147                         setBaseOpaque(b);
1148                         break;
1149                     }
1150                 }
1151             }
1152             return t;
1153         }
1154 
1155         /**
1156          * Takes next task, if one exists, using configured mode.
1157          */
1158         final ForkJoinTask<?> nextLocalTask() {
1159             return nextLocalTask(config);
1160         }
1161 
1162         /**
1163          * Returns next task, if one exists, in order specified by mode.
1164          */
1165         final ForkJoinTask<?> peek() {
1166             VarHandle.acquireFence();
1167             int cap; ForkJoinTask<?>[] a;
1168             return ((a = array) != null && (cap = a.length) > 0) ?
1169                 a[(cap - 1) & ((config & FIFO) != 0 ? base : top - 1)] : null;
1170         }
1171 
1172         // specialized execution methods
1173 
1174         /**
1175          * Runs the given (stolen) task if nonnull, as well as
1176          * remaining local tasks and/or others available from the
1177          * given queue.
1178          */
1179         final void topLevelExec(ForkJoinTask<?> task, WorkQueue q) {
1180             int cfg = config, nstolen = 1;
1181             while (task != null) {
1182                 task.doExec();
1183                 if ((task = nextLocalTask(cfg)) == null &&
1184                     q != null && (task = q.tryPoll()) != null)
1185                     ++nstolen;
1186             }
1187             nsteals += nstolen;
1188             source = 0;
1189             if ((cfg & INNOCUOUS) != 0)
1190                 ThreadLocalRandom.eraseThreadLocals(Thread.currentThread());
1191         }
1192 
        /**
         * Tries to pop and run tasks within the target's computation
         * until done, not found, or limit exceeded.
         *
         * @param task root of CountedCompleter computation
         * @param owned true if owned by a ForkJoinWorkerThread
         * @param limit max runs, or zero for no limit
         * @return task status on exit
         */
        final int helpComplete(ForkJoinTask<?> task, boolean owned, int limit) {
            int status = 0, cap, k, p, s; ForkJoinTask<?>[] a; ForkJoinTask<?> t;
            while (task != null && (status = task.status) >= 0 &&
                   (a = array) != null && (cap = a.length) > 0 &&
                   (t = a[k = (cap - 1) & (s = (p = top) - 1)])
                   instanceof CountedCompleter) {
                CountedCompleter<?> f = (CountedCompleter<?>)t;
                boolean taken = false;
                for (;;) {     // exec if root task is a completer of t
                    if (f == task) {
                        if (owned) {             // caller owns queue: plain pop
                            if ((taken = casSlotToNull(a, k, t)))
                                top = s;
                        }
                        else if (tryLock()) {    // shared queue: take under lock
                            if (top == p && array == a &&
                                (taken = casSlotToNull(a, k, t)))
                                top = s;
                            source = 0;          // unlock
                        }
                        if (taken)
                            t.doExec();
                        else if (!owned)
                            Thread.yield(); // tryLock failure
                        break;
                    }
                    else if ((f = f.completer) == null)
                        break;                   // t is not part of task's computation
                }
                if (taken && limit != 0 && --limit == 0)
                    break;
            }
            return status;
        }
1236 
        /**
         * Tries to poll and run AsynchronousCompletionTasks until
         * none found or blocker is released.
         *
         * @param blocker the blocker
         */
        final void helpAsyncBlocker(ManagedBlocker blocker) {
            int cap, b, d, k; ForkJoinTask<?>[] a; ForkJoinTask<?> t;
            while (blocker != null && (d = top - (b = base)) > 0 &&
                   (a = array) != null && (cap = a.length) > 0 &&
                   (((t = getSlot(a, k = (cap - 1) & b)) == null && d > 1) ||
                    t instanceof
                    CompletableFuture.AsynchronousCompletionTask) &&
                   !blocker.isReleasable()) {
                // recheck base then CAS the slot to claim the task
                if (t != null && base == b++ && casSlotToNull(a, k, t)) {
                    setBaseOpaque(b);            // advance base past taken slot
                    t.doExec();
                }
            }
        }
1257 
        // misc

        /** AccessControlContext for innocuous workers, created on 1st use. */
        @SuppressWarnings("removal")
        private static AccessControlContext INNOCUOUS_ACC;
1263 
        /**
         * Initializes (upon registration) InnocuousForkJoinWorkerThreads.
         * Installs a no-permissions access control context and clears
         * any inherited thread locals.
         */
        @SuppressWarnings("removal")
        final void initializeInnocuousWorker() {
            AccessControlContext acc; // racy construction OK
            if ((acc = INNOCUOUS_ACC) == null)
                INNOCUOUS_ACC = acc = new AccessControlContext(
                    new ProtectionDomain[] { new ProtectionDomain(null, null) }); // no permissions
            Thread t = Thread.currentThread();
            ThreadLocalRandom.setInheritedAccessControlContext(t, acc);
            ThreadLocalRandom.eraseThreadLocals(t);
        }
1277 
1278         /**
1279          * Returns true if owned by a worker thread and not known to be blocked.
1280          */
1281         final boolean isApparentlyUnblocked() {
1282             Thread wt; Thread.State s;
1283             return ((wt = owner) != null &&
1284                     (s = wt.getState()) != Thread.State.BLOCKED &&
1285                     s != Thread.State.WAITING &&
1286                     s != Thread.State.TIMED_WAITING);
1287         }
1288 
        static {
            try {
                // VarHandles for atomic access: QA for task-array slots,
                // SOURCE and BASE for the corresponding WorkQueue fields.
                QA = MethodHandles.arrayElementVarHandle(ForkJoinTask[].class);
                MethodHandles.Lookup l = MethodHandles.lookup();
                SOURCE = l.findVarHandle(WorkQueue.class, "source", int.class);
                BASE = l.findVarHandle(WorkQueue.class, "base", int.class);
            } catch (ReflectiveOperationException e) {
                throw new ExceptionInInitializerError(e);
            }
        }
1299     }
1300 
    // static fields (initialized in static initializer below)

    /**
     * Creates a new ForkJoinWorkerThread. This factory is used unless
     * overridden in ForkJoinPool constructors.
     */
    public static final ForkJoinWorkerThreadFactory
        defaultForkJoinWorkerThreadFactory;

    /**
     * Permission required for callers of methods that may start or
     * kill threads.
     */
    static final RuntimePermission modifyThreadPermission;

    /**
     * Common (static) pool. Non-null for public use unless a static
     * construction exception, but internal usages null-check on use
     * to paranoically avoid potential initialization circularities
     * as well as to simplify generated code.
     */
    static final ForkJoinPool common;

    /**
     * Common pool parallelism. To allow simpler use and management
     * when common pool threads are disabled, we allow the underlying
     * common.parallelism field to be zero, but in that case still report
     * parallelism as 1 to reflect resulting caller-runs mechanics.
     */
    static final int COMMON_PARALLELISM;

    /**
     * Limit on spare thread construction in tryCompensate.
     */
    private static final int COMMON_MAX_SPARES;

    /**
     * Sequence number for creating worker names.
     * Updated atomically via the POOLIDS VarHandle.
     */
    private static volatile int poolIds;
1341 
    // static configuration constants

    /**
     * Default idle timeout value (in milliseconds) for the thread
     * triggering quiescence to park waiting for new work
     */
    private static final long DEFAULT_KEEPALIVE = 60_000L;

    /**
     * Undershoot tolerance (in milliseconds) for idle timeouts
     */
    private static final long TIMEOUT_SLOP = 20L;

    /**
     * The default value for COMMON_MAX_SPARES.  Overridable using the
     * "java.util.concurrent.ForkJoinPool.common.maximumSpares" system
     * property.  The default value is far in excess of normal
     * requirements, but also far short of MAX_CAP and typical OS
     * thread limits, so allows JVMs to catch misuse/abuse before
     * running out of resources needed to do so.
     */
    private static final int DEFAULT_COMMON_MAX_SPARES = 256;
1364 
    /*
     * Bits and masks for field ctl, packed with 4 16 bit subfields:
     * RC: Number of released (unqueued) workers minus target parallelism
     * TC: Number of total workers minus target parallelism
     * SS: version count and status of top waiting thread
     * ID: poolIndex of top of Treiber stack of waiters
     *
     * When convenient, we can extract the lower 32 stack top bits
     * (including version bits) as sp=(int)ctl.  The offsets of counts
     * by the target parallelism and the positionings of fields makes
     * it possible to perform the most common checks via sign tests of
     * fields: When ac is negative, there are not enough unqueued
     * workers, when tc is negative, there are not enough total
     * workers.  When sp is non-zero, there are waiting workers.  To
     * deal with possibly negative fields, we use casts in and out of
     * "short" and/or signed shifts to maintain signedness.
     *
     * Because it occupies uppermost bits, we can add one release
     * count using getAndAdd of RC_UNIT, rather than CAS, when
     * returning from a blocked join.  Other updates entail multiple
     * subfields and masking, requiring CAS.
     *
     * The limits packed in field "bounds" are also offset by the
     * parallelism level to make them comparable to the ctl rc and tc
     * fields.
     */

    // Lower and upper word masks
    private static final long SP_MASK    = 0xffffffffL; // low word: SS | ID (stack top)
    private static final long UC_MASK    = ~SP_MASK;    // high word: RC | TC counts

    // Release counts (uppermost 16 bits)
    private static final int  RC_SHIFT   = 48;
    private static final long RC_UNIT    = 0x0001L << RC_SHIFT;
    private static final long RC_MASK    = 0xffffL << RC_SHIFT;

    // Total counts (bits 32..47)
    private static final int  TC_SHIFT   = 32;
    private static final long TC_UNIT    = 0x0001L << TC_SHIFT;
    private static final long TC_MASK    = 0xffffL << TC_SHIFT;
    private static final long ADD_WORKER = 0x0001L << (TC_SHIFT + 15); // sign
1406 
    // Instance fields

    final long keepAlive;                // milliseconds before dropping if idle
    volatile long stealCount;            // collects worker nsteals
    int scanRover;                       // advances across pollScan calls
    volatile int threadIds;              // for worker thread names
    final int bounds;                    // min, max threads packed as shorts
    volatile int mode;                   // parallelism, runstate, queue mode
    WorkQueue[] queues;                  // main registry
    final ReentrantLock registrationLock; // guards updates to queues array
    Condition termination;               // lazily constructed
    final String workerNamePrefix;       // null for common pool
    final ForkJoinWorkerThreadFactory factory;
    final UncaughtExceptionHandler ueh;  // per-worker UEH
    final Predicate<? super ForkJoinPool> saturate;

    @jdk.internal.vm.annotation.Contended("fjpctl") // segregate
    volatile long ctl;                   // main pool control
1425 
    // Support for atomic operations
    private static final VarHandle CTL;       // for field ctl
    private static final VarHandle MODE;      // for field mode
    private static final VarHandle THREADIDS; // for field threadIds
    private static final VarHandle POOLIDS;   // for static field poolIds
    private boolean compareAndSetCtl(long c, long v) {
        return CTL.compareAndSet(this, c, v);
    }
    private long compareAndExchangeCtl(long c, long v) {
        return (long)CTL.compareAndExchange(this, c, v);
    }
    private long getAndAddCtl(long v) {
        return (long)CTL.getAndAdd(this, v);
    }
    private int getAndBitwiseOrMode(int v) {
        return (int)MODE.getAndBitwiseOr(this, v);
    }
    private int getAndAddThreadIds(int x) {
        return (int)THREADIDS.getAndAdd(this, x);
    }
    private static int getAndAddPoolIds(int x) {
        return (int)POOLIDS.getAndAdd(x);
    }
1449 
1450     // Creating, registering and deregistering workers
1451 
1452     /**
1453      * Tries to construct and start one worker. Assumes that total
1454      * count has already been incremented as a reservation.  Invokes
1455      * deregisterWorker on any failure.
1456      *
1457      * @return true if successful
1458      */
1459     private boolean createWorker() {
1460         ForkJoinWorkerThreadFactory fac = factory;
1461         Throwable ex = null;
1462         ForkJoinWorkerThread wt = null;
1463         try {
1464             if (fac != null && (wt = fac.newThread(this)) != null) {
1465                 wt.start();
1466                 return true;
1467             }
1468         } catch (Throwable rex) {
1469             ex = rex;
1470         }
1471         deregisterWorker(wt, ex);
1472         return false;
1473     }
1474 
1475     /**
1476      * Provides a name for ForkJoinWorkerThread constructor.
1477      */
1478     final String nextWorkerThreadName() {
1479         String prefix = workerNamePrefix;
1480         int tid = getAndAddThreadIds(1) + 1;
1481         if (prefix == null) // commonPool has no prefix
1482             prefix = "ForkJoinPool.commonPool-worker-";
1483         return prefix.concat(Integer.toString(tid));
1484     }
1485 
    /**
     * Finishes initializing and records owned queue.
     *
     * @param w caller's WorkQueue
     */
    final void registerWorker(WorkQueue w) {
        ReentrantLock lock = registrationLock;
        ThreadLocalRandom.localInit();
        int seed = ThreadLocalRandom.getProbe();
        if (w != null && lock != null) {
            int modebits = (mode & FIFO) | w.config;
            w.array = new ForkJoinTask<?>[INITIAL_QUEUE_CAPACITY];
            w.stackPred = seed;                         // stash for runWorker
            if ((modebits & INNOCUOUS) != 0)
                w.initializeInnocuousWorker();
            int id = (seed << 1) | 1;                   // initial index guess; kept odd
            lock.lock();
            try {
                WorkQueue[] qs; int n;                  // find queue index
                if ((qs = queues) != null && (n = qs.length) > 0) {
                    int k = n, m = n - 1;
                    // probe odd slots for a free index
                    for (; qs[id &= m] != null && k > 0; id -= 2, k -= 2);
                    if (k == 0)
                        id = n | 1;                     // resize below
                    w.phase = w.config = id | modebits; // now publishable

                    if (id < n)
                        qs[id] = w;
                    else {                              // expand array
                        int an = n << 1, am = an - 1;
                        WorkQueue[] as = new WorkQueue[an];
                        as[id & am] = w;
                        for (int j = 1; j < n; j += 2)  // copy odd (worker) slots as-is
                            as[j] = qs[j];
                        for (int j = 0; j < n; j += 2) {
                            WorkQueue q;
                            if ((q = qs[j]) != null)    // shared queues may move
                                as[q.config & am] = q;
                        }
                        VarHandle.releaseFence();       // fill before publish
                        queues = as;
                    }
                }
            } finally {
                lock.unlock();
            }
        }
    }
1534 
    /**
     * Final callback from terminating worker, as well as upon failure
     * to construct or start a worker.  Removes record of worker from
     * array, and adjusts counts. If pool is shutting down, tries to
     * complete termination.
     *
     * @param wt the worker thread, or null if construction failed
     * @param ex the exception causing failure, or null if none
     */
    final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) {
        ReentrantLock lock = registrationLock;
        WorkQueue w = null;
        int cfg = 0;
        if (wt != null && (w = wt.workQueue) != null && lock != null) {
            WorkQueue[] qs; int n, i;
            cfg = w.config;
            long ns = w.nsteals & 0xffffffffL;       // steal count, as unsigned
            lock.lock();                             // remove index from array
            if ((qs = queues) != null && (n = qs.length) > 0 &&
                qs[i = cfg & (n - 1)] == w)
                qs[i] = null;
            stealCount += ns;                        // accumulate steals
            lock.unlock();
            long c = ctl;
            if ((cfg & QUIET) == 0) // unless self-signalled, decrement counts
                do {} while (c != (c = compareAndExchangeCtl(
                                       c, ((RC_MASK & (c - RC_UNIT)) |
                                           (TC_MASK & (c - TC_UNIT)) |
                                           (SP_MASK & c)))));
            else if ((int)c == 0)                    // was dropped on timeout
                cfg = 0;                             // suppress signal if last
            for (ForkJoinTask<?> t; (t = w.pop()) != null; )
                ForkJoinTask.cancelIgnoringExceptions(t); // cancel tasks
        }

        if (!tryTerminate(false, false) && w != null && (cfg & SRC) != 0)
            signalWork();                            // possibly replace worker
        if (ex != null)
            ForkJoinTask.rethrow(ex);
    }
1575 
    /**
     * Tries to create or release a worker if too few are running.
     */
    final void signalWork() {
        for (long c = ctl; c < 0L;) {                 // negative when RC below target
            int sp, i; WorkQueue[] qs; WorkQueue v;
            if ((sp = (int)c & ~UNSIGNALLED) == 0) {  // no idle workers
                if ((c & ADD_WORKER) == 0L)           // enough total workers
                    break;
                if (c == (c = compareAndExchangeCtl(
                              c, ((RC_MASK & (c + RC_UNIT)) |
                                  (TC_MASK & (c + TC_UNIT)))))) {
                    createWorker();
                    break;
                }
            }
            else if ((qs = queues) == null)
                break;                                // unstarted/terminated
            else if (qs.length <= (i = sp & SMASK))
                break;                                // terminated
            else if ((v = qs[i]) == null)
                break;                                // terminating
            else {
                // pop v from Treiber stack; stackPred holds previous top
                long nc = (v.stackPred & SP_MASK) | (UC_MASK & (c + RC_UNIT));
                Thread vt = v.owner;
                if (c == (c = compareAndExchangeCtl(c, nc))) {
                    v.phase = sp;
                    LockSupport.unpark(vt);           // release idle worker
                    break;
                }
            }
        }
    }
1609 
    /**
     * Top-level runloop for workers, called by ForkJoinWorkerThread.run.
     * See above for explanation.  Alternates scanning for work with
     * awaiting signals until awaitWork reports termination.
     *
     * @param w caller's WorkQueue (may be null on failed initialization)
     */
    final void runWorker(WorkQueue w) {
        if (mode >= 0 && w != null) {           // skip on failed init
            w.config |= SRC;                    // mark as valid source
            int r = w.stackPred, src = 0;       // use seed from registerWorker
            do {
                r ^= r << 13; r ^= r >>> 17; r ^= r << 5; // xorshift
            } while ((src = scan(w, src, r)) >= 0 ||
                     (src = awaitWork(w)) == 0); // negative means terminated
        }
    }
1626 
    /**
     * Scans for and if found executes top-level tasks: Tries to poll
     * each queue starting at a random index with random stride,
     * returning source id or retry indicator if contended or
     * inconsistent.
     *
     * @param w caller's WorkQueue
     * @param prevSrc the previous queue stolen from in current phase, or 0
     * @param r random seed
     * @return id of queue if taken, negative if none found, prevSrc for retry
     */
    private int scan(WorkQueue w, int prevSrc, int r) {
        WorkQueue[] qs = queues;
        int n = (w == null || qs == null) ? 0 : qs.length;
        // odd step visits every index of the power-of-two-length table
        for (int step = (r >>> 16) | 1, i = n; i > 0; --i, r += step) {
            int j, cap, b; WorkQueue q; ForkJoinTask<?>[] a;
            if ((q = qs[j = r & (n - 1)]) != null && // poll at qs[j].array[k]
                (a = q.array) != null && (cap = a.length) > 0) {
                int k = (cap - 1) & (b = q.base), nextBase = b + 1;
                int nextIndex = (cap - 1) & nextBase, src = j | SRC;
                ForkJoinTask<?> t = WorkQueue.getSlot(a, k);
                if (q.base != b)                // inconsistent
                    return prevSrc;
                else if (t != null && WorkQueue.casSlotToNull(a, k, t)) {
                    q.base = nextBase;
                    ForkJoinTask<?> next = a[nextIndex];
                    if ((w.source = src) != prevSrc && next != null)
                        signalWork();           // propagate
                    w.topLevelExec(t, q);
                    return src;
                }
                else if (a[nextIndex] != null)  // revisit
                    return prevSrc;
            }
        }
        return (queues != qs) ? prevSrc: -1;    // possibly resized
    }
1664 
    /**
     * Advances worker phase, pushes onto ctl stack, and awaits signal
     * or reports termination.
     *
     * @return negative if terminated, else 0
     */
    private int awaitWork(WorkQueue w) {
        if (w == null)
            return -1;                       // already terminated
        int phase = (w.phase + SS_SEQ) & ~UNSIGNALLED;
        w.phase = phase | UNSIGNALLED;       // advance phase
        long prevCtl = ctl, c;               // enqueue
        do {
            w.stackPred = (int)prevCtl;      // link to previous stack top
            c = ((prevCtl - RC_UNIT) & UC_MASK) | (phase & SP_MASK);
        } while (prevCtl != (prevCtl = compareAndExchangeCtl(prevCtl, c)));

        Thread.interrupted();                // clear status
        LockSupport.setCurrentBlocker(this); // prepare to block (exit also OK)
        long deadline = 0L;                  // nonzero if possibly quiescent
        int ac = (int)(c >> RC_SHIFT), md;
        if ((md = mode) < 0)                 // pool is terminating
            return -1;
        else if ((md & SMASK) + ac <= 0) {   // apparently no active workers
            boolean checkTermination = (md & SHUTDOWN) != 0;
            if ((deadline = System.currentTimeMillis() + keepAlive) == 0L)
                deadline = 1L;               // avoid zero
            WorkQueue[] qs = queues;         // check for racing submission
            int n = (qs == null) ? 0 : qs.length;
            for (int i = 0; i < n; i += 2) {
                WorkQueue q; ForkJoinTask<?>[] a; int cap, b;
                if (ctl != c) {              // already signalled
                    checkTermination = false;
                    break;
                }
                else if ((q = qs[i]) != null &&
                         (a = q.array) != null && (cap = a.length) > 0 &&
                         ((b = q.base) != q.top || a[(cap - 1) & b] != null ||
                          q.source != 0)) {  // apparently nonempty
                    if (compareAndSetCtl(c, prevCtl))
                        w.phase = phase;     // self-signal
                    checkTermination = false;
                    break;
                }
            }
            if (checkTermination && tryTerminate(false, false))
                return -1;                   // trigger quiescent termination
        }

        for (boolean alt = false;;) {        // await activation or termination
            if (w.phase >= 0)                // signalled
                break;
            else if (mode < 0)
                return -1;
            else if ((c = ctl) == prevCtl)
                Thread.onSpinWait();         // signal in progress
            else if (!(alt = !alt))          // check between park calls
                Thread.interrupted();
            else if (deadline == 0L)
                LockSupport.park();
            else if (deadline - System.currentTimeMillis() > TIMEOUT_SLOP)
                LockSupport.parkUntil(deadline);
            else if (((int)c & SMASK) == (w.config & SMASK) &&
                     compareAndSetCtl(c, ((UC_MASK & (c - TC_UNIT)) |
                                          (prevCtl & SP_MASK)))) {
                w.config |= QUIET;           // sentinel for deregisterWorker
                return -1;                   // drop on timeout
            }
            else if ((deadline += keepAlive) == 0L)
                deadline = 1L;               // not at head; restart timer
        }
        return 0;
    }
1738 
    // Utilities used by ForkJoinTask

    /**
     * Returns true if can start terminating if enabled, or already terminated
     */
    final boolean canStop() {
        outer: for (long oldSum = 0L;;) { // repeat until stable
            int md; WorkQueue[] qs;  long c;
            if ((qs = queues) == null || ((md = mode) & STOP) != 0)
                return true;
            if ((md & SMASK) + (int)((c = ctl) >> RC_SHIFT) > 0)
                break;                       // apparently active workers
            long checkSum = c;
            for (int i = 1; i < qs.length; i += 2) { // scan submitters
                WorkQueue q; ForkJoinTask<?>[] a; int s = 0, cap;
                if ((q = qs[i]) != null && (a = q.array) != null &&
                    (cap = a.length) > 0 &&
                    ((s = q.top) != q.base || a[(cap - 1) & s] != null ||
                     q.source != 0))
                    break outer;             // apparently nonempty
                checkSum += (((long)i) << 32) ^ s;
            }
            if (oldSum == (oldSum = checkSum) && queues == qs)
                return true;                 // unchanged across two scans
        }
        return (mode & STOP) != 0; // recheck mode on false return
    }
1766 
    /**
     * Tries to decrement counts (sometimes implicitly) and possibly
     * arrange for a compensating worker in preparation for
     * blocking. May fail due to interference, in which case -1 is
     * returned so caller may retry. A zero return value indicates
     * that the caller doesn't need to re-adjust counts when later
     * unblocked.
     *
     * @param c incoming ctl value
     * @return UNCOMPENSATE: block then adjust, 0: block, -1 : retry
     */
    private int tryCompensate(long c) {
        Predicate<? super ForkJoinPool> sat;
        int md = mode, b = bounds;
        // counts are signed; centered at parallelism level == 0
        int minActive = (short)(b & SMASK),
            maxTotal  = b >>> SWIDTH,
            active    = (int)(c >> RC_SHIFT),
            total     = (short)(c >>> TC_SHIFT),
            sp        = (int)c & ~UNSIGNALLED;
        if ((md & SMASK) == 0)
            return 0;                  // cannot compensate if parallelism zero
        else if (total >= 0) {
            if (sp != 0) {                        // activate idle worker
                WorkQueue[] qs; int n; WorkQueue v;
                if ((qs = queues) != null && (n = qs.length) > 0 &&
                    (v = qs[sp & (n - 1)]) != null) {
                    Thread vt = v.owner;
                    long nc = ((long)v.stackPred & SP_MASK) | (UC_MASK & c);
                    if (compareAndSetCtl(c, nc)) {
                        v.phase = sp;
                        LockSupport.unpark(vt);
                        return UNCOMPENSATE;
                    }
                }
                return -1;                        // retry
            }
            else if (active > minActive) {        // reduce parallelism
                long nc = ((RC_MASK & (c - RC_UNIT)) | (~RC_MASK & c));
                return compareAndSetCtl(c, nc) ? UNCOMPENSATE : -1;
            }
        }
        if (total < maxTotal) {                   // expand pool
            long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
            return (!compareAndSetCtl(c, nc) ? -1 :
                    !createWorker() ? 0 : UNCOMPENSATE);
        }
        else if (!compareAndSetCtl(c, c))         // validate
            return -1;
        else if ((sat = saturate) != null && sat.test(this))
            return 0;
        else
            throw new RejectedExecutionException(
                "Thread limit exceeded replacing blocked worker");
    }
1822 
    /**
     * Readjusts RC count; called from ForkJoinTask after blocking.
     */
    final void uncompensate() {
        getAndAddCtl(RC_UNIT);    // re-add one release count (lock-free; see ctl notes)
    }
1829 
    /**
     * Helps if possible until the given task is done.  Scans other
     * queues for a task produced by one of w's stealers; returning
     * compensated blocking sentinel if none are found.
     *
     * @param task the task
     * @param w caller's WorkQueue
     * @param canHelp if false, compensate only
     * @return task status on exit, or UNCOMPENSATE for compensated blocking
     */
    final int helpJoin(ForkJoinTask<?> task, WorkQueue w, boolean canHelp) {
        int s = 0;
        if (task != null && w != null) {
            int wsrc = w.source, wid = w.config & SMASK, r = wid + 2;
            boolean scan = true;
            long c = 0L;                          // track ctl stability
            outer: for (;;) {
                if ((s = task.status) < 0)
                    break;                        // done
                else if (scan = !scan) {          // previous scan was empty
                    if (mode < 0)
                        ForkJoinTask.cancelIgnoringExceptions(task);
                    else if (c == (c = ctl) && (s = tryCompensate(c)) >= 0)
                        break;                    // block
                }
                else if (canHelp) {               // scan for subtasks
                    WorkQueue[] qs = queues;
                    int n = (qs == null) ? 0 : qs.length, m = n - 1;
                    for (int i = n; i > 0; i -= 2, r += 2) {
                        int j; WorkQueue q, x, y; ForkJoinTask<?>[] a;
                        if ((q = qs[j = r & m]) != null) {
                            int sq = q.source & SMASK, cap, b;
                            if ((a = q.array) != null && (cap = a.length) > 0) {
                                int k = (cap - 1) & (b = q.base);
                                int nextBase = b + 1, src = j | SRC, sx;
                                ForkJoinTask<?> t = WorkQueue.getSlot(a, k);
                                // eligible if q (or a queue it stole from,
                                // up to two levels) last stole from w
                                boolean eligible = sq == wid ||
                                    ((x = qs[sq & m]) != null &&   // indirect
                                     ((sx = (x.source & SMASK)) == wid ||
                                      ((y = qs[sx & m]) != null && // 2-indirect
                                       (y.source & SMASK) == wid)));
                                if ((s = task.status) < 0)
                                    break outer;
                                else if ((q.source & SMASK) != sq ||
                                         q.base != b)
                                    scan = true;          // inconsistent
                                else if (t == null)
                                    scan |= (a[nextBase & (cap - 1)] != null ||
                                             q.top != b); // lagging
                                else if (eligible) {
                                    if (WorkQueue.casSlotToNull(a, k, t)) {
                                        q.base = nextBase;
                                        w.source = src;
                                        t.doExec();
                                        w.source = wsrc;  // restore
                                    }
                                    scan = true;
                                    break;
                                }
                            }
                        }
                    }
                }
            }
        }
        return s;
    }
1897 
    /**
     * Extra helpJoin steps for CountedCompleters.  Scans for and runs
     * subtasks of the given root task, returning if none are found.
     *
     * @param task root of CountedCompleter computation
     * @param w caller's WorkQueue
     * @param owned true if owned by a ForkJoinWorkerThread
     * @return task status on exit
     */
    final int helpComplete(ForkJoinTask<?> task, WorkQueue w, boolean owned) {
        int s = 0;
        if (task != null && w != null) {
            int r = w.config;
            boolean scan = true, locals = true;
            long c = 0L;                          // track ctl stability
            outer: for (;;) {
                if (locals) {                     // try locals before scanning
                    if ((s = w.helpComplete(task, owned, 0)) < 0)
                        break;
                    locals = false;
                }
                else if ((s = task.status) < 0)
                    break;                        // task is done
                else if (scan = !scan) {          // previous scan was empty
                    if (c == (c = ctl))           // and ctl unchanged: give up
                        break;
                }
                else {                            // scan for subtasks
                    WorkQueue[] qs = queues;
                    int n = (qs == null) ? 0 : qs.length;
                    for (int i = n; i > 0; --i, ++r) {
                        int j, cap, b; WorkQueue q; ForkJoinTask<?>[] a;
                        boolean eligible = false;
                        if ((q = qs[j = r & (n - 1)]) != null &&
                            (a = q.array) != null && (cap = a.length) > 0) {
                            int k = (cap - 1) & (b = q.base), nextBase = b + 1;
                            ForkJoinTask<?> t = WorkQueue.getSlot(a, k);
                            if (t instanceof CountedCompleter) {
                                CountedCompleter<?> f = (CountedCompleter<?>)t;
                                // eligible if t's completer chain reaches task
                                do {} while (!(eligible = (f == task)) &&
                                             (f = f.completer) != null);
                            }
                            if ((s = task.status) < 0)
                                break outer;
                            else if (q.base != b)
                                scan = true;       // inconsistent
                            else if (t == null)
                                scan |= (a[nextBase & (cap - 1)] != null ||
                                         q.top != b); // lagging
                            else if (eligible) {
                                if (WorkQueue.casSlotToNull(a, k, t)) {
                                    q.setBaseOpaque(nextBase);
                                    t.doExec();
                                    locals = true; // t may have pushed locals
                                }
                                scan = true;
                                break;
                            }
                        }
                    }
                }
            }
        }
        return s;
    }
1963 
    /**
     * Scans for and returns a polled task, if available.  Used only
     * for untracked polls. Begins scan at an index (scanRover)
     * advanced on each call, to avoid systematic unfairness.
     *
     * @param submissionsOnly if true, only scan submission queues
     * @return a task, or null if none found
     */
    private ForkJoinTask<?> pollScan(boolean submissionsOnly) {
        VarHandle.acquireFence();        // order subsequent queue-field reads
        int r = scanRover += 0x61c88647; // Weyl increment; raciness OK
        if (submissionsOnly)             // even indices only
            r &= ~1;
        int step = (submissionsOnly) ? 2 : 1;
        WorkQueue[] qs; int n;
        while ((qs = queues) != null && (n = qs.length) > 0) {
            boolean scan = false;        // set if any queue may still have work
            for (int i = 0; i < n; i += step) {
                int j, cap, b; WorkQueue q; ForkJoinTask<?>[] a;
                if ((q = qs[j = (n - 1) & (r + i)]) != null &&
                    (a = q.array) != null && (cap = a.length) > 0) {
                    int k = (cap - 1) & (b = q.base), nextBase = b + 1;
                    ForkJoinTask<?> t = WorkQueue.getSlot(a, k);
                    if (q.base != b)
                        scan = true;     // base moved under us; rescan
                    else if (t == null)
                        scan |= (q.top != b || a[nextBase & (cap - 1)] != null);
                    else if (!WorkQueue.casSlotToNull(a, k, t))
                        scan = true;     // lost race to another poller
                    else {
                        q.setBaseOpaque(nextBase);
                        return t;
                    }
                }
            }
            if (!scan && queues == qs)
                break;                   // full scan found nothing, array stable
        }
        return null;
    }
2003 
    /**
     * Runs tasks until {@code isQuiescent()}. Rather than blocking
     * when tasks cannot be found, rescans until all others cannot
     * find tasks either.
     *
     * @param w caller's WorkQueue; returns 0 immediately if null
     * @param nanos max wait time (Long.MAX_VALUE if effectively untimed)
     * @param interruptible true if return on interrupt
     * @return positive if quiescent, negative if interrupted, else 0
     */
    final int helpQuiescePool(WorkQueue w, long nanos, boolean interruptible) {
        if (w == null)
            return 0;
        long startTime = System.nanoTime(), parkTime = 0L;
        int prevSrc = w.source, wsrc = prevSrc, cfg = w.config, r = cfg + 1;
        for (boolean active = true, locals = true;;) {
            boolean busy = false, scan = false;
            if (locals) {  // run local tasks before (re)polling
                locals = false;
                for (ForkJoinTask<?> u; (u = w.nextLocalTask(cfg)) != null;)
                    u.doExec();
            }
            WorkQueue[] qs = queues;
            int n = (qs == null) ? 0 : qs.length;
            for (int i = n; i > 0; --i, ++r) {   // one pass over all queues
                int j, b, cap; WorkQueue q; ForkJoinTask<?>[] a;
                if ((q = qs[j = (n - 1) & r]) != null && q != w &&
                    (a = q.array) != null && (cap = a.length) > 0) {
                    int k = (cap - 1) & (b = q.base);
                    int nextBase = b + 1, src = j | SRC;
                    ForkJoinTask<?> t = WorkQueue.getSlot(a, k);
                    if (q.base != b)
                        busy = scan = true;      // inconsistent read; rescan
                    else if (t != null) {
                        busy = scan = true;
                        if (!active) {    // increment before taking
                            active = true;
                            getAndAddCtl(RC_UNIT);
                        }
                        if (WorkQueue.casSlotToNull(a, k, t)) {
                            q.base = nextBase;
                            w.source = src;
                            t.doExec();
                            w.source = wsrc = prevSrc;
                            locals = true;       // may have pushed local tasks
                        }
                        break;
                    }
                    else if (!busy) {
                        if (q.top != b || a[nextBase & (cap - 1)] != null)
                            busy = scan = true;  // queue nonempty after all
                        else if (q.source != QUIET && q.phase >= 0)
                            busy = true;         // owner not yet quiescent
                    }
                }
            }
            VarHandle.acquireFence();
            if (!scan && queues == qs) {         // stable empty scan
                boolean interrupted;
                if (!busy) {                     // everyone quiescent: done
                    w.source = prevSrc;
                    if (!active)
                        getAndAddCtl(RC_UNIT);   // reactivate before return
                    return 1;
                }
                if (wsrc != QUIET)
                    w.source = wsrc = QUIET;     // advertise our inactivity
                if (active) {                 // decrement
                    active = false;
                    parkTime = 0L;
                    getAndAddCtl(RC_MASK & -RC_UNIT);
                }
                else if (parkTime == 0L) {
                    parkTime = 1L << 10; // initially about 1 usec
                    Thread.yield();
                }
                else if ((interrupted = interruptible && Thread.interrupted()) ||
                         System.nanoTime() - startTime > nanos) {
                    getAndAddCtl(RC_UNIT);       // reactivate before return
                    return interrupted ? -1 : 0;
                }
                else {
                    LockSupport.parkNanos(this, parkTime);
                    if (parkTime < nanos >>> 8 && parkTime < 1L << 20)
                        parkTime <<= 1;  // max sleep approx 1 sec or 1% nanos
                }
            }
        }
    }
2092 
2093     /**
2094      * Helps quiesce from external caller until done, interrupted, or timeout
2095      *
2096      * @param nanos max wait time (Long.MAX_VALUE if effectively untimed)
2097      * @param interruptible true if return on interrupt
2098      * @return positive if quiescent, negative if interrupted, else 0
2099      */
2100     final int externalHelpQuiescePool(long nanos, boolean interruptible) {
2101         for (long startTime = System.nanoTime(), parkTime = 0L;;) {
2102             ForkJoinTask<?> t;
2103             if ((t = pollScan(false)) != null) {
2104                 t.doExec();
2105                 parkTime = 0L;
2106             }
2107             else if (canStop())
2108                 return 1;
2109             else if (parkTime == 0L) {
2110                 parkTime = 1L << 10;
2111                 Thread.yield();
2112             }
2113             else if ((System.nanoTime() - startTime) > nanos)
2114                 return 0;
2115             else if (interruptible && Thread.interrupted())
2116                 return -1;
2117             else {
2118                 LockSupport.parkNanos(this, parkTime);
2119                 if (parkTime < nanos >>> 8 && parkTime < 1L << 20)
2120                     parkTime <<= 1;
2121             }
2122         }
2123     }
2124 
2125     /**
2126      * Gets and removes a local or stolen task for the given worker.
2127      *
2128      * @return a task, if available
2129      */
2130     final ForkJoinTask<?> nextTaskFor(WorkQueue w) {
2131         ForkJoinTask<?> t;
2132         if (w == null || (t = w.nextLocalTask(w.config)) == null)
2133             t = pollScan(false);
2134         return t;
2135     }
2136 
2137     // External operations
2138 
    /**
     * Finds and locks a WorkQueue for an external submitter, or
     * returns null if shutdown or terminating.
     *
     * @return a locked external submission queue, or null if rejecting
     */
    final WorkQueue submissionQueue() {
        int r;
        if ((r = ThreadLocalRandom.getProbe()) == 0) {
            ThreadLocalRandom.localInit();           // initialize caller's probe
            r = ThreadLocalRandom.getProbe();
        }
        for (int id = r << 1;;) {                    // even indices only
            int md = mode, n, i; WorkQueue q; ReentrantLock lock;
            WorkQueue[] qs = queues;
            if ((md & SHUTDOWN) != 0 || qs == null || (n = qs.length) <= 0)
                return null;                         // shutdown or disabled
            else if ((q = qs[i = (n - 1) & id]) == null) {
                if ((lock = registrationLock) != null) {
                    WorkQueue w = new WorkQueue(id | SRC);
                    lock.lock();                    // install under lock
                    if (qs[i] == null)
                        qs[i] = w;                  // else lost race; discard
                    lock.unlock();
                }
                // loop re-reads the slot (ours or the race winner's)
            }
            else if (!q.tryLock())                  // move and restart
                id = (r = ThreadLocalRandom.advanceProbe(r)) << 1;
            else
                return q;
        }
    }
2169 
2170     /**
2171      * Adds the given task to an external submission queue, or throws
2172      * exception if shutdown or terminating.
2173      *
2174      * @param task the task. Caller must ensure non-null.
2175      */
2176     final void externalPush(ForkJoinTask<?> task) {
2177         WorkQueue q;
2178         if ((q = submissionQueue()) == null)
2179             throw new RejectedExecutionException(); // shutdown or disabled
2180         else if (q.lockedPush(task))
2181             signalWork();
2182     }
2183 
2184     /**
2185      * Pushes a possibly-external submission.
2186      */
2187     private <T> ForkJoinTask<T> externalSubmit(ForkJoinTask<T> task) {
2188         Thread t; ForkJoinWorkerThread wt; WorkQueue q;
2189         if (task == null)
2190             throw new NullPointerException();
2191         if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) &&
2192             (q = (wt = (ForkJoinWorkerThread)t).workQueue) != null &&
2193             wt.pool == this)
2194             q.push(task, this);
2195         else
2196             externalPush(task);
2197         return task;
2198     }
2199 

















2200     /**
2201      * Returns common pool queue for an external thread that has
2202      * possibly ever submitted a common pool task (nonzero probe), or
2203      * null if none.
2204      */
2205     static WorkQueue commonQueue() {
2206         ForkJoinPool p; WorkQueue[] qs;
2207         int r = ThreadLocalRandom.getProbe(), n;
2208         return ((p = common) != null && (qs = p.queues) != null &&
2209                 (n = qs.length) > 0 && r != 0) ?
2210             qs[(n - 1) & (r << 1)] : null;
2211     }
2212 
2213     /**
2214      * Returns queue for an external thread, if one exists
2215      */
2216     final WorkQueue externalQueue() {
2217         WorkQueue[] qs;
2218         int r = ThreadLocalRandom.getProbe(), n;
2219         return ((qs = queues) != null && (n = qs.length) > 0 && r != 0) ?
2220             qs[(n - 1) & (r << 1)] : null;
2221     }
2222 
2223     /**
2224      * If the given executor is a ForkJoinPool, poll and execute
2225      * AsynchronousCompletionTasks from worker's queue until none are
2226      * available or blocker is released.
2227      */
2228     static void helpAsyncBlocker(Executor e, ManagedBlocker blocker) {
2229         WorkQueue w = null; Thread t; ForkJoinWorkerThread wt;
2230         if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) {
2231             if ((wt = (ForkJoinWorkerThread)t).pool == e)
2232                 w = wt.workQueue;
2233         }
2234         else if (e instanceof ForkJoinPool)
2235             w = ((ForkJoinPool)e).externalQueue();
2236         if (w != null)
2237             w.helpAsyncBlocker(blocker);
2238     }
2239 
    /**
     * Returns a cheap heuristic guide for task partitioning when
     * programmers, frameworks, tools, or languages have little or no
     * idea about task granularity.  In essence, by offering this
     * method, we ask users only about tradeoffs in overhead vs
     * expected throughput and its variance, rather than how finely to
     * partition tasks.
     *
     * In a steady state strict (tree-structured) computation, each
     * thread makes available for stealing enough tasks for other
     * threads to remain active. Inductively, if all threads play by
     * the same rules, each thread should make available only a
     * constant number of tasks.
     *
     * The minimum useful constant is just 1. But using a value of 1
     * would require immediate replenishment upon each steal to
     * maintain enough tasks, which is infeasible.  Further,
     * partitionings/granularities of offered tasks should minimize
     * steal rates, which in general means that threads nearer the top
     * of computation tree should generate more than those nearer the
     * bottom. In perfect steady state, each thread is at
     * approximately the same level of computation tree. However,
     * producing extra tasks amortizes the uncertainty of progress and
     * diffusion assumptions.
     *
     * So, users will want to use values larger (but not much larger)
     * than 1 to both smooth over transient shortages and hedge
     * against uneven progress; as traded off against the cost of
     * extra task overhead. We leave the user to pick a threshold
     * value to compare with the results of this call to guide
     * decisions, but recommend values such as 3.
     *
     * When all threads are active, it is on average OK to estimate
     * surplus strictly locally. In steady-state, if one thread is
     * maintaining say 2 surplus tasks, then so are others. So we can
     * just use estimated queue length.  However, this strategy alone
     * leads to serious mis-estimates in some non-steady-state
     * conditions (ramp-up, ramp-down, other stalls). We can detect
     * many of these by further considering the number of "idle"
     * threads, that are known to have zero queued tasks, so
     * compensate by a factor of (#idle/#active) threads.
     *
     * @return estimated queue length minus a target surplus, or 0 if
     * the caller is not a ForkJoinWorkerThread with a pool and queue
     */
    static int getSurplusQueuedTaskCount() {
        Thread t; ForkJoinWorkerThread wt; ForkJoinPool pool; WorkQueue q;
        if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) &&
            (pool = (wt = (ForkJoinWorkerThread)t).pool) != null &&
            (q = wt.workQueue) != null) {
            int p = pool.mode & SMASK;               // parallelism level
            int a = p + (int)(pool.ctl >> RC_SHIFT); // estimated active count
            int n = q.top - q.base;                  // local queue length
            // target surplus grows (0,1,2,4,8) as the fraction of active
            // threads shrinks; p is halved at each comparison
            return n - (a > (p >>>= 1) ? 0 :
                        a > (p >>>= 1) ? 1 :
                        a > (p >>>= 1) ? 2 :
                        a > (p >>>= 1) ? 4 :
                        8);
        }
        return 0;
    }
2298 
    // Termination

    /**
     * Possibly initiates and/or completes termination.
     *
     * @param now if true, unconditionally terminate, else only
     * if no work and no active workers
     * @param enable if true, terminate when next possible
     * @return true if terminating or terminated
     */
    private boolean tryTerminate(boolean now, boolean enable) {
        int md; // try to set SHUTDOWN, then STOP, then help terminate
        if (((md = mode) & SHUTDOWN) == 0) {
            if (!enable)
                return false;
            md = getAndBitwiseOrMode(SHUTDOWN);
        }
        if ((md & STOP) == 0) {
            if (!now && !canStop())
                return false;
            md = getAndBitwiseOrMode(STOP);
        }
        for (boolean rescan = true;;) { // repeat until no changes
            boolean changed = false;
            for (ForkJoinTask<?> t; (t = pollScan(false)) != null; ) {
                changed = true;
                ForkJoinTask.cancelIgnoringExceptions(t); // help cancel
            }
            WorkQueue[] qs; int n; WorkQueue q; Thread thread;
            if ((qs = queues) != null && (n = qs.length) > 0) {
                for (int j = 1; j < n; j += 2) { // unblock other workers
                    if ((q = qs[j]) != null && (thread = q.owner) != null &&
                        !thread.isInterrupted()) {
                        changed = true;
                        try {
                            thread.interrupt();
                        } catch (Throwable ignore) {
                            // best-effort; thread may already be terminating
                        }
                    }
                }
            }
            ReentrantLock lock; Condition cond; // signal when no workers
            if (((md = mode) & TERMINATED) == 0 &&
                (md & SMASK) + (short)(ctl >>> TC_SHIFT) <= 0 &&
                (getAndBitwiseOrMode(TERMINATED) & TERMINATED) == 0 &&
                (lock = registrationLock) != null) {
                lock.lock();
                if ((cond = termination) != null)
                    cond.signalAll();   // wake awaitTermination waiters
                lock.unlock();
            }
            if (changed)
                rescan = true;
            else if (rescan)
                rescan = false;         // one extra confirming pass
            else
                break;
        }
        return true;
    }
2359 
2360     // Exported methods
2361 
2362     // Constructors
2363 
    /**
     * Creates a {@code ForkJoinPool} with parallelism equal to {@link
     * java.lang.Runtime#availableProcessors}, using defaults for all
     * other parameters (see {@link #ForkJoinPool(int,
     * ForkJoinWorkerThreadFactory, UncaughtExceptionHandler, boolean,
     * int, int, int, Predicate, long, TimeUnit)}).
     *
     * @throws SecurityException if a security manager exists and
     *         the caller is not permitted to modify threads
     *         because it does not hold {@link
     *         java.lang.RuntimePermission}{@code ("modifyThread")}
     */
    public ForkJoinPool() {
        // delegate with defaults: available-processor parallelism (capped
        // at MAX_CAP), default factory, no handler, LIFO mode
        this(Math.min(MAX_CAP, Runtime.getRuntime().availableProcessors()),
             defaultForkJoinWorkerThreadFactory, null, false,
             0, MAX_CAP, 1, null, DEFAULT_KEEPALIVE, TimeUnit.MILLISECONDS);
    }
2381 
    /**
     * Creates a {@code ForkJoinPool} with the indicated parallelism
     * level, using defaults for all other parameters (see {@link
     * #ForkJoinPool(int, ForkJoinWorkerThreadFactory,
     * UncaughtExceptionHandler, boolean, int, int, int, Predicate,
     * long, TimeUnit)}).
     *
     * @param parallelism the parallelism level
     * @throws IllegalArgumentException if parallelism less than or
     *         equal to zero, or greater than implementation limit
     * @throws SecurityException if a security manager exists and
     *         the caller is not permitted to modify threads
     *         because it does not hold {@link
     *         java.lang.RuntimePermission}{@code ("modifyThread")}
     */
    public ForkJoinPool(int parallelism) {
        // delegate with defaults for everything but parallelism
        this(parallelism, defaultForkJoinWorkerThreadFactory, null, false,
             0, MAX_CAP, 1, null, DEFAULT_KEEPALIVE, TimeUnit.MILLISECONDS);
    }
2401 
    /**
     * Creates a {@code ForkJoinPool} with the given parameters (using
     * defaults for others -- see {@link #ForkJoinPool(int,
     * ForkJoinWorkerThreadFactory, UncaughtExceptionHandler, boolean,
     * int, int, int, Predicate, long, TimeUnit)}).
     *
     * @param parallelism the parallelism level. For default value,
     * use {@link java.lang.Runtime#availableProcessors}.
     * @param factory the factory for creating new threads. For default value,
     * use {@link #defaultForkJoinWorkerThreadFactory}.
     * @param handler the handler for internal worker threads that
     * terminate due to unrecoverable errors encountered while executing
     * tasks. For default value, use {@code null}.
     * @param asyncMode if true,
     * establishes local first-in-first-out scheduling mode for forked
     * tasks that are never joined. This mode may be more appropriate
     * than default locally stack-based mode in applications in which
     * worker threads only process event-style asynchronous tasks.
     * For default value, use {@code false}.
     * @throws IllegalArgumentException if parallelism less than or
     *         equal to zero, or greater than implementation limit
     * @throws NullPointerException if the factory is null
     * @throws SecurityException if a security manager exists and
     *         the caller is not permitted to modify threads
     *         because it does not hold {@link
     *         java.lang.RuntimePermission}{@code ("modifyThread")}
     */
    public ForkJoinPool(int parallelism,
                        ForkJoinWorkerThreadFactory factory,
                        UncaughtExceptionHandler handler,
                        boolean asyncMode) {
        // delegate with defaults for the sizing/keep-alive parameters
        this(parallelism, factory, handler, asyncMode,
             0, MAX_CAP, 1, null, DEFAULT_KEEPALIVE, TimeUnit.MILLISECONDS);
    }
2436 
    /**
     * Creates a {@code ForkJoinPool} with the given parameters.
     *
     * @param parallelism the parallelism level. For default value,
     * use {@link java.lang.Runtime#availableProcessors}.
     *
     * @param factory the factory for creating new threads. For
     * default value, use {@link #defaultForkJoinWorkerThreadFactory}.
     *
     * @param handler the handler for internal worker threads that
     * terminate due to unrecoverable errors encountered while
     * executing tasks. For default value, use {@code null}.
     *
     * @param asyncMode if true, establishes local first-in-first-out
     * scheduling mode for forked tasks that are never joined. This
     * mode may be more appropriate than default locally stack-based
     * mode in applications in which worker threads only process
     * event-style asynchronous tasks.  For default value, use {@code
     * false}.
     *
     * @param corePoolSize the number of threads to keep in the pool
     * (unless timed out after an elapsed keep-alive). Normally (and
     * by default) this is the same value as the parallelism level,
     * but may be set to a larger value to reduce dynamic overhead if
     * tasks regularly block. Using a smaller value (for example
     * {@code 0}) has the same effect as the default.
     *
     * @param maximumPoolSize the maximum number of threads allowed.
     * When the maximum is reached, attempts to replace blocked
     * threads fail.  (However, because creation and termination of
     * different threads may overlap, and may be managed by the given
     * thread factory, this value may be transiently exceeded.)  To
     * arrange the same value as is used by default for the common
     * pool, use {@code 256} plus the {@code parallelism} level. (By
     * default, the common pool allows a maximum of 256 spare
     * threads.)  Using a value (for example {@code
     * Integer.MAX_VALUE}) larger than the implementation's total
     * thread limit has the same effect as using this limit (which is
     * the default).
     *
     * @param minimumRunnable the minimum allowed number of core
     * threads not blocked by a join or {@link ManagedBlocker}.  To
     * ensure progress, when too few unblocked threads exist and
     * unexecuted tasks may exist, new threads are constructed, up to
     * the given maximumPoolSize.  For the default value, use {@code
     * 1}, that ensures liveness.  A larger value might improve
     * throughput in the presence of blocked activities, but might
     * not, due to increased overhead.  A value of zero may be
     * acceptable when submitted tasks cannot have dependencies
     * requiring additional threads.
     *
     * @param saturate if non-null, a predicate invoked upon attempts
     * to create more than the maximum total allowed threads.  By
     * default, when a thread is about to block on a join or {@link
     * ManagedBlocker}, but cannot be replaced because the
     * maximumPoolSize would be exceeded, a {@link
     * RejectedExecutionException} is thrown.  But if this predicate
     * returns {@code true}, then no exception is thrown, so the pool
     * continues to operate with fewer than the target number of
     * runnable threads, which might not ensure progress.
     *
     * @param keepAliveTime the elapsed time since last use before
     * a thread is terminated (and then later replaced if needed).
     * For the default value, use {@code 60, TimeUnit.SECONDS}.
     *
     * @param unit the time unit for the {@code keepAliveTime} argument
     *
     * @throws IllegalArgumentException if parallelism is less than or
     *         equal to zero, or is greater than implementation limit,
     *         or if maximumPoolSize is less than parallelism,
     *         of if the keepAliveTime is less than or equal to zero.
     * @throws NullPointerException if the factory is null
     * @throws SecurityException if a security manager exists and
     *         the caller is not permitted to modify threads
     *         because it does not hold {@link
     *         java.lang.RuntimePermission}{@code ("modifyThread")}
     * @since 9
     */
    public ForkJoinPool(int parallelism,
                        ForkJoinWorkerThreadFactory factory,
                        UncaughtExceptionHandler handler,
                        boolean asyncMode,
                        int corePoolSize,
                        int maximumPoolSize,
                        int minimumRunnable,
                        Predicate<? super ForkJoinPool> saturate,
                        long keepAliveTime,
                        TimeUnit unit) {
        checkPermission();
        int p = parallelism;
        if (p <= 0 || p > MAX_CAP || p > maximumPoolSize || keepAliveTime <= 0L)
            throw new IllegalArgumentException();
        if (factory == null || unit == null)
            throw new NullPointerException();
        this.factory = factory;
        this.ueh = handler;
        this.saturate = saturate;
        this.keepAlive = Math.max(unit.toMillis(keepAliveTime), TIMEOUT_SLOP);
        // queues array size: twice the smallest power of two >= p
        int size = 1 << (33 - Integer.numberOfLeadingZeros(p - 1));
        int corep = Math.min(Math.max(corePoolSize, p), MAX_CAP);
        int maxSpares = Math.min(maximumPoolSize, MAX_CAP) - p;
        int minAvail = Math.min(Math.max(minimumRunnable, 0), MAX_CAP);
        // pack (minAvail - p) in low SMASK bits, maxSpares above them
        this.bounds = ((minAvail - p) & SMASK) | (maxSpares << SWIDTH);
        this.mode = p | (asyncMode ? FIFO : 0);
        // ctl counts start negative (offset by targets) in RC/TC fields
        this.ctl = ((((long)(-corep) << TC_SHIFT) & TC_MASK) |
                    (((long)(-p)     << RC_SHIFT) & RC_MASK));
        this.registrationLock = new ReentrantLock();
        this.queues = new WorkQueue[size];
        String pid = Integer.toString(getAndAddPoolIds(1) + 1);
        this.workerNamePrefix = "ForkJoinPool-" + pid + "-worker-";
    }
2548 
2549     // helper method for commonPool constructor
2550     private static Object newInstanceFromSystemProperty(String property)
2551         throws ReflectiveOperationException {
2552         String className = System.getProperty(property);
2553         return (className == null)
2554             ? null
2555             : ClassLoader.getSystemClassLoader().loadClass(className)
2556             .getConstructor().newInstance();
2557     }
2558 
    /**
     * Constructor for common pool using parameters possibly
     * overridden by system properties
     * ("java.util.concurrent.ForkJoinPool.common.*").
     */
    private ForkJoinPool(byte forCommonPoolOnly) {
        // Default parallelism: one less than the CPU count, but at least 1
        int parallelism = Math.max(1, Runtime.getRuntime().availableProcessors() - 1);
        ForkJoinWorkerThreadFactory fac = null;
        UncaughtExceptionHandler handler = null;
        try {  // ignore exceptions in accessing/parsing properties
            fac = (ForkJoinWorkerThreadFactory) newInstanceFromSystemProperty(
                "java.util.concurrent.ForkJoinPool.common.threadFactory");
            handler = (UncaughtExceptionHandler) newInstanceFromSystemProperty(
                "java.util.concurrent.ForkJoinPool.common.exceptionHandler");
            String pp = System.getProperty
                ("java.util.concurrent.ForkJoinPool.common.parallelism");
            if (pp != null)
                parallelism = Integer.parseInt(pp);
        } catch (Exception ignore) {
        }
        this.ueh = handler;
        this.keepAlive = DEFAULT_KEEPALIVE;
        this.saturate = null;
        this.workerNamePrefix = null; // no per-pool prefix for common pool workers
        // Parallelism may be configured as 0 (callers then run tasks themselves)
        int p = Math.min(Math.max(parallelism, 0), MAX_CAP), size;
        this.mode = p;
        if (p > 0) {
            // Same sizing/seed scheme as the general constructor, with
            // minimumRunnable fixed at 1 and the common spare-thread cap
            size = 1 << (33 - Integer.numberOfLeadingZeros(p - 1));
            this.bounds = ((1 - p) & SMASK) | (COMMON_MAX_SPARES << SWIDTH);
            this.ctl = ((((long)(-p) << TC_SHIFT) & TC_MASK) |
                        (((long)(-p) << RC_SHIFT) & RC_MASK));
        } else {  // zero min, max, spare counts, 1 slot
            size = 1;
            this.bounds = 0;
            this.ctl = 0L;
        }
        this.factory = (fac != null) ? fac :
            new DefaultCommonPoolForkJoinWorkerThreadFactory();
        this.queues = new WorkQueue[size];
        this.registrationLock = new ReentrantLock();



    }
2599 
2600     /**
2601      * Returns the common pool instance. This pool is statically
2602      * constructed; its run state is unaffected by attempts to {@link
2603      * #shutdown} or {@link #shutdownNow}. However this pool and any
2604      * ongoing processing are automatically terminated upon program
2605      * {@link System#exit}.  Any program that relies on asynchronous
2606      * task processing to complete before program termination should
2607      * invoke {@code commonPool().}{@link #awaitQuiescence awaitQuiescence},
2608      * before exit.
2609      *
2610      * @return the common pool instance
2611      * @since 1.8
2612      */
2613     public static ForkJoinPool commonPool() {
2614         // assert common != null : "static init error";
2615         return common;
2616     }
2617 
2618     // Execution methods
2619 
2620     /**
2621      * Performs the given task, returning its result upon completion.
2622      * If the computation encounters an unchecked Exception or Error,
2623      * it is rethrown as the outcome of this invocation.  Rethrown
2624      * exceptions behave in the same way as regular exceptions, but,
2625      * when possible, contain stack traces (as displayed for example
2626      * using {@code ex.printStackTrace()}) of both the current thread
2627      * as well as the thread actually encountering the exception;
2628      * minimally only the latter.
2629      *
2630      * @param task the task
2631      * @param <T> the type of the task's result
2632      * @return the task's result
2633      * @throws NullPointerException if the task is null
2634      * @throws RejectedExecutionException if the task cannot be
2635      *         scheduled for execution
2636      */
2637     public <T> T invoke(ForkJoinTask<T> task) {
2638         externalSubmit(task);
2639         return task.joinForPoolInvoke(this);
2640     }
2641 
2642     /**
2643      * Arranges for (asynchronous) execution of the given task.
2644      *
2645      * @param task the task
2646      * @throws NullPointerException if the task is null
2647      * @throws RejectedExecutionException if the task cannot be
2648      *         scheduled for execution
2649      */
2650     public void execute(ForkJoinTask<?> task) {
2651         externalSubmit(task);
2652     }
2653 
2654     // AbstractExecutorService methods
2655 
2656     /**
2657      * @throws NullPointerException if the task is null
2658      * @throws RejectedExecutionException if the task cannot be
2659      *         scheduled for execution
2660      */
2661     @Override
2662     @SuppressWarnings("unchecked")
2663     public void execute(Runnable task) {
2664         externalSubmit((task instanceof ForkJoinTask<?>)
2665                        ? (ForkJoinTask<Void>) task // avoid re-wrap
2666                        : new ForkJoinTask.RunnableExecuteAction(task));
2667     }
2668 
2669     /**
2670      * Submits a ForkJoinTask for execution.
2671      *
2672      * @param task the task to submit
2673      * @param <T> the type of the task's result
2674      * @return the task
2675      * @throws NullPointerException if the task is null
2676      * @throws RejectedExecutionException if the task cannot be
2677      *         scheduled for execution
2678      */
2679     public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) {
2680         return externalSubmit(task);
2681     }
2682 
2683     /**
2684      * @throws NullPointerException if the task is null
2685      * @throws RejectedExecutionException if the task cannot be
2686      *         scheduled for execution
2687      */
2688     @Override
2689     public <T> ForkJoinTask<T> submit(Callable<T> task) {
2690         return externalSubmit(new ForkJoinTask.AdaptedCallable<T>(task));
2691     }
2692 
2693     /**
2694      * @throws NullPointerException if the task is null
2695      * @throws RejectedExecutionException if the task cannot be
2696      *         scheduled for execution
2697      */
2698     @Override
2699     public <T> ForkJoinTask<T> submit(Runnable task, T result) {
2700         return externalSubmit(new ForkJoinTask.AdaptedRunnable<T>(task, result));
2701     }
2702 
2703     /**
2704      * @throws NullPointerException if the task is null
2705      * @throws RejectedExecutionException if the task cannot be
2706      *         scheduled for execution
2707      */
2708     @Override
2709     @SuppressWarnings("unchecked")
2710     public ForkJoinTask<?> submit(Runnable task) {
2711         return externalSubmit((task instanceof ForkJoinTask<?>)
2712             ? (ForkJoinTask<Void>) task // avoid re-wrap
2713             : new ForkJoinTask.AdaptedRunnableAction(task));
2714     }
2715 
    /**
     * @throws NullPointerException       {@inheritDoc}
     * @throws RejectedExecutionException {@inheritDoc}
     */
    @Override
    public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) {
        ArrayList<Future<T>> futures = new ArrayList<>(tasks.size());
        try {
            // Submit every task before waiting on any, maximizing overlap
            for (Callable<T> t : tasks) {
                ForkJoinTask<T> f =
                    new ForkJoinTask.AdaptedInterruptibleCallable<T>(t);
                futures.add(f);
                externalSubmit(f);
            }
            // Await each task (most recently submitted first)
            for (int i = futures.size() - 1; i >= 0; --i)
                ((ForkJoinTask<?>)futures.get(i)).awaitPoolInvoke(this);
            return futures;
        } catch (Throwable t) {
            // On any failure, cancel all tasks before propagating
            for (Future<T> e : futures)
                ForkJoinTask.cancelIgnoringExceptions(e);
            throw t;
        }
    }
2739 
    /**
     * Timed variant of invokeAll: tasks not completed within the
     * timeout are cancelled.
     *
     * @throws InterruptedException if interrupted while waiting
     */
    @Override
    public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks,
                                         long timeout, TimeUnit unit)
        throws InterruptedException {
        long nanos = unit.toNanos(timeout);
        ArrayList<Future<T>> futures = new ArrayList<>(tasks.size());
        try {
            // Submit every task before waiting on any
            for (Callable<T> t : tasks) {
                ForkJoinTask<T> f =
                    new ForkJoinTask.AdaptedInterruptibleCallable<T>(t);
                futures.add(f);
                externalSubmit(f);
            }
            long startTime = System.nanoTime(), ns = nanos;
            boolean timedOut = (ns < 0L);
            for (int i = futures.size() - 1; i >= 0; --i) {
                Future<T> f = futures.get(i);
                if (!f.isDone()) {
                    if (timedOut)
                        // deadline already passed: cancel instead of waiting
                        ForkJoinTask.cancelIgnoringExceptions(f);
                    else {
                        // wait no longer than the remaining time budget
                        ((ForkJoinTask<T>)f).awaitPoolInvoke(this, ns);
                        if ((ns = nanos - (System.nanoTime() - startTime)) < 0L)
                            timedOut = true;
                    }
                }
            }
            return futures;
        } catch (Throwable t) {
            // On any failure, cancel all tasks before propagating
            for (Future<T> e : futures)
                ForkJoinTask.cancelIgnoringExceptions(e);
            throw t;
        }
    }
2774 
    // Task to hold results from InvokeAnyTasks; completes when any
    // subtask succeeds, or exceptionally when all have failed.
    static final class InvokeAnyRoot<E> extends ForkJoinTask<E> {
        private static final long serialVersionUID = 2838392045355241008L;
        @SuppressWarnings("serial") // Conditionally serializable
        volatile E result;
        final AtomicInteger count;  // in case all throw
        @SuppressWarnings("serial")
        final ForkJoinPool pool;    // to check shutdown while collecting
        InvokeAnyRoot(int n, ForkJoinPool p) {
            pool = p;
            count = new AtomicInteger(n);
        }
        final void tryComplete(Callable<E> c) { // called by InvokeAnyTasks
            Throwable ex = null;
            boolean failed;
            // Treat interrupt or pool shutdown (mode < 0) as a failed attempt
            if (c == null || Thread.interrupted() ||
                (pool != null && pool.mode < 0))
                failed = true;
            else if (isDone()) // some other subtask already completed the root
                failed = false;
            else {
                try {
                    complete(c.call());
                    failed = false;
                } catch (Throwable tx) {
                    ex = tx;
                    failed = true;
                }
            }
            // If shutting down, or this failure was the last outstanding
            // attempt, complete exceptionally so waiters are released
            if ((pool != null && pool.mode < 0) ||
                (failed && count.getAndDecrement() <= 1))
                trySetThrown(ex != null ? ex : new CancellationException());
        }
        public final boolean exec()         { return false; } // never forked
        public final E getRawResult()       { return result; }
        public final void setRawResult(E v) { result = v; }
    }
2812 
    // Variant of AdaptedInterruptibleCallable with results in InvokeAnyRoot
    static final class InvokeAnyTask<E> extends ForkJoinTask<E> {
        private static final long serialVersionUID = 2838392045355241008L;
        final InvokeAnyRoot<E> root;    // shared result holder
        @SuppressWarnings("serial") // Conditionally serializable
        final Callable<E> callable;
        transient volatile Thread runner;  // non-null while running; cancel target
        InvokeAnyTask(InvokeAnyRoot<E> root, Callable<E> callable) {
            this.root = root;
            this.callable = callable;
        }
        public final boolean exec() {
            Thread.interrupted();            // clear any stale interrupt first
            runner = Thread.currentThread(); // publish so cancel can interrupt us
            root.tryComplete(callable);
            runner = null;
            Thread.interrupted();            // clear interrupt possibly set by cancel
            return true;
        }
        public final boolean cancel(boolean mayInterruptIfRunning) {
            Thread t;
            boolean stat = super.cancel(false);
            // Interrupt the runner (if any) ourselves rather than via super
            if (mayInterruptIfRunning && (t = runner) != null) {
                try {
                    t.interrupt();
                } catch (Throwable ignore) {
                }
            }
            return stat;
        }
        public final void setRawResult(E v) {} // unused
        public final E getRawResult()       { return null; }
    }
2846 
2847     @Override
2848     public <T> T invokeAny(Collection<? extends Callable<T>> tasks)
2849         throws InterruptedException, ExecutionException {
2850         int n = tasks.size();
2851         if (n <= 0)
2852             throw new IllegalArgumentException();
2853         InvokeAnyRoot<T> root = new InvokeAnyRoot<T>(n, this);
2854         ArrayList<InvokeAnyTask<T>> fs = new ArrayList<>(n);
2855         try {
2856             for (Callable<T> c : tasks) {
2857                 if (c == null)
2858                     throw new NullPointerException();
2859                 InvokeAnyTask<T> f = new InvokeAnyTask<T>(root, c);
2860                 fs.add(f);
2861                 externalSubmit(f);
2862                 if (root.isDone())
2863                     break;
2864             }
2865             return root.getForPoolInvoke(this);
2866         } finally {
2867             for (InvokeAnyTask<T> f : fs)
2868                 ForkJoinTask.cancelIgnoringExceptions(f);
2869         }
2870     }
2871 
2872     @Override
2873     public <T> T invokeAny(Collection<? extends Callable<T>> tasks,
2874                            long timeout, TimeUnit unit)
2875         throws InterruptedException, ExecutionException, TimeoutException {
2876         long nanos = unit.toNanos(timeout);
2877         int n = tasks.size();
2878         if (n <= 0)
2879             throw new IllegalArgumentException();
2880         InvokeAnyRoot<T> root = new InvokeAnyRoot<T>(n, this);
2881         ArrayList<InvokeAnyTask<T>> fs = new ArrayList<>(n);
2882         try {
2883             for (Callable<T> c : tasks) {
2884                 if (c == null)
2885                     throw new NullPointerException();
2886                 InvokeAnyTask<T> f = new InvokeAnyTask<T>(root, c);
2887                 fs.add(f);
2888                 externalSubmit(f);
2889                 if (root.isDone())
2890                     break;
2891             }
2892             return root.getForPoolInvoke(this, nanos);
2893         } finally {
2894             for (InvokeAnyTask<T> f : fs)
2895                 ForkJoinTask.cancelIgnoringExceptions(f);
2896         }
2897     }
2898 
2899     /**
2900      * Returns the factory used for constructing new workers.
2901      *
2902      * @return the factory used for constructing new workers
2903      */
2904     public ForkJoinWorkerThreadFactory getFactory() {
2905         return factory;
2906     }
2907 
2908     /**
2909      * Returns the handler for internal worker threads that terminate
2910      * due to unrecoverable errors encountered while executing tasks.
2911      *
2912      * @return the handler, or {@code null} if none
2913      */
2914     public UncaughtExceptionHandler getUncaughtExceptionHandler() {
2915         return ueh;
2916     }
2917 
2918     /**
2919      * Returns the targeted parallelism level of this pool.
2920      *
2921      * @return the targeted parallelism level of this pool
2922      */
2923     public int getParallelism() {
2924         int par = mode & SMASK;
2925         return (par > 0) ? par : 1;
2926     }
2927 
2928     /**
2929      * Returns the targeted parallelism level of the common pool.
2930      *
2931      * @return the targeted parallelism level of the common pool
2932      * @since 1.8
2933      */
2934     public static int getCommonPoolParallelism() {
2935         return COMMON_PARALLELISM;
2936     }
2937 
2938     /**
2939      * Returns the number of worker threads that have started but not
2940      * yet terminated.  The result returned by this method may differ
2941      * from {@link #getParallelism} when threads are created to
2942      * maintain parallelism when others are cooperatively blocked.
2943      *
2944      * @return the number of worker threads
2945      */
2946     public int getPoolSize() {
2947         return ((mode & SMASK) + (short)(ctl >>> TC_SHIFT));
2948     }
2949 
2950     /**
2951      * Returns {@code true} if this pool uses local first-in-first-out
2952      * scheduling mode for forked tasks that are never joined.
2953      *
2954      * @return {@code true} if this pool uses async mode
2955      */
2956     public boolean getAsyncMode() {
2957         return (mode & FIFO) != 0;
2958     }
2959 
2960     /**
2961      * Returns an estimate of the number of worker threads that are
2962      * not blocked waiting to join tasks or for other managed
2963      * synchronization. This method may overestimate the
2964      * number of running threads.
2965      *
2966      * @return the number of worker threads
2967      */
2968     public int getRunningThreadCount() {
2969         VarHandle.acquireFence();
2970         WorkQueue[] qs; WorkQueue q;
2971         int rc = 0;
2972         if ((qs = queues) != null) {
2973             for (int i = 1; i < qs.length; i += 2) {
2974                 if ((q = qs[i]) != null && q.isApparentlyUnblocked())
2975                     ++rc;
2976             }
2977         }
2978         return rc;
2979     }
2980 
2981     /**
2982      * Returns an estimate of the number of threads that are currently
2983      * stealing or executing tasks. This method may overestimate the
2984      * number of active threads.
2985      *
2986      * @return the number of active threads
2987      */
2988     public int getActiveThreadCount() {
2989         int r = (mode & SMASK) + (int)(ctl >> RC_SHIFT);
2990         return (r <= 0) ? 0 : r; // suppress momentarily negative values
2991     }
2992 
2993     /**
2994      * Returns {@code true} if all worker threads are currently idle.
2995      * An idle worker is one that cannot obtain a task to execute
2996      * because none are available to steal from other threads, and
2997      * there are no pending submissions to the pool. This method is
2998      * conservative; it might not return {@code true} immediately upon
2999      * idleness of all threads, but will eventually become true if
3000      * threads remain inactive.
3001      *
3002      * @return {@code true} if all threads are currently idle
3003      */
3004     public boolean isQuiescent() {
3005         return canStop();
3006     }
3007 
3008     /**
3009      * Returns an estimate of the total number of completed tasks that
3010      * were executed by a thread other than their submitter. The
3011      * reported value underestimates the actual total number of steals
3012      * when the pool is not quiescent. This value may be useful for
3013      * monitoring and tuning fork/join programs: in general, steal
3014      * counts should be high enough to keep threads busy, but low
3015      * enough to avoid overhead and contention across threads.
3016      *
3017      * @return the number of steals
3018      */
3019     public long getStealCount() {
3020         long count = stealCount;
3021         WorkQueue[] qs; WorkQueue q;
3022         if ((qs = queues) != null) {
3023             for (int i = 1; i < qs.length; i += 2) {
3024                 if ((q = qs[i]) != null)
3025                     count += (long)q.nsteals & 0xffffffffL;
3026             }
3027         }
3028         return count;
3029     }
3030 
3031     /**
3032      * Returns an estimate of the total number of tasks currently held
3033      * in queues by worker threads (but not including tasks submitted
3034      * to the pool that have not begun executing). This value is only
3035      * an approximation, obtained by iterating across all threads in
3036      * the pool. This method may be useful for tuning task
3037      * granularities.
3038      *
3039      * @return the number of queued tasks
3040      */
3041     public long getQueuedTaskCount() {
3042         VarHandle.acquireFence();
3043         WorkQueue[] qs; WorkQueue q;
3044         int count = 0;
3045         if ((qs = queues) != null) {
3046             for (int i = 1; i < qs.length; i += 2) {
3047                 if ((q = qs[i]) != null)
3048                     count += q.queueSize();
3049             }
3050         }
3051         return count;
3052     }
3053 
3054     /**
3055      * Returns an estimate of the number of tasks submitted to this
3056      * pool that have not yet begun executing.  This method may take
3057      * time proportional to the number of submissions.
3058      *
3059      * @return the number of queued submissions
3060      */
3061     public int getQueuedSubmissionCount() {
3062         VarHandle.acquireFence();
3063         WorkQueue[] qs; WorkQueue q;
3064         int count = 0;
3065         if ((qs = queues) != null) {
3066             for (int i = 0; i < qs.length; i += 2) {
3067                 if ((q = qs[i]) != null)
3068                     count += q.queueSize();
3069             }
3070         }
3071         return count;
3072     }
3073 
3074     /**
3075      * Returns {@code true} if there are any tasks submitted to this
3076      * pool that have not yet begun executing.
3077      *
3078      * @return {@code true} if there are any queued submissions
3079      */
3080     public boolean hasQueuedSubmissions() {
3081         VarHandle.acquireFence();
3082         WorkQueue[] qs; WorkQueue q;
3083         if ((qs = queues) != null) {
3084             for (int i = 0; i < qs.length; i += 2) {
3085                 if ((q = qs[i]) != null && !q.isEmpty())
3086                     return true;
3087             }
3088         }
3089         return false;
3090     }
3091 
3092     /**
3093      * Removes and returns the next unexecuted submission if one is
3094      * available.  This method may be useful in extensions to this
3095      * class that re-assign work in systems with multiple pools.
3096      *
3097      * @return the next submission, or {@code null} if none
3098      */
3099     protected ForkJoinTask<?> pollSubmission() {
3100         return pollScan(true);
3101     }
3102 
3103     /**
3104      * Removes all available unexecuted submitted and forked tasks
3105      * from scheduling queues and adds them to the given collection,
3106      * without altering their execution status. These may include
3107      * artificially generated or wrapped tasks. This method is
3108      * designed to be invoked only when the pool is known to be
3109      * quiescent. Invocations at other times may not remove all
3110      * tasks. A failure encountered while attempting to add elements
3111      * to collection {@code c} may result in elements being in
3112      * neither, either or both collections when the associated
3113      * exception is thrown.  The behavior of this operation is
3114      * undefined if the specified collection is modified while the
3115      * operation is in progress.
3116      *
3117      * @param c the collection to transfer elements into
3118      * @return the number of elements transferred
3119      */
3120     protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
3121         int count = 0;
3122         for (ForkJoinTask<?> t; (t = pollScan(false)) != null; ) {
3123             c.add(t);
3124             ++count;
3125         }
3126         return count;
3127     }
3128 
    /**
     * Returns a string identifying this pool, as well as its state,
     * including indications of run state, parallelism level, and
     * worker and task counts.
     *
     * @return a string identifying this pool, as well as its state
     */
    public String toString() {
        // Use a single pass through queues to collect counts
        int md = mode; // read volatile fields first
        long c = ctl;
        long st = stealCount;
        long qt = 0L, ss = 0L; int rc = 0; // worker tasks, submissions, running
        WorkQueue[] qs; WorkQueue q;
        if ((qs = queues) != null) {
            for (int i = 0; i < qs.length; ++i) {
                if ((q = qs[i]) != null) {
                    int size = q.queueSize();
                    if ((i & 1) == 0)
                        ss += size;  // even slots hold external submissions
                    else {           // odd slots are worker queues
                        qt += size;
                        st += (long)q.nsteals & 0xffffffffL; // unsigned widen
                        if (q.isApparentlyUnblocked())
                            ++rc;
                    }
                }
            }
        }

        int pc = (md & SMASK);                 // parallelism target
        int tc = pc + (short)(c >>> TC_SHIFT); // total workers
        int ac = pc + (int)(c >> RC_SHIFT);    // active workers
        if (ac < 0) // ignore transient negative
            ac = 0;
        String level = ((md & TERMINATED) != 0 ? "Terminated" :
                        (md & STOP)       != 0 ? "Terminating" :
                        (md & SHUTDOWN)   != 0 ? "Shutting down" :
                        "Running");
        return super.toString() +
            "[" + level +
            ", parallelism = " + pc +
            ", size = " + tc +
            ", active = " + ac +
            ", running = " + rc +
            ", steals = " + st +
            ", tasks = " + qt +
            ", submissions = " + ss +
            "]";
    }
3179 
3180     /**
3181      * Possibly initiates an orderly shutdown in which previously
3182      * submitted tasks are executed, but no new tasks will be
3183      * accepted. Invocation has no effect on execution state if this
3184      * is the {@link #commonPool()}, and no additional effect if
3185      * already shut down.  Tasks that are in the process of being
3186      * submitted concurrently during the course of this method may or
3187      * may not be rejected.
3188      *
3189      * @throws SecurityException if a security manager exists and
3190      *         the caller is not permitted to modify threads
3191      *         because it does not hold {@link
3192      *         java.lang.RuntimePermission}{@code ("modifyThread")}
3193      */
3194     public void shutdown() {
3195         checkPermission();
3196         if (this != common)
3197             tryTerminate(false, true);
3198     }
3199 
3200     /**
3201      * Possibly attempts to cancel and/or stop all tasks, and reject
3202      * all subsequently submitted tasks.  Invocation has no effect on
3203      * execution state if this is the {@link #commonPool()}, and no
3204      * additional effect if already shut down. Otherwise, tasks that
3205      * are in the process of being submitted or executed concurrently
3206      * during the course of this method may or may not be
3207      * rejected. This method cancels both existing and unexecuted
3208      * tasks, in order to permit termination in the presence of task
3209      * dependencies. So the method always returns an empty list
3210      * (unlike the case for some other Executors).
3211      *
3212      * @return an empty list
3213      * @throws SecurityException if a security manager exists and
3214      *         the caller is not permitted to modify threads
3215      *         because it does not hold {@link
3216      *         java.lang.RuntimePermission}{@code ("modifyThread")}
3217      */
3218     public List<Runnable> shutdownNow() {
3219         checkPermission();
3220         if (this != common)
3221             tryTerminate(true, true);
3222         return Collections.emptyList();
3223     }
3224 
3225     /**
3226      * Returns {@code true} if all tasks have completed following shut down.
3227      *
3228      * @return {@code true} if all tasks have completed following shut down
3229      */
3230     public boolean isTerminated() {
3231         return (mode & TERMINATED) != 0;
3232     }
3233 
3234     /**
3235      * Returns {@code true} if the process of termination has
3236      * commenced but not yet completed.  This method may be useful for
3237      * debugging. A return of {@code true} reported a sufficient
3238      * period after shutdown may indicate that submitted tasks have
3239      * ignored or suppressed interruption, or are waiting for I/O,
3240      * causing this executor not to properly terminate. (See the
3241      * advisory notes for class {@link ForkJoinTask} stating that
3242      * tasks should not normally entail blocking operations.  But if
3243      * they do, they must abort them on interrupt.)
3244      *
3245      * @return {@code true} if terminating but not yet terminated
3246      */
3247     public boolean isTerminating() {
3248         return (mode & (STOP | TERMINATED)) == STOP;
3249     }
3250 
3251     /**
3252      * Returns {@code true} if this pool has been shut down.
3253      *
3254      * @return {@code true} if this pool has been shut down
3255      */
3256     public boolean isShutdown() {
3257         return (mode & SHUTDOWN) != 0;
3258     }
3259 
3260     /**
3261      * Blocks until all tasks have completed execution after a
3262      * shutdown request, or the timeout occurs, or the current thread
3263      * is interrupted, whichever happens first. Because the {@link
3264      * #commonPool()} never terminates until program shutdown, when
3265      * applied to the common pool, this method is equivalent to {@link
3266      * #awaitQuiescence(long, TimeUnit)} but always returns {@code false}.
3267      *
3268      * @param timeout the maximum time to wait
3269      * @param unit the time unit of the timeout argument
3270      * @return {@code true} if this executor terminated and
3271      *         {@code false} if the timeout elapsed before termination
3272      * @throws InterruptedException if interrupted while waiting
3273      */
    public boolean awaitTermination(long timeout, TimeUnit unit)
        throws InterruptedException {
        ReentrantLock lock; Condition cond;
        long nanos = unit.toNanos(timeout);
        boolean terminated = false;
        if (this == common) {
            // The common pool never terminates, so instead help it quiesce
            // for up to the timeout (reporting interrupt as an exception);
            // terminated stays false in this branch, per the method javadoc.
            Thread t; ForkJoinWorkerThread wt; int q;
            if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread &&
                (wt = (ForkJoinWorkerThread)t).pool == this)
                q = helpQuiescePool(wt.workQueue, nanos, true); // help as a worker of this pool
            else
                q = externalHelpQuiescePool(nanos, true);       // help as an external caller
            if (q < 0)  // negative result reports interruption
                throw new InterruptedException();
        }
        else if (!(terminated = ((mode & TERMINATED) != 0)) &&
                 (lock = registrationLock) != null) {
            lock.lock();
            try {
                // Lazily create the termination condition under the lock,
                // then wait, rechecking mode after each wakeup until the
                // pool terminates or the remaining time is exhausted.
                if ((cond = termination) == null)
                    termination = cond = lock.newCondition();
                while (!(terminated = ((mode & TERMINATED) != 0)) && nanos > 0L)
                    nanos = cond.awaitNanos(nanos);
            } finally {
                lock.unlock();
            }
        }
        return terminated;
    }
3303 
3304     /**
3305      * If called by a ForkJoinTask operating in this pool, equivalent
3306      * in effect to {@link ForkJoinTask#helpQuiesce}. Otherwise,
3307      * waits and/or attempts to assist performing tasks until this
3308      * pool {@link #isQuiescent} or the indicated timeout elapses.
3309      *
3310      * @param timeout the maximum time to wait
3311      * @param unit the time unit of the timeout argument
3312      * @return {@code true} if quiescent; {@code false} if the
3313      * timeout elapsed.
3314      */
3315     public boolean awaitQuiescence(long timeout, TimeUnit unit) {
3316         Thread t; ForkJoinWorkerThread wt; int q;
3317         long nanos = unit.toNanos(timeout);
3318         if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread &&
3319             (wt = (ForkJoinWorkerThread)t).pool == this)
3320             q = helpQuiescePool(wt.workQueue, nanos, false);
3321         else
3322             q = externalHelpQuiescePool(nanos, false);
3323         return (q > 0);
3324     }
3325 
3326     /**
3327      * Interface for extending managed parallelism for tasks running
3328      * in {@link ForkJoinPool}s.
3329      *
3330      * <p>A {@code ManagedBlocker} provides two methods.  Method
3331      * {@link #isReleasable} must return {@code true} if blocking is
3332      * not necessary. Method {@link #block} blocks the current thread
3333      * if necessary (perhaps internally invoking {@code isReleasable}
3334      * before actually blocking). These actions are performed by any
3335      * thread invoking {@link
3336      * ForkJoinPool#managedBlock(ManagedBlocker)}.  The unusual
3337      * methods in this API accommodate synchronizers that may, but
3338      * don't usually, block for long periods. Similarly, they allow
3339      * more efficient internal handling of cases in which additional
3340      * workers may be, but usually are not, needed to ensure
3341      * sufficient parallelism.  Toward this end, implementations of
3342      * method {@code isReleasable} must be amenable to repeated
3343      * invocation. Neither method is invoked after a prior invocation
3344      * of {@code isReleasable} or {@code block} returns {@code true}.
3345      *
3346      * <p>For example, here is a ManagedBlocker based on a
3347      * ReentrantLock:
3348      * <pre> {@code
3349      * class ManagedLocker implements ManagedBlocker {
3350      *   final ReentrantLock lock;
3351      *   boolean hasLock = false;
3352      *   ManagedLocker(ReentrantLock lock) { this.lock = lock; }
3353      *   public boolean block() {
3354      *     if (!hasLock)
3355      *       lock.lock();
3356      *     return true;
3357      *   }
3358      *   public boolean isReleasable() {
3359      *     return hasLock || (hasLock = lock.tryLock());
3360      *   }
3361      * }}</pre>
3362      *
3363      * <p>Here is a class that possibly blocks waiting for an
3364      * item on a given queue:
3365      * <pre> {@code
3366      * class QueueTaker<E> implements ManagedBlocker {
3367      *   final BlockingQueue<E> queue;
3368      *   volatile E item = null;
3369      *   QueueTaker(BlockingQueue<E> q) { this.queue = q; }
3370      *   public boolean block() throws InterruptedException {
3371      *     if (item == null)
3372      *       item = queue.take();
3373      *     return true;
3374      *   }
3375      *   public boolean isReleasable() {
3376      *     return item != null || (item = queue.poll()) != null;
3377      *   }
3378      *   public E getItem() { // call after pool.managedBlock completes
3379      *     return item;
3380      *   }
3381      * }}</pre>
3382      */
    public static interface ManagedBlocker {
        /**
         * Possibly blocks the current thread, for example waiting for
         * a lock or condition.
         *
         * @return {@code true} if no additional blocking is necessary
         * (i.e., if isReleasable would return true)
         * @throws InterruptedException if interrupted while waiting
         * (the method is not required to do so, but is allowed to)
         */
        boolean block() throws InterruptedException;

        /**
         * Returns {@code true} if blocking is unnecessary.
         * Implementations must be amenable to repeated invocation,
         * as described in the interface documentation above.
         *
         * @return {@code true} if blocking is unnecessary
         */
        boolean isReleasable();
    }
3401 
3402     /**
3403      * Runs the given possibly blocking task.  When {@linkplain
3404      * ForkJoinTask#inForkJoinPool() running in a ForkJoinPool}, this
3405      * method possibly arranges for a spare thread to be activated if
3406      * necessary to ensure sufficient parallelism while the current
3407      * thread is blocked in {@link ManagedBlocker#block blocker.block()}.
3408      *
3409      * <p>This method repeatedly calls {@code blocker.isReleasable()} and
3410      * {@code blocker.block()} until either method returns {@code true}.
3411      * Every call to {@code blocker.block()} is preceded by a call to
3412      * {@code blocker.isReleasable()} that returned {@code false}.
3413      *
3414      * <p>If not running in a ForkJoinPool, this method is
3415      * behaviorally equivalent to
3416      * <pre> {@code
3417      * while (!blocker.isReleasable())
3418      *   if (blocker.block())
3419      *     break;}</pre>
3420      *
3421      * If running in a ForkJoinPool, the pool may first be expanded to
3422      * ensure sufficient parallelism available during the call to
3423      * {@code blocker.block()}.
3424      *
3425      * @param blocker the blocker task
3426      * @throws InterruptedException if {@code blocker.block()} did so
3427      */
3428     public static void managedBlock(ManagedBlocker blocker)
3429         throws InterruptedException {
3430         Thread t; ForkJoinPool p;
3431         if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread &&
3432             (p = ((ForkJoinWorkerThread)t).pool) != null)
3433             p.compensatedBlock(blocker);
3434         else
3435             unmanagedBlock(blocker);
3436     }
3437 
    /** ManagedBlock for ForkJoinWorkerThreads */
    private void compensatedBlock(ManagedBlocker blocker)
        throws InterruptedException {
        if (blocker == null) throw new NullPointerException();
        for (;;) {
            int comp; boolean done;
            long c = ctl;               // sample ctl before the releasability check
            if (blocker.isReleasable())
                break;                  // no blocking needed
            if ((comp = tryCompensate(c)) >= 0) {
                // comp != 0 presumably means tryCompensate adjusted the
                // release count, which is restored below by re-adding
                // RC_UNIT — TODO confirm against tryCompensate.
                long post = (comp == 0) ? 0L : RC_UNIT;
                try {
                    done = blocker.block();
                } finally {
                    getAndAddCtl(post); // undo compensation even if block() throws
                }
                if (done)
                    break;
                // else loop: recheck releasability and retry compensation
            }
            // tryCompensate < 0: retry the whole sequence with a fresh ctl
        }
    }
3459 
3460     /** ManagedBlock for external threads */
3461     private static void unmanagedBlock(ManagedBlocker blocker)
3462         throws InterruptedException {
3463         if (blocker == null) throw new NullPointerException();
3464         do {} while (!blocker.isReleasable() && !blocker.block());
3465     }
3466 
3467     // AbstractExecutorService.newTaskFor overrides rely on
3468     // undocumented fact that ForkJoinTask.adapt returns ForkJoinTasks
3469     // that also implement RunnableFuture.
3470 
3471     @Override
3472     protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
3473         return new ForkJoinTask.AdaptedRunnable<T>(runnable, value);
3474     }
3475 
3476     @Override
3477     protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) {
3478         return new ForkJoinTask.AdaptedCallable<T>(callable);
3479     }
3480 
    static {
        try {
            // Obtain VarHandles for the fields updated via CAS/atomics.
            MethodHandles.Lookup l = MethodHandles.lookup();
            CTL = l.findVarHandle(ForkJoinPool.class, "ctl", long.class);
            MODE = l.findVarHandle(ForkJoinPool.class, "mode", int.class);
            THREADIDS = l.findVarHandle(ForkJoinPool.class, "threadIds", int.class);
            POOLIDS = l.findStaticVarHandle(ForkJoinPool.class, "poolIds", int.class);
        } catch (ReflectiveOperationException e) {
            throw new ExceptionInInitializerError(e);
        }

        // Reduce the risk of rare disastrous classloading in first call to
        // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
        Class<?> ensureLoaded = LockSupport.class;

        // Allow the common-pool max-spares value to be tuned via a system
        // property; on any failure (parse, security), keep the default.
        int commonMaxSpares = DEFAULT_COMMON_MAX_SPARES;
        try {
            String p = System.getProperty
                ("java.util.concurrent.ForkJoinPool.common.maximumSpares");
            if (p != null)
                commonMaxSpares = Integer.parseInt(p);
        } catch (Exception ignore) {}
        COMMON_MAX_SPARES = commonMaxSpares;

        defaultForkJoinWorkerThreadFactory =
            new DefaultForkJoinWorkerThreadFactory();
        modifyThreadPermission = new RuntimePermission("modifyThread");
        // Construct the singleton common pool inside doPrivileged.
        @SuppressWarnings("removal")
        ForkJoinPool tmp = AccessController.doPrivileged(new PrivilegedAction<>() {
            public ForkJoinPool run() {
                return new ForkJoinPool((byte)0); }});
        common = tmp;

        // Parallelism is the low SMASK bits of the common pool's mode,
        // reported as at least 1.
        COMMON_PARALLELISM = Math.max(common.mode & SMASK, 1);
    }
3516 }
--- EOF ---