1 /*
   2  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.  Oracle designates this
   7  * particular file as subject to the "Classpath" exception as provided
   8  * by Oracle in the LICENSE file that accompanied this code.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  */
  24 
  25 /*
  26  * This file is available under and governed by the GNU General Public
  27  * License version 2 only, as published by the Free Software Foundation.
  28  * However, the following notice accompanied the original version of this
  29  * file:
  30  *
  31  * Written by Doug Lea with assistance from members of JCP JSR-166
  32  * Expert Group and released to the public domain, as explained at
  33  * http://creativecommons.org/publicdomain/zero/1.0/
  34  */
  35 
  36 package java.util.concurrent;
  37 
  38 import java.lang.Thread.UncaughtExceptionHandler;
  39 import java.lang.invoke.MethodHandles;
  40 import java.lang.invoke.VarHandle;
  41 import java.security.AccessController;
  42 import java.security.AccessControlContext;
  43 import java.security.Permission;
  44 import java.security.Permissions;
  45 import java.security.PrivilegedAction;
  46 import java.security.ProtectionDomain;
  47 import java.util.ArrayList;
  48 import java.util.Collection;
  49 import java.util.Collections;
  50 import java.util.List;
  51 import java.util.function.Predicate;
  52 import java.util.concurrent.atomic.AtomicInteger;
  53 import java.util.concurrent.locks.LockSupport;
  54 import java.util.concurrent.locks.ReentrantLock;
  55 import java.util.concurrent.locks.Condition;
  56 import jdk.internal.misc.VirtualThreads;
  57 import jdk.internal.vm.SharedThreadContainer;
  58 
  59 /**
  60  * An {@link ExecutorService} for running {@link ForkJoinTask}s.
  61  * A {@code ForkJoinPool} provides the entry point for submissions
  62  * from non-{@code ForkJoinTask} clients, as well as management and
  63  * monitoring operations.
  64  *
  65  * <p>A {@code ForkJoinPool} differs from other kinds of {@link
  66  * ExecutorService} mainly by virtue of employing
  67  * <em>work-stealing</em>: all threads in the pool attempt to find and
  68  * execute tasks submitted to the pool and/or created by other active
  69  * tasks (eventually blocking waiting for work if none exist). This
  70  * enables efficient processing when most tasks spawn other subtasks
  71  * (as do most {@code ForkJoinTask}s), as well as when many small
  72  * tasks are submitted to the pool from external clients.  Especially
  73  * when setting <em>asyncMode</em> to true in constructors, {@code
  74  * ForkJoinPool}s may also be appropriate for use with event-style
  75  * tasks that are never joined. All worker threads are initialized
  76  * with {@link Thread#isDaemon} set {@code true}.
  77  *
  78  * <p>A static {@link #commonPool()} is available and appropriate for
  79  * most applications. The common pool is used by any ForkJoinTask that
  80  * is not explicitly submitted to a specified pool. Using the common
  81  * pool normally reduces resource usage (its threads are slowly
  82  * reclaimed during periods of non-use, and reinstated upon subsequent
  83  * use).
  84  *
  85  * <p>For applications that require separate or custom pools, a {@code
  86  * ForkJoinPool} may be constructed with a given target parallelism
  87  * level; by default, equal to the number of available processors.
  88  * The pool attempts to maintain enough active (or available) threads
  89  * by dynamically adding, suspending, or resuming internal worker
  90  * threads, even if some tasks are stalled waiting to join others.
  91  * However, no such adjustments are guaranteed in the face of blocked
  92  * I/O or other unmanaged synchronization. The nested {@link
  93  * ManagedBlocker} interface enables extension of the kinds of
  94  * synchronization accommodated. The default policies may be
  95  * overridden using a constructor with parameters corresponding to
  96  * those documented in class {@link ThreadPoolExecutor}.
  97  *
  98  * <p>In addition to execution and lifecycle control methods, this
  99  * class provides status check methods (for example
 100  * {@link #getStealCount}) that are intended to aid in developing,
 101  * tuning, and monitoring fork/join applications. Also, method
 102  * {@link #toString} returns indications of pool state in a
 103  * convenient form for informal monitoring.
 104  *
 105  * <p>As is the case with other ExecutorServices, there are three
 106  * main task execution methods summarized in the following table.
 107  * These are designed to be used primarily by clients not already
 108  * engaged in fork/join computations in the current pool.  The main
 109  * forms of these methods accept instances of {@code ForkJoinTask},
 110  * but overloaded forms also allow mixed execution of plain {@code
 111  * Runnable}- or {@code Callable}- based activities as well.  However,
 112  * tasks that are already executing in a pool should normally instead
 113  * use the within-computation forms listed in the table unless using
 114  * async event-style tasks that are not usually joined, in which case
 115  * there is little difference among choice of methods.
 116  *
 117  * <table class="plain">
 118  * <caption>Summary of task execution methods</caption>
 119  *  <tr>
 120  *    <td></td>
 121  *    <th scope="col"> Call from non-fork/join clients</th>
 122  *    <th scope="col"> Call from within fork/join computations</th>
 123  *  </tr>
 124  *  <tr>
 125  *    <th scope="row" style="text-align:left"> Arrange async execution</th>
 126  *    <td> {@link #execute(ForkJoinTask)}</td>
 127  *    <td> {@link ForkJoinTask#fork}</td>
 128  *  </tr>
 129  *  <tr>
 130  *    <th scope="row" style="text-align:left"> Await and obtain result</th>
 131  *    <td> {@link #invoke(ForkJoinTask)}</td>
 132  *    <td> {@link ForkJoinTask#invoke}</td>
 133  *  </tr>
 134  *  <tr>
 135  *    <th scope="row" style="text-align:left"> Arrange exec and obtain Future</th>
 136  *    <td> {@link #submit(ForkJoinTask)}</td>
 137  *    <td> {@link ForkJoinTask#fork} (ForkJoinTasks <em>are</em> Futures)</td>
 138  *  </tr>
 139  * </table>
 140  *
 141  * <p>The parameters used to construct the common pool may be controlled by
 142  * setting the following {@linkplain System#getProperty system properties}:
 143  * <ul>
 144  * <li>{@systemProperty java.util.concurrent.ForkJoinPool.common.parallelism}
 145  * - the parallelism level, a non-negative integer
 146  * <li>{@systemProperty java.util.concurrent.ForkJoinPool.common.threadFactory}
 147  * - the class name of a {@link ForkJoinWorkerThreadFactory}.
 148  * The {@linkplain ClassLoader#getSystemClassLoader() system class loader}
 149  * is used to load this class.
 150  * <li>{@systemProperty java.util.concurrent.ForkJoinPool.common.exceptionHandler}
 151  * - the class name of a {@link UncaughtExceptionHandler}.
 152  * The {@linkplain ClassLoader#getSystemClassLoader() system class loader}
 153  * is used to load this class.
 154  * <li>{@systemProperty java.util.concurrent.ForkJoinPool.common.maximumSpares}
 155  * - the maximum number of allowed extra threads to maintain target
 156  * parallelism (default 256).
 157  * </ul>
 158  * If no thread factory is supplied via a system property, then the
 159  * common pool uses a factory that uses the system class loader as the
 160  * {@linkplain Thread#getContextClassLoader() thread context class loader}.
 161  * In addition, if a {@link SecurityManager} is present, then
 162  * the common pool uses a factory supplying threads that have no
 163  * {@link Permissions} enabled.
 164  *
 165  * Upon any error in establishing these settings, default parameters
 166  * are used. It is possible to disable or limit the use of threads in
 167  * the common pool by setting the parallelism property to zero, and/or
 168  * using a factory that may return {@code null}. However doing so may
 169  * cause unjoined tasks to never be executed.
 170  *
 171  * <p><b>Implementation notes:</b> This implementation restricts the
 172  * maximum number of running threads to 32767. Attempts to create
 173  * pools with greater than the maximum number result in
 174  * {@code IllegalArgumentException}.
 175  *
 176  * <p>This implementation rejects submitted tasks (that is, by throwing
 177  * {@link RejectedExecutionException}) only when the pool is shut down
 178  * or internal resources have been exhausted.
 179  *
 180  * @since 1.7
 181  * @author Doug Lea
 182  */
 183 public class ForkJoinPool extends AbstractExecutorService {
 184 
 185     /*
 186      * Implementation Overview
 187      *
 188      * This class and its nested classes provide the main
 189      * functionality and control for a set of worker threads:
 190      * Submissions from non-FJ threads enter into submission queues.
 191      * Workers take these tasks and typically split them into subtasks
 192      * that may be stolen by other workers. Work-stealing based on
 193      * randomized scans generally leads to better throughput than
 194      * "work dealing" in which producers assign tasks to idle threads,
 195      * in part because threads that have finished other tasks before
 196      * the signalled thread wakes up (which can be a long time) can
 197      * take the task instead.  Preference rules give first priority to
 198      * processing tasks from their own queues (LIFO or FIFO, depending
 199      * on mode), then to randomized FIFO steals of tasks in other
 200      * queues.  This framework began as vehicle for supporting
 201      * tree-structured parallelism using work-stealing.  Over time,
 202      * its scalability advantages led to extensions and changes to
 203      * better support more diverse usage contexts.  Because most
 204      * internal methods and nested classes are interrelated, their
 205      * main rationale and descriptions are presented here; individual
 206      * methods and nested classes contain only brief comments about
 207      * details.
 208      *
 209      * WorkQueues
 210      * ==========
 211      *
 212      * Most operations occur within work-stealing queues (in nested
 213      * class WorkQueue).  These are special forms of Deques that
 214      * support only three of the four possible end-operations -- push,
 215      * pop, and poll (aka steal), under the further constraints that
 216      * push and pop are called only from the owning thread (or, as
 217      * extended here, under a lock), while poll may be called from
 218      * other threads.  (If you are unfamiliar with them, you probably
 219      * want to read Herlihy and Shavit's book "The Art of
 220      * Multiprocessor programming", chapter 16 describing these in
 221      * more detail before proceeding.)  The main work-stealing queue
 222      * design is roughly similar to those in the papers "Dynamic
 223      * Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005
 224      * (http://research.sun.com/scalable/pubs/index.html) and
 225      * "Idempotent work stealing" by Michael, Saraswat, and Vechev,
 226      * PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186).
 227      * The main differences ultimately stem from GC requirements that
 228      * we null out taken slots as soon as we can, to maintain as small
 229      * a footprint as possible even in programs generating huge
 230      * numbers of tasks. To accomplish this, we shift the CAS
 231      * arbitrating pop vs poll (steal) from being on the indices
 232      * ("base" and "top") to the slots themselves.
 233      *
 234      * Adding tasks then takes the form of a classic array push(task)
 235      * in a circular buffer:
 236      *    q.array[q.top++ % length] = task;
 237      *
 238      * The actual code needs to null-check and size-check the array,
 239      * uses masking, not mod, for indexing a power-of-two-sized array,
 240      * enforces memory ordering, supports resizing, and possibly
 241      * signals waiting workers to start scanning -- see below.
 242      *
 243      * The pop operation (always performed by owner) is of the form:
 244      *   if ((task = getAndSet(q.array, (q.top-1) % length, null)) != null)
 245      *        decrement top and return task;
 246      * If this fails, the queue is empty.
 247      *
 248      * The poll operation by another stealer thread is, basically:
 249      *   if (CAS nonnull task at q.array[q.base % length] to null)
 250      *       increment base and return task;
 251      *
 252      * This may fail due to contention, and may be retried.
 253      * Implementations must ensure a consistent snapshot of the base
 254      * index and the task (by looping or trying elsewhere) before
 255      * trying CAS.  There isn't actually a method of this form,
 256      * because failure due to inconsistency or contention is handled
 257      * in different ways in different contexts, normally by first
 258      * trying other queues. (For the most straightforward example, see
 259      * method pollScan.) There are further variants for cases
 260      * requiring inspection of elements before extracting them, so
 261      * must interleave these with variants of this code.  Also, a more
 262      * efficient version (nextLocalTask) is used for polls by owners.
 263      * It avoids some overhead because the queue cannot be growing
 264      * during call.
 265      *
 266      * Memory ordering.  See "Correct and Efficient Work-Stealing for
 267      * Weak Memory Models" by Le, Pop, Cohen, and Nardelli, PPoPP 2013
 268      * (http://www.di.ens.fr/~zappa/readings/ppopp13.pdf) for an
 269      * analysis of memory ordering requirements in work-stealing
 270      * algorithms similar to the one used here.  Inserting and
 271      * extracting tasks in array slots via volatile or atomic accesses
 272      * or explicit fences provides primary synchronization.
 273      *
 274      * Operations on deque elements require reads and writes of both
 275      * indices and slots. When possible, we allow these to occur in
 276      * any order.  Because the base and top indices (along with other
 277      * pool or array fields accessed in many methods) only imprecisely
 278      * guide where to extract from, we let accesses other than the
 279      * element getAndSet/CAS/setVolatile appear in any order, using
 280      * plain mode. But we must still preface some methods (mainly
 281      * those that may be accessed externally) with an acquireFence to
 282      * avoid unbounded staleness. This is equivalent to acting as if
 283      * callers use an acquiring read of the reference to the pool or
 284      * queue when invoking the method, even when they do not. We use
 285      * explicit acquiring reads (getSlot) rather than plain array
 286      * access when acquire mode is required but not otherwise ensured
 287      * by context. To reduce stalls by other stealers, we encourage
 288      * timely writes to the base index by immediately following
 289      * updates with a write of a volatile field that must be updated
 290      * anyway, or an Opaque-mode write if there is no such
 291      * opportunity.
 292      *
 293      * Because indices and slot contents cannot always be consistent,
 294      * the emptiness check base == top is only quiescently accurate
 295      * (and so used where this suffices). Otherwise, it may err on the
 296      * side of possibly making the queue appear nonempty when a push,
 297      * pop, or poll have not fully committed, or making it appear
 298      * empty when an update of top or base has not yet been seen.
 299      * Similarly, the check in push for the queue array being full may
 300      * trigger when not completely full, causing a resize earlier than
 301      * required.
 302      *
 303      * Mainly because of these potential inconsistencies among slots
 304      * vs indices, the poll operation, considered individually, is not
 305      * wait-free. One thief cannot successfully continue until another
 306      * in-progress one (or, if previously empty, a push) visibly
 307      * completes.  This can stall threads when required to consume
 308      * from a given queue (which may spin).  However, in the
 309      * aggregate, we ensure probabilistic non-blockingness at least
 310      * until checking quiescence (which is intrinsically blocking):
 311      * If an attempted steal fails, a scanning thief chooses a
 312      * different victim target to try next. So, in order for one thief
 313      * to progress, it suffices for any in-progress poll or new push
 314      * on any empty queue to complete. The worst cases occur when many
 315      * threads are looking for tasks being produced by a stalled
 316      * producer.
 317      *
 318      * This approach also enables support of a user mode in which
 319      * local task processing is in FIFO, not LIFO order, simply by
 320      * using poll rather than pop.  This can be useful in
 321      * message-passing frameworks in which tasks are never joined,
 322      * although with increased contention among task producers and
 323      * consumers.
 324      *
 325      * WorkQueues are also used in a similar way for tasks submitted
 326      * to the pool. We cannot mix these tasks in the same queues used
 327      * by workers. Instead, we randomly associate submission queues
 328      * with submitting threads, using a form of hashing.  The
 329      * ThreadLocalRandom probe value serves as a hash code for
 330      * choosing existing queues, and may be randomly repositioned upon
 331      * contention with other submitters.  In essence, submitters act
 332      * like workers except that they are restricted to executing local
 333      * tasks that they submitted (or when known, subtasks thereof).
 334      * Insertion of tasks in shared mode requires a lock. We use only
 335      * a simple spinlock (using field "source"), because submitters
 336      * encountering a busy queue move to a different position to use
 337      * or create other queues. They block only when registering new
 338      * queues.
 339      *
 340      * Management
 341      * ==========
 342      *
 343      * The main throughput advantages of work-stealing stem from
 344      * decentralized control -- workers mostly take tasks from
 345      * themselves or each other, at rates that can exceed a billion
 346      * per second.  Most non-atomic control is performed by some form
 347      * of scanning across or within queues.  The pool itself creates,
 348      * activates (enables scanning for and running tasks),
 349      * deactivates, blocks, and terminates threads, all with minimal
 350      * central information.  There are only a few properties that we
 351      * can globally track or maintain, so we pack them into a small
 352      * number of variables, often maintaining atomicity without
 353      * blocking or locking.  Nearly all essentially atomic control
 354      * state is held in a few volatile variables that are by far most
 355      * often read (not written) as status and consistency checks. We
 356      * pack as much information into them as we can.
 357      *
 358      * Field "ctl" contains 64 bits holding information needed to
 359      * atomically decide to add, enqueue (on an event queue), and
 360      * dequeue and release workers.  To enable this packing, we
 361      * restrict maximum parallelism to (1<<15)-1 (which is far in
 362      * excess of normal operating range) to allow ids, counts, and
 363      * their negations (used for thresholding) to fit into 16bit
 364      * subfields.
 365      *
 366      * Field "mode" holds configuration parameters as well as lifetime
 367      * status, atomically and monotonically setting SHUTDOWN, STOP,
 368      * and finally TERMINATED bits. It is updated only via bitwise
 369      * atomics (getAndBitwiseOr).
 370      *
 371      * Array "queues" holds references to WorkQueues.  It is updated
 372      * (only during worker creation and termination) under the
 373      * registrationLock, but is otherwise concurrently readable, and
 374      * accessed directly (although always prefaced by acquireFences or
 375      * other acquiring reads). To simplify index-based operations, the
 376      * array size is always a power of two, and all readers must
 377      * tolerate null slots.  Worker queues are at odd indices. Worker
 378      * ids masked with SMASK match their index. Shared (submission)
 379      * queues are at even indices. Grouping them together in this way
 380      * simplifies and speeds up task scanning.
 381      *
 382      * All worker thread creation is on-demand, triggered by task
 383      * submissions, replacement of terminated workers, and/or
 384      * compensation for blocked workers. However, all other support
 385      * code is set up to work with other policies.  To ensure that we
 386      * do not hold on to worker or task references that would prevent
 387      * GC, all accesses to workQueues are via indices into the
 388      * queues array (which is one source of some of the messy code
 389      * constructions here). In essence, the queues array serves as
 390      * a weak reference mechanism. Thus for example the stack top
 391      * subfield of ctl stores indices, not references.
 392      *
 393      * Queuing Idle Workers. Unlike HPC work-stealing frameworks, we
 394      * cannot let workers spin indefinitely scanning for tasks when
 395      * none can be found immediately, and we cannot start/resume
 396      * workers unless there appear to be tasks available.  On the
 397      * other hand, we must quickly prod them into action when new
 398      * tasks are submitted or generated. These latencies are mainly a
 399      * function of JVM park/unpark (and underlying OS) performance,
 400      * which can be slow and variable.  In many usages, ramp-up time
 401      * is the main limiting factor in overall performance, which is
 402      * compounded at program start-up by JIT compilation and
 403      * allocation. On the other hand, throughput degrades when too
 404      * many threads poll for too few tasks.
 405      *
 406      * The "ctl" field atomically maintains total and "released"
 407      * worker counts, plus the head of the available worker queue
 408      * (actually stack, represented by the lower 32bit subfield of
 409      * ctl).  Released workers are those known to be scanning for
 410      * and/or running tasks. Unreleased ("available") workers are
 411      * recorded in the ctl stack. These workers are made available for
 412      * signalling by enqueuing in ctl (see method awaitWork).  The
 413      * "queue" is a form of Treiber stack. This is ideal for
 414      * activating threads in most-recently used order, and improves
 415      * performance and locality, outweighing the disadvantages of
 416      * being prone to contention and inability to release a worker
 417      * unless it is topmost on stack. The top stack state holds the
 418      * value of the "phase" field of the worker: its index and status,
 419      * plus a version counter that, in addition to the count subfields
 420      * (also serving as version stamps) provide protection against
 421      * Treiber stack ABA effects.
 422      *
 423      * Creating workers. To create a worker, we pre-increment counts
 424      * (serving as a reservation), and attempt to construct a
 425      * ForkJoinWorkerThread via its factory. On starting, the new
 426      * thread first invokes registerWorker, where it constructs a
 427      * WorkQueue and is assigned an index in the queues array
 428      * (expanding the array if necessary).  Upon any exception across
 429      * these steps, or null return from factory, deregisterWorker
 430      * adjusts counts and records accordingly.  If a null return, the
 431      * pool continues running with fewer than the target number
 432      * workers. If exceptional, the exception is propagated, generally
 433      * to some external caller.
 434      *
 435      * WorkQueue field "phase" is used by both workers and the pool to
 436      * manage and track whether a worker is UNSIGNALLED (possibly
 437      * blocked waiting for a signal).  When a worker is enqueued its
 438      * phase field is set negative. Note that phase field updates lag
 439      * queue CAS releases; seeing a negative phase does not guarantee
 440      * that the worker is available. When queued, the lower 16 bits of
 441      * its phase must hold its pool index. So we place the index there
 442      * upon initialization and never modify these bits.
 443      *
 444      * The ctl field also serves as the basis for memory
 445      * synchronization surrounding activation. This uses a more
 446      * efficient version of a Dekker-like rule that task producers and
 447      * consumers sync with each other by both writing/CASing ctl (even
 448      * if to its current value).  However, rather than CASing ctl to
 449      * its current value in the common case where no action is
 450      * required, we reduce write contention by ensuring that
 451      * signalWork invocations are prefaced with a full-volatile memory
 452      * access (which is usually needed anyway).
 453      *
 454      * Signalling. Signals (in signalWork) cause new or reactivated
 455      * workers to scan for tasks.  Method signalWork and its callers
 456      * try to approximate the unattainable goal of having the right
 457      * number of workers activated for the tasks at hand, but must err
 458      * on the side of too many workers vs too few to avoid stalls.  If
 459      * computations are purely tree structured, it suffices for every
 460      * worker to activate another when it pushes a task into an empty
 461      * queue, resulting in O(log(#threads)) steps to full activation.
 462      * If instead, tasks come in serially from only a single producer,
 463      * each worker taking its first (since the last quiescence) task
 464      * from a queue should signal another if there are more tasks in
 465      * that queue. This is equivalent to, but generally faster than,
 466      * arranging the stealer take two tasks, re-pushing one on its own
 467      * queue, and signalling (because its queue is empty), also
 468      * resulting in logarithmic full activation time. Because we don't
 469      * know about usage patterns (or most commonly, mixtures), we use
 470      * both approaches.  We approximate the second rule by arranging
 471      * that workers in scan() do not repeat signals when repeatedly
 472      * taking tasks from any given queue, by remembering the previous
 473      * one. There are narrow windows in which both rules may apply,
 474      * leading to duplicate or unnecessary signals. Despite such
 475      * limitations, these rules usually avoid slowdowns that otherwise
 476      * occur when too many workers contend to take too few tasks, or
 477      * when producers waste most of their time resignalling.  However,
 478      * contention and overhead effects may still occur during ramp-up,
 479      * ramp-down, and small computations involving only a few workers.
 480      *
 481      * Scanning. Method scan performs top-level scanning for (and
 482      * execution of) tasks.  Scans by different workers and/or at
 483      * different times are unlikely to poll queues in the same
 484      * order. Each scan traverses and tries to poll from each queue in
 485      * a pseudorandom permutation order by starting at a random index,
 486      * and using a constant cyclically exhaustive stride; restarting
 487      * upon contention.  (Non-top-level scans; for example in
 488      * helpJoin, use simpler linear probes because they do not
 489      * systematically contend with top-level scans.)  The pseudorandom
 490      * generator need not have high-quality statistical properties in
 491      * the long term. We use Marsaglia XorShifts, seeded with the Weyl
 492      * sequence from ThreadLocalRandom probes, which are cheap and
 493      * suffice. Scans do not otherwise explicitly take into account
 494      * core affinities, loads, cache localities, etc, However, they do
 495      * exploit temporal locality (which usually approximates these) by
 496      * preferring to re-poll from the same queue after a successful
 497      * poll before trying others (see method topLevelExec).  This
 498      * reduces fairness, which is partially counteracted by using a
 499      * one-shot form of poll (tryPoll) that may lose to other workers.
 500      *
 501      * Deactivation. Method scan returns a sentinel when no tasks are
 502      * found, leading to deactivation (see awaitWork). The count
 503      * fields in ctl allow accurate discovery of quiescent states
 504      * (i.e., when all workers are idle) after deactivation. However,
 505      * this may also race with new (external) submissions, so a
 506      * recheck is also needed to determine quiescence. Upon apparently
 507      * triggering quiescence, awaitWork re-scans and self-signals if
 508      * it may have missed a signal. In other cases, a missed signal
 509      * may transiently lower parallelism because deactivation does not
 510      * necessarily mean that there is no more work, only that that
 511      * there were no tasks not taken by other workers.  But more
 512      * signals are generated (see above) to eventually reactivate if
 513      * needed.
 514      *
 515      * Trimming workers. To release resources after periods of lack of
 516      * use, a worker starting to wait when the pool is quiescent will
 517      * time out and terminate if the pool has remained quiescent for
 518      * period given by field keepAlive.
 519      *
 520      * Shutdown and Termination. A call to shutdownNow invokes
 521      * tryTerminate to atomically set a mode bit. The calling thread,
 522      * as well as every other worker thereafter terminating, helps
 523      * terminate others by cancelling their unprocessed tasks, and
 524      * waking them up. Calls to non-abrupt shutdown() preface this by
 525      * checking isQuiescent before triggering the "STOP" phase of
 526      * termination. To conform to ExecutorService invoke, invokeAll,
 527      * and invokeAny specs, we must track pool status while waiting,
 528      * and interrupt interruptible callers on termination (see
 529      * ForkJoinTask.joinForPoolInvoke etc).
 530      *
 531      * Joining Tasks
 532      * =============
 533      *
 534      * Normally, the first option when joining a task that is not done
 535      * is to try to unfork it from local queue and run it.  Otherwise,
 536      * any of several actions may be taken when one worker is waiting
 537      * to join a task stolen (or always held) by another.  Because we
 538      * are multiplexing many tasks on to a pool of workers, we can't
 539      * always just let them block (as in Thread.join).  We also cannot
 540      * just reassign the joiner's run-time stack with another and
 541      * replace it later, which would be a form of "continuation", that
 542      * even if possible is not necessarily a good idea since we may
 543      * need both an unblocked task and its continuation to progress.
 544      * Instead we combine two tactics:
 545      *
 546      *   Helping: Arranging for the joiner to execute some task that it
 547      *      could be running if the steal had not occurred.
 548      *
 549      *   Compensating: Unless there are already enough live threads,
 550      *      method tryCompensate() may create or re-activate a spare
 551      *      thread to compensate for blocked joiners until they unblock.
 552      *
 553      * A third form (implemented via tryRemove) amounts to helping a
 554      * hypothetical compensator: If we can readily tell that a
 555      * possible action of a compensator is to steal and execute the
 556      * task being joined, the joining thread can do so directly,
 557      * without the need for a compensation thread; although with a
 558      * (rare) possibility of reduced parallelism because of a
 559      * transient gap in the queue array.
 560      *
 561      * Other intermediate forms available for specific task types (for
 562      * example helpAsyncBlocker) often avoid or postpone the need for
 563      * blocking or compensation.
 564      *
 565      * The ManagedBlocker extension API can't use helping so relies
 566      * only on compensation in method awaitBlocker.
 567      *
 568      * The algorithm in helpJoin entails a form of "linear helping".
 569      * Each worker records (in field "source") the id of the queue
 570      * from which it last stole a task.  The scan in method helpJoin
 571      * uses these markers to try to find a worker to help (i.e., steal
 572      * back a task from and execute it) that could hasten completion
 573      * of the actively joined task.  Thus, the joiner executes a task
 574      * that would be on its own local deque if the to-be-joined task
 575      * had not been stolen. This is a conservative variant of the
 576      * approach described in Wagner & Calder "Leapfrogging: a portable
 577      * technique for implementing efficient futures" SIGPLAN Notices,
 578      * 1993 (http://portal.acm.org/citation.cfm?id=155354). It differs
 579      * mainly in that we only record queue ids, not full dependency
 580      * links.  This requires a linear scan of the queues array to
 581      * locate stealers, but isolates cost to when it is needed, rather
 582      * than adding to per-task overhead. Also, searches are limited to
 583      * direct and at most two levels of indirect stealers, after which
 584      * there are rapidly diminishing returns on increased overhead.
 585      * Searches can fail to locate stealers when stalls delay
 586      * recording sources.  Further, even when accurately identified,
 587      * stealers might not ever produce a task that the joiner can in
 588      * turn help with. So, compensation is tried upon failure to find
 589      * tasks to run.
 590      *
 591      * Joining CountedCompleters (see helpComplete) differs from (and
 592      * is generally more efficient than) other cases because task
 593      * eligibility is determined by checking completion chains rather
 594      * than tracking stealers.
 595      *
 596      * Joining under timeouts (ForkJoinTask timed get) uses a
 597      * constrained mixture of helping and compensating in part because
 598      * pools (actually, only the common pool) may not have any
 599      * available threads: If the pool is saturated (all available
 600      * workers are busy), the caller tries to remove and otherwise
 601      * help; else it blocks under compensation so that it may time out
 602      * independently of any tasks.
 603      *
 604      * Compensation does not by default aim to keep exactly the target
 605      * parallelism number of unblocked threads running at any given
 606      * time. Some previous versions of this class employed immediate
 607      * compensations for any blocked join. However, in practice, the
 608      * vast majority of blockages are transient byproducts of GC and
 609      * other JVM or OS activities that are made worse by replacement
 610      * when they cause longer-term oversubscription.  Rather than
 611      * impose arbitrary policies, we allow users to override the
 612      * default of only adding threads upon apparent starvation.  The
 613      * compensation mechanism may also be bounded.  Bounds for the
 614      * commonPool (see COMMON_MAX_SPARES) better enable JVMs to cope
 615      * with programming errors and abuse before running out of
 616      * resources to do so.
 617      *
 618      * Common Pool
 619      * ===========
 620      *
 621      * The static common pool always exists after static
 622      * initialization.  Since it (or any other created pool) need
 623      * never be used, we minimize initial construction overhead and
 624      * footprint to the setup of about a dozen fields.
 625      *
 626      * When external threads submit to the common pool, they can
 627      * perform subtask processing (see helpComplete and related
 628      * methods) upon joins.  This caller-helps policy makes it
 629      * sensible to set common pool parallelism level to one (or more)
 630      * less than the total number of available cores, or even zero for
 631      * pure caller-runs.  We do not need to record whether external
 632      * submissions are to the common pool -- if not, external help
 633      * methods return quickly. These submitters would otherwise be
 634      * blocked waiting for completion, so the extra effort (with
 635      * liberally sprinkled task status checks) in inapplicable cases
 636      * amounts to an odd form of limited spin-wait before blocking in
 637      * ForkJoinTask.join.
 638      *
 639      * Guarantees for common pool parallelism zero are limited to
 640      * tasks that are joined by their callers in a tree-structured
 641      * fashion or use CountedCompleters (as is true for jdk
 642      * parallelStreams). Support infiltrates several methods,
 643      * including those that retry helping steps until we are sure that
 644      * none apply if there are no workers.
 645      *
 646      * As a more appropriate default in managed environments, unless
 647      * overridden by system properties, we use workers of subclass
 648      * InnocuousForkJoinWorkerThread when there is a SecurityManager
 649      * present. These workers have no permissions set, do not belong
 650      * to any user-defined ThreadGroup, and erase all ThreadLocals
 651      * after executing any top-level task.  The associated mechanics
 652      * may be JVM-dependent and must access particular Thread class
 653      * fields to achieve this effect.
 654      *
 655      * Interrupt handling
 656      * ==================
 657      *
 658      * The framework is designed to manage task cancellation
 659      * (ForkJoinTask.cancel) independently from the interrupt status
 660      * of threads running tasks. (See the public ForkJoinTask
 661      * documentation for rationale.)  Interrupts are issued only in
 662      * tryTerminate, when workers should be terminating and tasks
 663      * should be cancelled anyway. Interrupts are cleared only when
 664      * necessary to ensure that calls to LockSupport.park do not loop
 665      * indefinitely (park returns immediately if the current thread is
 666      * interrupted). If so, interruption is reinstated after blocking
 667      * if status could be visible during the scope of any task.  For
 668      * cases in which task bodies are specified or desired to
 669      * interrupt upon cancellation, ForkJoinTask.cancel can be
 670      * overridden to do so (as is done for invoke{Any,All}).
 671      *
 672      * Memory placement
 673      * ================
 674      *
 675      * Performance can be very sensitive to placement of instances of
 676      * ForkJoinPool and WorkQueues and their queue arrays. To reduce
 677      * false-sharing impact, the @Contended annotation isolates the
 678      * ForkJoinPool.ctl field as well as the most heavily written
 679      * WorkQueue fields. These mainly reduce cache traffic by scanners.
 680      * WorkQueue arrays are presized large enough to avoid resizing
 681      * (which transiently reduces throughput) in most tree-like
 682      * computations, although not in some streaming usages. Initial
 683      * sizes are not large enough to avoid secondary contention
 684      * effects (especially for GC cardmarks) when queues are placed
 685      * near each other in memory. This is common, but has different
 686      * impact in different collectors and remains incompletely
 687      * addressed.
 688      *
 689      * Style notes
 690      * ===========
 691      *
 692      * Memory ordering relies mainly on atomic operations (CAS,
 693      * getAndSet, getAndAdd) along with explicit fences.  This can be
 694      * awkward and ugly, but also reflects the need to control
 695      * outcomes across the unusual cases that arise in very racy code
 696      * with very few invariants. All fields are read into locals
 697      * before use, and null-checked if they are references, even if
 698      * they can never be null under current usages.  Array accesses
 699      * using masked indices include checks (that are always true) that
 700      * the array length is non-zero to avoid compilers inserting more
 701      * expensive traps.  This is usually done in a "C"-like style of
 702      * listing declarations at the heads of methods or blocks, and
 703      * using inline assignments on first encounter.  Nearly all
 704      * explicit checks lead to bypass/return, not exception throws,
 705      * because they may legitimately arise during shutdown.
 706      *
 707      * There is a lot of representation-level coupling among classes
 708      * ForkJoinPool, ForkJoinWorkerThread, and ForkJoinTask.  The
 709      * fields of WorkQueue maintain data structures managed by
 710      * ForkJoinPool, so are directly accessed.  There is little point
 711      * trying to reduce this, since any associated future changes in
 712      * representations will need to be accompanied by algorithmic
 713      * changes anyway. Several methods intrinsically sprawl because
 714      * they must accumulate sets of consistent reads of fields held in
 715      * local variables. Some others are artificially broken up to
 716      * reduce producer/consumer imbalances due to dynamic compilation.
 717      * There are also other coding oddities (including several
 718      * unnecessary-looking hoisted null checks) that help some methods
 719      * perform reasonably even when interpreted (not compiled).
 720      *
 721      * The order of declarations in this file is (with a few exceptions):
 722      * (1) Static utility functions
 723      * (2) Nested (static) classes
 724      * (3) Static fields
 725      * (4) Fields, along with constants used when unpacking some of them
 726      * (5) Internal control methods
 727      * (6) Callbacks and other support for ForkJoinTask methods
 728      * (7) Exported methods
 729      * (8) Static block initializing statics in minimally dependent order
 730      *
 731      * Revision notes
 732      * ==============
 733      *
 734      * The main sources of differences of January 2020 ForkJoin
 735      * classes from previous version are:
 736      *
 737      * * ForkJoinTask now uses field "aux" to support blocking joins
 738      *   and/or record exceptions, replacing reliance on builtin
 739      *   monitors and side tables.
 740      * * Scans probe slots (vs compare indices), along with related
 741      *   changes that reduce performance differences across most
 742      *   garbage collectors, and reduce contention.
 743      * * Refactoring for better integration of special task types and
 744      *   other capabilities that had been incrementally tacked on. Plus
 745      *   many minor reworkings to improve consistency.
 746      */
 747 
 748     // Static utilities
 749 
 750     /**
 751      * If there is a security manager, makes sure caller has
 752      * permission to modify threads.
 753      */
 754     private static void checkPermission() {
 755         @SuppressWarnings("removal")
 756         SecurityManager security = System.getSecurityManager();
 757         if (security != null)
 758             security.checkPermission(modifyThreadPermission);
 759     }
 760 
 761     @SuppressWarnings("removal")
 762     static AccessControlContext contextWithPermissions(Permission ... perms) {
 763         Permissions permissions = new Permissions();
 764         for (Permission perm : perms)
 765             permissions.add(perm);
 766         return new AccessControlContext(
 767             new ProtectionDomain[] { new ProtectionDomain(null, permissions) });
 768     }
 769 
 770     // Nested classes
 771 
 772     /**
 773      * Factory for creating new {@link ForkJoinWorkerThread}s.
 774      * A {@code ForkJoinWorkerThreadFactory} must be defined and used
 775      * for {@code ForkJoinWorkerThread} subclasses that extend base
 776      * functionality or initialize threads with different contexts.
 777      */
 778     public static interface ForkJoinWorkerThreadFactory {
 779         /**
 780          * Returns a new worker thread operating in the given pool.
 781          * Returning null or throwing an exception may result in tasks
 782          * never being executed.  If this method throws an exception,
 783          * it is relayed to the caller of the method (for example
 784          * {@code execute}) causing attempted thread creation. If this
 785          * method returns null or throws an exception, it is not
 786          * retried until the next attempted creation (for example
 787          * another call to {@code execute}).
 788          *
 789          * @param pool the pool this thread works in
 790          * @return the new worker thread, or {@code null} if the request
 791          *         to create a thread is rejected
 792          * @throws NullPointerException if the pool is null
 793          */
 794         public ForkJoinWorkerThread newThread(ForkJoinPool pool);
 795     }
 796 
 797     /**
 798      * Default ForkJoinWorkerThreadFactory implementation; creates a
 799      * new ForkJoinWorkerThread using the system class loader as the
 800      * thread context class loader.
 801      */
 802     static final class DefaultForkJoinWorkerThreadFactory
 803         implements ForkJoinWorkerThreadFactory {
 804         // ACC for access to the factory
 805         @SuppressWarnings("removal")
 806         private static final AccessControlContext ACC = contextWithPermissions(
 807             new RuntimePermission("getClassLoader"),
 808             new RuntimePermission("setContextClassLoader"));
 809         @SuppressWarnings("removal")
 810         public final ForkJoinWorkerThread newThread(ForkJoinPool pool) {
 811             return AccessController.doPrivileged(
 812                 new PrivilegedAction<>() {
 813                     public ForkJoinWorkerThread run() {
 814                         return new ForkJoinWorkerThread(null, pool, true, false);
 815                     }},
 816                 ACC);
 817         }
 818     }
 819 
 820     /**
 821      * Factory for CommonPool unless overridden by System property.
 822      * Creates InnocuousForkJoinWorkerThreads if a security manager is
 823      * present at time of invocation.  Support requires that we break
 824      * quite a lot of encapsulation (some via helper methods in
 825      * ThreadLocalRandom) to access and set Thread fields.
 826      */
 827     static final class DefaultCommonPoolForkJoinWorkerThreadFactory
 828         implements ForkJoinWorkerThreadFactory {
 829         @SuppressWarnings("removal")
 830         private static final AccessControlContext ACC = contextWithPermissions(
 831             modifyThreadPermission,
 832             new RuntimePermission("enableContextClassLoaderOverride"),
 833             new RuntimePermission("modifyThreadGroup"),
 834             new RuntimePermission("getClassLoader"),
 835             new RuntimePermission("setContextClassLoader"));
 836 
 837         @SuppressWarnings("removal")
 838         public final ForkJoinWorkerThread newThread(ForkJoinPool pool) {
 839             return AccessController.doPrivileged(
 840                  new PrivilegedAction<>() {
 841                      public ForkJoinWorkerThread run() {
 842                          return System.getSecurityManager() == null ?
 843                              new ForkJoinWorkerThread(null, pool, true, true):
 844                              new ForkJoinWorkerThread.
 845                              InnocuousForkJoinWorkerThread(pool); }},
 846                  ACC);
 847         }
 848     }
 849 
 850     // Constants shared across ForkJoinPool and WorkQueue
 851 
 852     // Bounds
 853     static final int SWIDTH       = 16;            // width of short
 854     static final int SMASK        = 0xffff;        // short bits == max index
 855     static final int MAX_CAP      = 0x7fff;        // max #workers - 1
 856 
 857     // Masks and units for WorkQueue.phase and ctl sp subfield
 858     static final int UNSIGNALLED  = 1 << 31;       // must be negative
 859     static final int SS_SEQ       = 1 << 16;       // version count
 860 
 861     // Mode bits and sentinels, some also used in WorkQueue fields
 862     static final int FIFO         = 1 << 16;       // fifo queue or access mode
 863     static final int SRC          = 1 << 17;       // set for valid queue ids
 864     static final int INNOCUOUS    = 1 << 18;       // set for Innocuous workers
 865     static final int QUIET        = 1 << 19;       // quiescing phase or source
 866     static final int SHUTDOWN     = 1 << 24;
 867     static final int TERMINATED   = 1 << 25;
 868     static final int STOP         = 1 << 31;       // must be negative
 869     static final int UNCOMPENSATE = 1 << 16;       // tryCompensate return
 870 
 871     /**
 872      * Initial capacity of work-stealing queue array.  Must be a power
 873      * of two, at least 2. See above.
 874      */
 875     static final int INITIAL_QUEUE_CAPACITY = 1 << 8;
 876 
 877     /**
 878      * Queues supporting work-stealing as well as external task
 879      * submission. See above for descriptions and algorithms.
 880      */
 881     static final class WorkQueue {
 882         volatile int phase;        // versioned, negative if inactive
 883         int stackPred;             // pool stack (ctl) predecessor link
 884         int config;                // index, mode, ORed with SRC after init
 885         int base;                  // index of next slot for poll
 886         ForkJoinTask<?>[] array;   // the queued tasks; power of 2 size
 887         final ForkJoinWorkerThread owner; // owning thread or null if shared
 888 
 889         // segregate fields frequently updated but not read by scans or steals
 890         @jdk.internal.vm.annotation.Contended("w")
 891         int top;                   // index of next slot for push
 892         @jdk.internal.vm.annotation.Contended("w")
 893         volatile int source;       // source queue id, lock, or sentinel
 894         @jdk.internal.vm.annotation.Contended("w")
 895         int nsteals;               // number of steals from other queues
 896 
 897         // Support for atomic operations
 898         private static final VarHandle QA; // for array slots
 899         private static final VarHandle SOURCE;
 900         private static final VarHandle BASE;
 901         static final ForkJoinTask<?> getSlot(ForkJoinTask<?>[] a, int i) {
 902             return (ForkJoinTask<?>)QA.getAcquire(a, i);
 903         }
 904         static final ForkJoinTask<?> getAndClearSlot(ForkJoinTask<?>[] a,
 905                                                      int i) {
 906             return (ForkJoinTask<?>)QA.getAndSet(a, i, null);
 907         }
 908         static final void setSlotVolatile(ForkJoinTask<?>[] a, int i,
 909                                           ForkJoinTask<?> v) {
 910             QA.setVolatile(a, i, v);
 911         }
 912         static final boolean casSlotToNull(ForkJoinTask<?>[] a, int i,
 913                                           ForkJoinTask<?> c) {
 914             return QA.compareAndSet(a, i, c, null);
 915         }
 916         final boolean tryLock() {
 917             return SOURCE.compareAndSet(this, 0, 1);
 918         }
 919         final void setBaseOpaque(int b) {
 920             BASE.setOpaque(this, b);
 921         }
 922 
 923         /**
 924          * Constructor used by ForkJoinWorkerThreads. Most fields
 925          * are initialized upon thread start, in pool.registerWorker.
 926          */
 927         WorkQueue(ForkJoinWorkerThread owner, boolean isInnocuous) {
 928             this.config = (isInnocuous) ? INNOCUOUS : 0;
 929             this.owner = owner;
 930         }
 931 
 932         /**
 933          * Constructor used for external queues.
 934          */
 935         WorkQueue(int config) {
 936             array = new ForkJoinTask<?>[INITIAL_QUEUE_CAPACITY];
 937             this.config = config;
 938             owner = null;
 939             phase = -1;
 940         }
 941 
 942         /**
 943          * Returns an exportable index (used by ForkJoinWorkerThread).
 944          */
 945         final int getPoolIndex() {
 946             return (config & 0xffff) >>> 1; // ignore odd/even tag bit
 947         }
 948 
 949         /**
 950          * Returns the approximate number of tasks in the queue.
 951          */
 952         final int queueSize() {
 953             VarHandle.acquireFence(); // ensure fresh reads by external callers
 954             int n = top - base;
 955             return (n < 0) ? 0 : n;   // ignore transient negative
 956         }
 957 
 958         /**
 959          * Provides a more conservative estimate of whether this queue
 960          * has any tasks than does queueSize.
 961          */
 962         final boolean isEmpty() {
 963             return !((source != 0 && owner == null) || top - base > 0);
 964         }
 965 
 966         /**
 967          * Pushes a task. Call only by owner in unshared queues.
 968          *
 969          * @param task the task. Caller must ensure non-null.
 970          * @param pool (no-op if null)
 971          * @param signalOnEmpty signal a worker if queue was empty
 972          * @throws RejectedExecutionException if array cannot be resized
 973          */
 974         final void push(ForkJoinTask<?> task, ForkJoinPool pool, boolean signalOnEmpty) {
 975             ForkJoinTask<?>[] a = array;
 976             int s = top++, d = s - base, cap, m; // skip insert if disabled
 977             if (a != null && pool != null && (cap = a.length) > 0) {
 978                 setSlotVolatile(a, (m = cap - 1) & s, task);
 979                 if (d == m) {
 980                     growArray();
 981                     pool.signalWork();  // signal if resized
 982                 } else {
 983                     if (signalOnEmpty && a[m & (s - 1)] == null)
 984                         pool.signalWork(); // signal if was empty
 985                 }
 986             }
 987         }
 988 
 989         final void push(ForkJoinTask<?> task, ForkJoinPool pool) {
 990             push(task, pool, true);
 991         }
 992 
 993         /**
 994          * Pushes task to a shared queue with lock already held, and unlocks.
 995          *
 996          * @return true if caller should signal work
 997          */
 998         final boolean lockedPush(ForkJoinTask<?> task) {
 999             ForkJoinTask<?>[] a = array;
1000             int s = top++, d = s - base, cap, m;
1001             if (a != null && (cap = a.length) > 0) {
1002                 a[(m = cap - 1) & s] = task;
1003                 if (d == m)
1004                     growArray();
1005                 source = 0; // unlock
1006                 if (d == m || a[m & (s - 1)] == null)
1007                     return true;
1008             }
1009             return false;
1010         }
1011 
1012         /**
1013          * Doubles the capacity of array. Called by owner or with lock
1014          * held after pre-incrementing top, which is reverted on
1015          * allocation failure.
1016          */
1017         final void growArray() {
1018             ForkJoinTask<?>[] oldArray = array, newArray;
1019             int s = top - 1, oldCap, newCap;
1020             if (oldArray != null && (oldCap = oldArray.length) > 0 &&
1021                 (newCap = oldCap << 1) > 0) { // skip if disabled
1022                 try {
1023                     newArray = new ForkJoinTask<?>[newCap];
1024                 } catch (Throwable ex) {
1025                     top = s;
1026                     if (owner == null)
1027                         source = 0; // unlock
1028                     throw new RejectedExecutionException(
1029                         "Queue capacity exceeded");
1030                 }
1031                 int newMask = newCap - 1, oldMask = oldCap - 1;
1032                 for (int k = oldCap; k > 0; --k, --s) {
1033                     ForkJoinTask<?> x;        // poll old, push to new
1034                     if ((x = getAndClearSlot(oldArray, s & oldMask)) == null)
1035                         break;                // others already taken
1036                     newArray[s & newMask] = x;
1037                 }
1038                 VarHandle.releaseFence();     // fill before publish
1039                 array = newArray;
1040             }
1041         }
1042 
1043         // Variants of pop
1044 
1045         /**
1046          * Pops and returns task, or null if empty. Called only by owner.
1047          */
1048         private ForkJoinTask<?> pop() {
1049             ForkJoinTask<?> t = null;
1050             int s = top, cap; ForkJoinTask<?>[] a;
1051             if ((a = array) != null && (cap = a.length) > 0 && base != s-- &&
1052                 (t = getAndClearSlot(a, (cap - 1) & s)) != null)
1053                 top = s;
1054             return t;
1055         }
1056 
1057         /**
1058          * Pops the given task for owner only if it is at the current top.
1059          */
1060         final boolean tryUnpush(ForkJoinTask<?> task) {
1061             int s = top, cap; ForkJoinTask<?>[] a;
1062             if ((a = array) != null && (cap = a.length) > 0 && base != s-- &&
1063                 casSlotToNull(a, (cap - 1) & s, task)) {
1064                 top = s;
1065                 return true;
1066             }
1067             return false;
1068         }
1069 
1070         /**
1071          * Locking version of tryUnpush.
1072          */
1073         final boolean externalTryUnpush(ForkJoinTask<?> task) {
1074             boolean taken = false;
1075             for (;;) {
1076                 int s = top, cap, k; ForkJoinTask<?>[] a;
1077                 if ((a = array) == null || (cap = a.length) <= 0 ||
1078                     a[k = (cap - 1) & (s - 1)] != task)
1079                     break;
1080                 if (tryLock()) {
1081                     if (top == s && array == a) {
1082                         if (taken = casSlotToNull(a, k, task)) {
1083                             top = s - 1;
1084                             source = 0;
1085                             break;
1086                         }
1087                     }
1088                     source = 0; // release lock for retry
1089                 }
1090                 Thread.yield(); // trylock failure
1091             }
1092             return taken;
1093         }
1094 
1095         /**
1096          * Deep form of tryUnpush: Traverses from top and removes task if
1097          * present, shifting others to fill gap.
1098          */
1099         final boolean tryRemove(ForkJoinTask<?> task, boolean owned) {
1100             boolean taken = false;
1101             int p = top, cap; ForkJoinTask<?>[] a; ForkJoinTask<?> t;
1102             if ((a = array) != null && task != null && (cap = a.length) > 0) {
1103                 int m = cap - 1, s = p - 1, d = p - base;
1104                 for (int i = s, k; d > 0; --i, --d) {
1105                     if ((t = a[k = i & m]) == task) {
1106                         if (owned || tryLock()) {
1107                             if ((owned || (array == a && top == p)) &&
1108                                 (taken = casSlotToNull(a, k, t))) {
1109                                 for (int j = i; j != s; ) // shift down
1110                                     a[j & m] = getAndClearSlot(a, ++j & m);
1111                                 top = s;
1112                             }
1113                             if (!owned)
1114                                 source = 0;
1115                         }
1116                         break;
1117                     }
1118                 }
1119             }
1120             return taken;
1121         }
1122 
1123         // variants of poll
1124 
1125         /**
1126          * Tries once to poll next task in FIFO order, failing on
1127          * inconsistency or contention.
1128          */
1129         final ForkJoinTask<?> tryPoll() {
1130             int cap, b, k; ForkJoinTask<?>[] a;
1131             if ((a = array) != null && (cap = a.length) > 0) {
1132                 ForkJoinTask<?> t = getSlot(a, k = (cap - 1) & (b = base));
1133                 if (base == b++ && t != null && casSlotToNull(a, k, t)) {
1134                     setBaseOpaque(b);
1135                     return t;
1136                 }
1137             }
1138             return null;
1139         }
1140 
1141         /**
1142          * Takes next task, if one exists, in order specified by mode.
1143          */
1144         final ForkJoinTask<?> nextLocalTask(int cfg) {
1145             ForkJoinTask<?> t = null;
1146             int s = top, cap; ForkJoinTask<?>[] a;
1147             if ((a = array) != null && (cap = a.length) > 0) {
1148                 for (int b, d;;) {
1149                     if ((d = s - (b = base)) <= 0)
1150                         break;
1151                     if (d == 1 || (cfg & FIFO) == 0) {
1152                         if ((t = getAndClearSlot(a, --s & (cap - 1))) != null)
1153                             top = s;
1154                         break;
1155                     }
1156                     if ((t = getAndClearSlot(a, b++ & (cap - 1))) != null) {
1157                         setBaseOpaque(b);
1158                         break;
1159                     }
1160                 }
1161             }
1162             return t;
1163         }
1164 
1165         /**
1166          * Takes next task, if one exists, using configured mode.
1167          */
1168         final ForkJoinTask<?> nextLocalTask() {
1169             return nextLocalTask(config);
1170         }
1171 
1172         /**
1173          * Returns next task, if one exists, in order specified by mode.
1174          */
1175         final ForkJoinTask<?> peek() {
1176             VarHandle.acquireFence();
1177             int cap; ForkJoinTask<?>[] a;
1178             return ((a = array) != null && (cap = a.length) > 0) ?
1179                 a[(cap - 1) & ((config & FIFO) != 0 ? base : top - 1)] : null;
1180         }
1181 
1182         // specialized execution methods
1183 
1184         /**
1185          * Runs the given (stolen) task if nonnull, as well as
1186          * remaining local tasks and/or others available from the
1187          * given queue.
1188          */
1189         final void topLevelExec(ForkJoinTask<?> task, WorkQueue q) {
1190             int cfg = config, nstolen = 1;
1191             while (task != null) {
1192                 task.doExec();
1193                 if ((task = nextLocalTask(cfg)) == null &&
1194                     q != null && (task = q.tryPoll()) != null)
1195                     ++nstolen;
1196             }
1197             nsteals += nstolen;
1198             source = 0;
1199             if ((cfg & INNOCUOUS) != 0)
1200                 ThreadLocalRandom.eraseThreadLocals(Thread.currentThread());
1201         }
1202 
1203         /**
1204          * Tries to pop and run tasks within the target's computation
1205          * until done, not found, or limit exceeded.
1206          *
1207          * @param task root of CountedCompleter computation
1208          * @param owned true if owned by a ForkJoinWorkerThread
1209          * @param limit max runs, or zero for no limit
1210          * @return task status on exit
1211          */
1212         final int helpComplete(ForkJoinTask<?> task, boolean owned, int limit) {
1213             int status = 0, cap, k, p, s; ForkJoinTask<?>[] a; ForkJoinTask<?> t;
1214             while (task != null && (status = task.status) >= 0 &&
1215                    (a = array) != null && (cap = a.length) > 0 &&
1216                    (t = a[k = (cap - 1) & (s = (p = top) - 1)])
1217                    instanceof CountedCompleter) {
1218                 CountedCompleter<?> f = (CountedCompleter<?>)t;
1219                 boolean taken = false;
1220                 for (;;) {     // exec if root task is a completer of t
1221                     if (f == task) {
1222                         if (owned) {
1223                             if ((taken = casSlotToNull(a, k, t)))
1224                                 top = s;
1225                         }
1226                         else if (tryLock()) {
1227                             if (top == p && array == a &&
1228                                 (taken = casSlotToNull(a, k, t)))
1229                                 top = s;
1230                             source = 0;
1231                         }
1232                         if (taken)
1233                             t.doExec();
1234                         else if (!owned)
1235                             Thread.yield(); // tryLock failure
1236                         break;
1237                     }
1238                     else if ((f = f.completer) == null)
1239                         break;
1240                 }
1241                 if (taken && limit != 0 && --limit == 0)
1242                     break;
1243             }
1244             return status;
1245         }
1246 
1247         /**
1248          * Tries to poll and run AsynchronousCompletionTasks until
1249          * none found or blocker is released.
1250          *
1251          * @param blocker the blocker
1252          */
1253         final void helpAsyncBlocker(ManagedBlocker blocker) {
1254             int cap, b, d, k; ForkJoinTask<?>[] a; ForkJoinTask<?> t;
1255             while (blocker != null && (d = top - (b = base)) > 0 &&
1256                    (a = array) != null && (cap = a.length) > 0 &&
1257                    (((t = getSlot(a, k = (cap - 1) & b)) == null && d > 1) ||
1258                     t instanceof
1259                     CompletableFuture.AsynchronousCompletionTask) &&
1260                    !blocker.isReleasable()) {
1261                 if (t != null && base == b++ && casSlotToNull(a, k, t)) {
1262                     setBaseOpaque(b);
1263                     t.doExec();
1264                 }
1265             }
1266         }
1267 
1268         // misc
1269 
1270         /** AccessControlContext for innocuous workers, created on 1st use. */
1271         @SuppressWarnings("removal")
1272         private static AccessControlContext INNOCUOUS_ACC;
1273 
1274         /**
1275          * Initializes (upon registration) InnocuousForkJoinWorkerThreads.
1276          */
1277         @SuppressWarnings("removal")
1278         final void initializeInnocuousWorker() {
1279             AccessControlContext acc; // racy construction OK
1280             if ((acc = INNOCUOUS_ACC) == null)
1281                 INNOCUOUS_ACC = acc = new AccessControlContext(
1282                     new ProtectionDomain[] { new ProtectionDomain(null, null) });
1283             Thread t = Thread.currentThread();
1284             ThreadLocalRandom.setInheritedAccessControlContext(t, acc);
1285             ThreadLocalRandom.eraseThreadLocals(t);
1286         }
1287 
1288         /**
1289          * Returns true if owned by a worker thread and not known to be blocked.
1290          */
1291         final boolean isApparentlyUnblocked() {
1292             Thread wt; Thread.State s;
1293             return ((wt = owner) != null &&
1294                     (s = wt.getState()) != Thread.State.BLOCKED &&
1295                     s != Thread.State.WAITING &&
1296                     s != Thread.State.TIMED_WAITING);
1297         }
1298 
1299         static {
1300             try {
1301                 QA = MethodHandles.arrayElementVarHandle(ForkJoinTask[].class);
1302                 MethodHandles.Lookup l = MethodHandles.lookup();
1303                 SOURCE = l.findVarHandle(WorkQueue.class, "source", int.class);
1304                 BASE = l.findVarHandle(WorkQueue.class, "base", int.class);
1305             } catch (ReflectiveOperationException e) {
1306                 throw new ExceptionInInitializerError(e);
1307             }
1308         }
1309     }
1310 
1311     // static fields (initialized in static initializer below)
1312 
1313     /**
1314      * Creates a new ForkJoinWorkerThread. This factory is used unless
1315      * overridden in ForkJoinPool constructors.
1316      */
1317     public static final ForkJoinWorkerThreadFactory
1318         defaultForkJoinWorkerThreadFactory;
1319 
1320     /**
1321      * Permission required for callers of methods that may start or
1322      * kill threads.
1323      */
1324     static final RuntimePermission modifyThreadPermission;
1325 





















1326     /**
1327      * Sequence number for creating worker names
1328      */
1329     private static volatile int poolIds;
1330 
1331     // static configuration constants
1332 
1333     /**
1334      * Default idle timeout value (in milliseconds) for the thread
1335      * triggering quiescence to park waiting for new work
1336      */
1337     private static final long DEFAULT_KEEPALIVE = 60_000L;
1338 
1339     /**
1340      * Undershoot tolerance for idle timeouts
1341      */
1342     private static final long TIMEOUT_SLOP = 20L;
1343 










1344     /*
1345      * Bits and masks for field ctl, packed with 4 16 bit subfields:
1346      * RC: Number of released (unqueued) workers minus target parallelism
1347      * TC: Number of total workers minus target parallelism
1348      * SS: version count and status of top waiting thread
1349      * ID: poolIndex of top of Treiber stack of waiters
1350      *
1351      * When convenient, we can extract the lower 32 stack top bits
1352      * (including version bits) as sp=(int)ctl.  The offsets of counts
1353      * by the target parallelism and the positionings of fields makes
1354      * it possible to perform the most common checks via sign tests of
1355      * fields: When ac is negative, there are not enough unqueued
1356      * workers, when tc is negative, there are not enough total
1357      * workers.  When sp is non-zero, there are waiting workers.  To
1358      * deal with possibly negative fields, we use casts in and out of
1359      * "short" and/or signed shifts to maintain signedness.
1360      *
1361      * Because it occupies uppermost bits, we can add one release
1362      * count using getAndAdd of RC_UNIT, rather than CAS, when
1363      * returning from a blocked join.  Other updates entail multiple
1364      * subfields and masking, requiring CAS.
1365      *
1366      * The limits packed in field "bounds" are also offset by the
1367      * parallelism level to make them comparable to the ctl rc and tc
1368      * fields.
1369      */
1370 
1371     // Lower and upper word masks
1372     private static final long SP_MASK    = 0xffffffffL;
1373     private static final long UC_MASK    = ~SP_MASK;
1374 
1375     // Release counts
1376     private static final int  RC_SHIFT   = 48;
1377     private static final long RC_UNIT    = 0x0001L << RC_SHIFT;
1378     private static final long RC_MASK    = 0xffffL << RC_SHIFT;
1379 
1380     // Total counts
1381     private static final int  TC_SHIFT   = 32;
1382     private static final long TC_UNIT    = 0x0001L << TC_SHIFT;
1383     private static final long TC_MASK    = 0xffffL << TC_SHIFT;
1384     private static final long ADD_WORKER = 0x0001L << (TC_SHIFT + 15); // sign
1385 
1386     // Instance fields
1387 
1388     final long keepAlive;                // milliseconds before dropping if idle
1389     volatile long stealCount;            // collects worker nsteals
1390     int scanRover;                       // advances across pollScan calls
1391     volatile int threadIds;              // for worker thread names
1392     final int bounds;                    // min, max threads packed as shorts
1393     volatile int mode;                   // parallelism, runstate, queue mode
1394     WorkQueue[] queues;                  // main registry
1395     final ReentrantLock registrationLock;
1396     Condition termination;               // lazily constructed
1397     final String workerNamePrefix;       // null for common pool
1398     final ForkJoinWorkerThreadFactory factory;
1399     final UncaughtExceptionHandler ueh;  // per-worker UEH
1400     final Predicate<? super ForkJoinPool> saturate;
1401     final SharedThreadContainer container;
1402 
1403     @jdk.internal.vm.annotation.Contended("fjpctl") // segregate
1404     volatile long ctl;                   // main pool control
1405 
1406     // Support for atomic operations
1407     private static final VarHandle CTL;
1408     private static final VarHandle MODE;
1409     private static final VarHandle THREADIDS;
1410     private static final VarHandle POOLIDS;
1411     private boolean compareAndSetCtl(long c, long v) {
1412         return CTL.compareAndSet(this, c, v);
1413     }
1414     private long compareAndExchangeCtl(long c, long v) {
1415         return (long)CTL.compareAndExchange(this, c, v);
1416     }
1417     private long getAndAddCtl(long v) {
1418         return (long)CTL.getAndAdd(this, v);
1419     }
1420     private int getAndBitwiseOrMode(int v) {
1421         return (int)MODE.getAndBitwiseOr(this, v);
1422     }
1423     private int getAndAddThreadIds(int x) {
1424         return (int)THREADIDS.getAndAdd(this, x);
1425     }
1426     private static int getAndAddPoolIds(int x) {
1427         return (int)POOLIDS.getAndAdd(x);
1428     }
1429 
1430     // Creating, registering and deregistering workers
1431 
1432     /**
1433      * Tries to construct and start one worker. Assumes that total
1434      * count has already been incremented as a reservation.  Invokes
1435      * deregisterWorker on any failure.
1436      *
1437      * @return true if successful
1438      */
1439     private boolean createWorker() {
1440         ForkJoinWorkerThreadFactory fac = factory;
1441         Throwable ex = null;
1442         ForkJoinWorkerThread wt = null;
1443         try {
1444             if (fac != null && (wt = fac.newThread(this)) != null) {
1445                 container.start(wt);
1446                 return true;
1447             }
1448         } catch (Throwable rex) {
1449             ex = rex;
1450         }
1451         deregisterWorker(wt, ex);
1452         return false;
1453     }
1454 
1455     /**
1456      * Provides a name for ForkJoinWorkerThread constructor.
1457      */
1458     final String nextWorkerThreadName() {
1459         String prefix = workerNamePrefix;
1460         int tid = getAndAddThreadIds(1) + 1;
1461         if (prefix == null) // commonPool has no prefix
1462             prefix = "ForkJoinPool.commonPool-worker-";
1463         return prefix.concat(Integer.toString(tid));
1464     }
1465 
1466     /**
1467      * Finishes initializing and records owned queue.
1468      *
1469      * @param w caller's WorkQueue
1470      */
1471     final void registerWorker(WorkQueue w) {
1472         ReentrantLock lock = registrationLock;
1473         ThreadLocalRandom.localInit();
1474         int seed = ThreadLocalRandom.getProbe();
1475         if (w != null && lock != null) {
1476             int modebits = (mode & FIFO) | w.config;
1477             w.array = new ForkJoinTask<?>[INITIAL_QUEUE_CAPACITY];
1478             w.stackPred = seed;                         // stash for runWorker
1479             if ((modebits & INNOCUOUS) != 0)
1480                 w.initializeInnocuousWorker();
1481             int id = (seed << 1) | 1;                   // initial index guess
1482             lock.lock();
1483             try {
1484                 WorkQueue[] qs; int n;                  // find queue index
1485                 if ((qs = queues) != null && (n = qs.length) > 0) {
1486                     int k = n, m = n - 1;
1487                     for (; qs[id &= m] != null && k > 0; id -= 2, k -= 2);
1488                     if (k == 0)
1489                         id = n | 1;                     // resize below
1490                     w.phase = w.config = id | modebits; // now publishable
1491 
1492                     if (id < n)
1493                         qs[id] = w;
1494                     else {                              // expand array
1495                         int an = n << 1, am = an - 1;
1496                         WorkQueue[] as = new WorkQueue[an];
1497                         as[id & am] = w;
1498                         for (int j = 1; j < n; j += 2)
1499                             as[j] = qs[j];
1500                         for (int j = 0; j < n; j += 2) {
1501                             WorkQueue q;
1502                             if ((q = qs[j]) != null)    // shared queues may move
1503                                 as[q.config & am] = q;
1504                         }
1505                         VarHandle.releaseFence();       // fill before publish
1506                         queues = as;
1507                     }
1508                 }
1509             } finally {
1510                 lock.unlock();
1511             }
1512         }
1513     }
1514 
1515     /**
1516      * Final callback from terminating worker, as well as upon failure
1517      * to construct or start a worker.  Removes record of worker from
1518      * array, and adjusts counts. If pool is shutting down, tries to
1519      * complete termination.
1520      *
1521      * @param wt the worker thread, or null if construction failed
1522      * @param ex the exception causing failure, or null if none
1523      */
1524     final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) {
1525         ReentrantLock lock = registrationLock;
1526         WorkQueue w = null;
1527         int cfg = 0;
1528         if (wt != null && (w = wt.workQueue) != null && lock != null) {
1529             WorkQueue[] qs; int n, i;
1530             cfg = w.config;
1531             long ns = w.nsteals & 0xffffffffL;
1532             lock.lock();                             // remove index from array
1533             if ((qs = queues) != null && (n = qs.length) > 0 &&
1534                 qs[i = cfg & (n - 1)] == w)
1535                 qs[i] = null;
1536             stealCount += ns;                        // accumulate steals
1537             lock.unlock();
1538             long c = ctl;
1539             if ((cfg & QUIET) == 0) // unless self-signalled, decrement counts
1540                 do {} while (c != (c = compareAndExchangeCtl(
1541                                        c, ((RC_MASK & (c - RC_UNIT)) |
1542                                            (TC_MASK & (c - TC_UNIT)) |
1543                                            (SP_MASK & c)))));
1544             else if ((int)c == 0)                    // was dropped on timeout
1545                 cfg = 0;                             // suppress signal if last
1546             for (ForkJoinTask<?> t; (t = w.pop()) != null; )
1547                 ForkJoinTask.cancelIgnoringExceptions(t); // cancel tasks
1548         }
1549 
1550         if (!tryTerminate(false, false) && w != null && (cfg & SRC) != 0)
1551             signalWork();                            // possibly replace worker
1552         if (ex != null)
1553             ForkJoinTask.rethrow(ex);
1554     }
1555 
1556     /*
1557      * Tries to create or release a worker if too few are running.
1558      */
1559     final void signalWork() {
1560         for (long c = ctl; c < 0L;) {
1561             int sp, i; WorkQueue[] qs; WorkQueue v;
1562             if ((sp = (int)c & ~UNSIGNALLED) == 0) {  // no idle workers
1563                 if ((c & ADD_WORKER) == 0L)           // enough total workers
1564                     break;
1565                 if (c == (c = compareAndExchangeCtl(
1566                               c, ((RC_MASK & (c + RC_UNIT)) |
1567                                   (TC_MASK & (c + TC_UNIT)))))) {
1568                     createWorker();
1569                     break;
1570                 }
1571             }
1572             else if ((qs = queues) == null)
1573                 break;                                // unstarted/terminated
1574             else if (qs.length <= (i = sp & SMASK))
1575                 break;                                // terminated
1576             else if ((v = qs[i]) == null)
1577                 break;                                // terminating
1578             else {
1579                 long nc = (v.stackPred & SP_MASK) | (UC_MASK & (c + RC_UNIT));
1580                 Thread vt = v.owner;
1581                 if (c == (c = compareAndExchangeCtl(c, nc))) {
1582                     v.phase = sp;
1583                     LockSupport.unpark(vt);           // release idle worker
1584                     break;
1585                 }
1586             }
1587         }
1588     }
1589 
1590     /**
1591      * Top-level runloop for workers, called by ForkJoinWorkerThread.run.
1592      * See above for explanation.
1593      *
1594      * @param w caller's WorkQueue (may be null on failed initialization)
1595      */
1596     final void runWorker(WorkQueue w) {
1597         if (mode >= 0 && w != null) {           // skip on failed init
1598             w.config |= SRC;                    // mark as valid source
1599             int r = w.stackPred, src = 0;       // use seed from registerWorker
1600             do {
1601                 r ^= r << 13; r ^= r >>> 17; r ^= r << 5; // xorshift
1602             } while ((src = scan(w, src, r)) >= 0 ||
1603                      (src = awaitWork(w)) == 0);
1604         }
1605     }
1606 
1607     /**
1608      * Scans for and if found executes top-level tasks: Tries to poll
1609      * each queue starting at a random index with random stride,
1610      * returning source id or retry indicator if contended or
1611      * inconsistent.
1612      *
1613      * @param w caller's WorkQueue
1614      * @param prevSrc the previous queue stolen from in current phase, or 0
1615      * @param r random seed
1616      * @return id of queue if taken, negative if none found, prevSrc for retry
1617      */
1618     private int scan(WorkQueue w, int prevSrc, int r) {
1619         WorkQueue[] qs = queues;
1620         int n = (w == null || qs == null) ? 0 : qs.length;
1621         for (int step = (r >>> 16) | 1, i = n; i > 0; --i, r += step) {
1622             int j, cap, b; WorkQueue q; ForkJoinTask<?>[] a;
1623             if ((q = qs[j = r & (n - 1)]) != null && // poll at qs[j].array[k]
1624                 (a = q.array) != null && (cap = a.length) > 0) {
1625                 int k = (cap - 1) & (b = q.base), nextBase = b + 1;
1626                 int nextIndex = (cap - 1) & nextBase, src = j | SRC;
1627                 ForkJoinTask<?> t = WorkQueue.getSlot(a, k);
1628                 if (q.base != b)                // inconsistent
1629                     return prevSrc;
1630                 else if (t != null && WorkQueue.casSlotToNull(a, k, t)) {
1631                     q.base = nextBase;
1632                     ForkJoinTask<?> next = a[nextIndex];
1633                     if ((w.source = src) != prevSrc && next != null)
1634                         signalWork();           // propagate
1635                     w.topLevelExec(t, q);
1636                     return src;
1637                 }
1638                 else if (a[nextIndex] != null)  // revisit
1639                     return prevSrc;
1640             }
1641         }
1642         return (queues != qs) ? prevSrc: -1;    // possibly resized
1643     }
1644 
1645     /**
1646      * Advances worker phase, pushes onto ctl stack, and awaits signal
1647      * or reports termination.
1648      *
1649      * @return negative if terminated, else 0
1650      */
1651     private int awaitWork(WorkQueue w) {
1652         if (w == null)
1653             return -1;                       // already terminated
1654         int phase = (w.phase + SS_SEQ) & ~UNSIGNALLED;
1655         w.phase = phase | UNSIGNALLED;       // advance phase
1656         long prevCtl = ctl, c;               // enqueue
1657         do {
1658             w.stackPred = (int)prevCtl;
1659             c = ((prevCtl - RC_UNIT) & UC_MASK) | (phase & SP_MASK);
1660         } while (prevCtl != (prevCtl = compareAndExchangeCtl(prevCtl, c)));
1661 
1662         Thread.interrupted();                // clear status
1663         LockSupport.setCurrentBlocker(this); // prepare to block (exit also OK)
1664         long deadline = 0L;                  // nonzero if possibly quiescent
1665         int ac = (int)(c >> RC_SHIFT), md;
1666         if ((md = mode) < 0)                 // pool is terminating
1667             return -1;
1668         else if ((md & SMASK) + ac <= 0) {
1669             boolean checkTermination = (md & SHUTDOWN) != 0;
1670             if ((deadline = System.currentTimeMillis() + keepAlive) == 0L)
1671                 deadline = 1L;               // avoid zero
1672             WorkQueue[] qs = queues;         // check for racing submission
1673             int n = (qs == null) ? 0 : qs.length;
1674             for (int i = 0; i < n; i += 2) {
1675                 WorkQueue q; ForkJoinTask<?>[] a; int cap, b;
1676                 if (ctl != c) {              // already signalled
1677                     checkTermination = false;
1678                     break;
1679                 }
1680                 else if ((q = qs[i]) != null &&
1681                          (a = q.array) != null && (cap = a.length) > 0 &&
1682                          ((b = q.base) != q.top || a[(cap - 1) & b] != null ||
1683                           q.source != 0)) {
1684                     if (compareAndSetCtl(c, prevCtl))
1685                         w.phase = phase;     // self-signal
1686                     checkTermination = false;
1687                     break;
1688                 }
1689             }
1690             if (checkTermination && tryTerminate(false, false))
1691                 return -1;                   // trigger quiescent termination
1692         }
1693 
1694         for (boolean alt = false;;) {        // await activation or termination
1695             if (w.phase >= 0)
1696                 break;
1697             else if (mode < 0)
1698                 return -1;
1699             else if ((c = ctl) == prevCtl)
1700                 Thread.onSpinWait();         // signal in progress
1701             else if (!(alt = !alt))          // check between park calls
1702                 Thread.interrupted();
1703             else if (deadline == 0L)
1704                 LockSupport.park();
1705             else if (deadline - System.currentTimeMillis() > TIMEOUT_SLOP)
1706                 LockSupport.parkUntil(deadline);
1707             else if (((int)c & SMASK) == (w.config & SMASK) &&
1708                      compareAndSetCtl(c, ((UC_MASK & (c - TC_UNIT)) |
1709                                           (prevCtl & SP_MASK)))) {
1710                 w.config |= QUIET;           // sentinel for deregisterWorker
1711                 return -1;                   // drop on timeout
1712             }
1713             else if ((deadline += keepAlive) == 0L)
1714                 deadline = 1L;               // not at head; restart timer
1715         }
1716         return 0;
1717     }
1718 
1719     // Utilities used by ForkJoinTask
1720 
1721     /**
1722      * Returns true if can start terminating if enabled, or already terminated
1723      */
1724     final boolean canStop() {
1725         outer: for (long oldSum = 0L;;) { // repeat until stable
1726             int md; WorkQueue[] qs;  long c;
1727             if ((qs = queues) == null || ((md = mode) & STOP) != 0)
1728                 return true;
1729             if ((md & SMASK) + (int)((c = ctl) >> RC_SHIFT) > 0)
1730                 break;
1731             long checkSum = c;
1732             for (int i = 1; i < qs.length; i += 2) { // scan submitters
1733                 WorkQueue q; ForkJoinTask<?>[] a; int s = 0, cap;
1734                 if ((q = qs[i]) != null && (a = q.array) != null &&
1735                     (cap = a.length) > 0 &&
1736                     ((s = q.top) != q.base || a[(cap - 1) & s] != null ||
1737                      q.source != 0))
1738                     break outer;
1739                 checkSum += (((long)i) << 32) ^ s;
1740             }
1741             if (oldSum == (oldSum = checkSum) && queues == qs)
1742                 return true;
1743         }
1744         return (mode & STOP) != 0; // recheck mode on false return
1745     }
1746 
1747     /**
1748      * Tries to decrement counts (sometimes implicitly) and possibly
1749      * arrange for a compensating worker in preparation for
1750      * blocking. May fail due to interference, in which case -1 is
1751      * returned so caller may retry. A zero return value indicates
1752      * that the caller doesn't need to re-adjust counts when later
1753      * unblocked.
1754      *
1755      * @param c incoming ctl value
1756      * @return UNCOMPENSATE: block then adjust, 0: block, -1 : retry
1757      */
1758     private int tryCompensate(long c) {
1759         Predicate<? super ForkJoinPool> sat;
1760         int md = mode, b = bounds;
1761         // counts are signed; centered at parallelism level == 0
1762         int minActive = (short)(b & SMASK),
1763             maxTotal  = b >>> SWIDTH,
1764             active    = (int)(c >> RC_SHIFT),
1765             total     = (short)(c >>> TC_SHIFT),
1766             sp        = (int)c & ~UNSIGNALLED;
1767         if ((md & SMASK) == 0)
1768             return 0;                  // cannot compensate if parallelism zero
1769         else if (total >= 0) {
1770             if (sp != 0) {                        // activate idle worker
1771                 WorkQueue[] qs; int n; WorkQueue v;
1772                 if ((qs = queues) != null && (n = qs.length) > 0 &&
1773                     (v = qs[sp & (n - 1)]) != null) {
1774                     Thread vt = v.owner;
1775                     long nc = ((long)v.stackPred & SP_MASK) | (UC_MASK & c);
1776                     if (compareAndSetCtl(c, nc)) {
1777                         v.phase = sp;
1778                         LockSupport.unpark(vt);
1779                         return UNCOMPENSATE;
1780                     }
1781                 }
1782                 return -1;                        // retry
1783             }
1784             else if (active > minActive) {        // reduce parallelism
1785                 long nc = ((RC_MASK & (c - RC_UNIT)) | (~RC_MASK & c));
1786                 return compareAndSetCtl(c, nc) ? UNCOMPENSATE : -1;
1787             }
1788         }
1789         if (total < maxTotal) {                   // expand pool
1790             long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK);
1791             return (!compareAndSetCtl(c, nc) ? -1 :
1792                     !createWorker() ? 0 : UNCOMPENSATE);
1793         }
1794         else if (!compareAndSetCtl(c, c))         // validate
1795             return -1;
1796         else if ((sat = saturate) != null && sat.test(this))
1797             return 0;
1798         else
1799             throw new RejectedExecutionException(
1800                 "Thread limit exceeded replacing blocked worker");
1801     }
1802 
1803     /**
1804      * Readjusts RC count; called from ForkJoinTask after blocking.
1805      */
1806     final void uncompensate() {
1807         getAndAddCtl(RC_UNIT);
1808     }
1809 
1810     /**
1811      * Helps if possible until the given task is done.  Scans other
1812      * queues for a task produced by one of w's stealers; returning
1813      * compensated blocking sentinel if none are found.
1814      *
1815      * @param task the task
1816      * @param w caller's WorkQueue
1817      * @param canHelp if false, compensate only
1818      * @return task status on exit, or UNCOMPENSATE for compensated blocking
1819      */
1820     final int helpJoin(ForkJoinTask<?> task, WorkQueue w, boolean canHelp) {
1821         int s = 0;
1822         if (task != null && w != null) {
1823             int wsrc = w.source, wid = w.config & SMASK, r = wid + 2;
1824             boolean scan = true;
1825             long c = 0L;                          // track ctl stability
1826             outer: for (;;) {
1827                 if ((s = task.status) < 0)
1828                     break;
1829                 else if (scan = !scan) {          // previous scan was empty
1830                     if (mode < 0)
1831                         ForkJoinTask.cancelIgnoringExceptions(task);
1832                     else if (c == (c = ctl) && (s = tryCompensate(c)) >= 0)
1833                         break;                    // block
1834                 }
1835                 else if (canHelp) {               // scan for subtasks
1836                     WorkQueue[] qs = queues;
1837                     int n = (qs == null) ? 0 : qs.length, m = n - 1;
1838                     for (int i = n; i > 0; i -= 2, r += 2) {
1839                         int j; WorkQueue q, x, y; ForkJoinTask<?>[] a;
1840                         if ((q = qs[j = r & m]) != null) {
1841                             int sq = q.source & SMASK, cap, b;
1842                             if ((a = q.array) != null && (cap = a.length) > 0) {
1843                                 int k = (cap - 1) & (b = q.base);
1844                                 int nextBase = b + 1, src = j | SRC, sx;
1845                                 ForkJoinTask<?> t = WorkQueue.getSlot(a, k);
1846                                 boolean eligible = sq == wid ||
1847                                     ((x = qs[sq & m]) != null &&   // indirect
1848                                      ((sx = (x.source & SMASK)) == wid ||
1849                                       ((y = qs[sx & m]) != null && // 2-indirect
1850                                        (y.source & SMASK) == wid)));
1851                                 if ((s = task.status) < 0)
1852                                     break outer;
1853                                 else if ((q.source & SMASK) != sq ||
1854                                          q.base != b)
1855                                     scan = true;          // inconsistent
1856                                 else if (t == null)
1857                                     scan |= (a[nextBase & (cap - 1)] != null ||
1858                                              q.top != b); // lagging
1859                                 else if (eligible) {
1860                                     if (WorkQueue.casSlotToNull(a, k, t)) {
1861                                         q.base = nextBase;
1862                                         w.source = src;
1863                                         t.doExec();
1864                                         w.source = wsrc;
1865                                     }
1866                                     scan = true;
1867                                     break;
1868                                 }
1869                             }
1870                         }
1871                     }
1872                 }
1873             }
1874         }
1875         return s;
1876     }
1877 
1878     /**
1879      * Extra helpJoin steps for CountedCompleters.  Scans for and runs
1880      * subtasks of the given root task, returning if none are found.
1881      *
1882      * @param task root of CountedCompleter computation
1883      * @param w caller's WorkQueue
1884      * @param owned true if owned by a ForkJoinWorkerThread
1885      * @return task status on exit
1886      */
1887     final int helpComplete(ForkJoinTask<?> task, WorkQueue w, boolean owned) {
1888         int s = 0;
1889         if (task != null && w != null) {
1890             int r = w.config;
1891             boolean scan = true, locals = true;
1892             long c = 0L;
1893             outer: for (;;) {
1894                 if (locals) {                     // try locals before scanning
1895                     if ((s = w.helpComplete(task, owned, 0)) < 0)
1896                         break;
1897                     locals = false;
1898                 }
1899                 else if ((s = task.status) < 0)
1900                     break;
1901                 else if (scan = !scan) {
1902                     if (c == (c = ctl))
1903                         break;
1904                 }
1905                 else {                            // scan for subtasks
1906                     WorkQueue[] qs = queues;
1907                     int n = (qs == null) ? 0 : qs.length;
1908                     for (int i = n; i > 0; --i, ++r) {
1909                         int j, cap, b; WorkQueue q; ForkJoinTask<?>[] a;
1910                         boolean eligible = false;
1911                         if ((q = qs[j = r & (n - 1)]) != null &&
1912                             (a = q.array) != null && (cap = a.length) > 0) {
1913                             int k = (cap - 1) & (b = q.base), nextBase = b + 1;
1914                             ForkJoinTask<?> t = WorkQueue.getSlot(a, k);
1915                             if (t instanceof CountedCompleter) {
1916                                 CountedCompleter<?> f = (CountedCompleter<?>)t;
1917                                 do {} while (!(eligible = (f == task)) &&
1918                                              (f = f.completer) != null);
1919                             }
1920                             if ((s = task.status) < 0)
1921                                 break outer;
1922                             else if (q.base != b)
1923                                 scan = true;       // inconsistent
1924                             else if (t == null)
1925                                 scan |= (a[nextBase & (cap - 1)] != null ||
1926                                          q.top != b);
1927                             else if (eligible) {
1928                                 if (WorkQueue.casSlotToNull(a, k, t)) {
1929                                     q.setBaseOpaque(nextBase);
1930                                     t.doExec();
1931                                     locals = true;
1932                                 }
1933                                 scan = true;
1934                                 break;
1935                             }
1936                         }
1937                     }
1938                 }
1939             }
1940         }
1941         return s;
1942     }
1943 
1944     /**
1945      * Scans for and returns a polled task, if available.  Used only
1946      * for untracked polls. Begins scan at an index (scanRover)
1947      * advanced on each call, to avoid systematic unfairness.
1948      *
1949      * @param submissionsOnly if true, only scan submission queues
1950      */
1951     private ForkJoinTask<?> pollScan(boolean submissionsOnly) {
1952         VarHandle.acquireFence();
1953         int r = scanRover += 0x61c88647; // Weyl increment; raciness OK
1954         if (submissionsOnly)             // even indices only
1955             r &= ~1;
1956         int step = (submissionsOnly) ? 2 : 1;
1957         WorkQueue[] qs; int n;
1958         while ((qs = queues) != null && (n = qs.length) > 0) {
1959             boolean scan = false;
1960             for (int i = 0; i < n; i += step) {
1961                 int j, cap, b; WorkQueue q; ForkJoinTask<?>[] a;
1962                 if ((q = qs[j = (n - 1) & (r + i)]) != null &&
1963                     (a = q.array) != null && (cap = a.length) > 0) {
1964                     int k = (cap - 1) & (b = q.base), nextBase = b + 1;
1965                     ForkJoinTask<?> t = WorkQueue.getSlot(a, k);
1966                     if (q.base != b)
1967                         scan = true;
1968                     else if (t == null)
1969                         scan |= (q.top != b || a[nextBase & (cap - 1)] != null);
1970                     else if (!WorkQueue.casSlotToNull(a, k, t))
1971                         scan = true;
1972                     else {
1973                         q.setBaseOpaque(nextBase);
1974                         return t;
1975                     }
1976                 }
1977             }
1978             if (!scan && queues == qs)
1979                 break;
1980         }
1981         return null;
1982     }
1983 
1984     /**
1985      * Runs tasks until {@code isQuiescent()}. Rather than blocking
1986      * when tasks cannot be found, rescans until all others cannot
1987      * find tasks either.
1988      *
1989      * @param nanos max wait time (Long.MAX_VALUE if effectively untimed)
1990      * @param interruptible true if return on interrupt
1991      * @return positive if quiescent, negative if interrupted, else 0
1992      */
1993     final int helpQuiescePool(WorkQueue w, long nanos, boolean interruptible) {
1994         if (w == null)
1995             return 0;
1996         long startTime = System.nanoTime(), parkTime = 0L;
1997         int prevSrc = w.source, wsrc = prevSrc, cfg = w.config, r = cfg + 1;
1998         for (boolean active = true, locals = true;;) {
1999             boolean busy = false, scan = false;
2000             if (locals) {  // run local tasks before (re)polling
2001                 locals = false;
2002                 for (ForkJoinTask<?> u; (u = w.nextLocalTask(cfg)) != null;)
2003                     u.doExec();
2004             }
2005             WorkQueue[] qs = queues;
2006             int n = (qs == null) ? 0 : qs.length;
2007             for (int i = n; i > 0; --i, ++r) {
2008                 int j, b, cap; WorkQueue q; ForkJoinTask<?>[] a;
2009                 if ((q = qs[j = (n - 1) & r]) != null && q != w &&
2010                     (a = q.array) != null && (cap = a.length) > 0) {
2011                     int k = (cap - 1) & (b = q.base);
2012                     int nextBase = b + 1, src = j | SRC;
2013                     ForkJoinTask<?> t = WorkQueue.getSlot(a, k);
2014                     if (q.base != b)
2015                         busy = scan = true;
2016                     else if (t != null) {
2017                         busy = scan = true;
2018                         if (!active) {    // increment before taking
2019                             active = true;
2020                             getAndAddCtl(RC_UNIT);
2021                         }
2022                         if (WorkQueue.casSlotToNull(a, k, t)) {
2023                             q.base = nextBase;
2024                             w.source = src;
2025                             t.doExec();
2026                             w.source = wsrc = prevSrc;
2027                             locals = true;
2028                         }
2029                         break;
2030                     }
2031                     else if (!busy) {
2032                         if (q.top != b || a[nextBase & (cap - 1)] != null)
2033                             busy = scan = true;
2034                         else if (q.source != QUIET && q.phase >= 0)
2035                             busy = true;
2036                     }
2037                 }
2038             }
2039             VarHandle.acquireFence();
2040             if (!scan && queues == qs) {
2041                 boolean interrupted;
2042                 if (!busy) {
2043                     w.source = prevSrc;
2044                     if (!active)
2045                         getAndAddCtl(RC_UNIT);
2046                     return 1;
2047                 }
2048                 if (wsrc != QUIET)
2049                     w.source = wsrc = QUIET;
2050                 if (active) {                 // decrement
2051                     active = false;
2052                     parkTime = 0L;
2053                     getAndAddCtl(RC_MASK & -RC_UNIT);
2054                 }
2055                 else if (parkTime == 0L) {
2056                     parkTime = 1L << 10; // initially about 1 usec
2057                     Thread.yield();
2058                 }
2059                 else if ((interrupted = interruptible && Thread.interrupted()) ||
2060                          System.nanoTime() - startTime > nanos) {
2061                     getAndAddCtl(RC_UNIT);
2062                     return interrupted ? -1 : 0;
2063                 }
2064                 else {
2065                     LockSupport.parkNanos(this, parkTime);
2066                     if (parkTime < nanos >>> 8 && parkTime < 1L << 20)
2067                         parkTime <<= 1;  // max sleep approx 1 sec or 1% nanos
2068                 }
2069             }
2070         }
2071     }
2072 
2073     /**
2074      * Helps quiesce from external caller until done, interrupted, or timeout
2075      *
2076      * @param nanos max wait time (Long.MAX_VALUE if effectively untimed)
2077      * @param interruptible true if return on interrupt
2078      * @return positive if quiescent, negative if interrupted, else 0
2079      */
2080     final int externalHelpQuiescePool(long nanos, boolean interruptible) {
2081         for (long startTime = System.nanoTime(), parkTime = 0L;;) {
2082             ForkJoinTask<?> t;
2083             if ((t = pollScan(false)) != null) {
2084                 t.doExec();
2085                 parkTime = 0L;
2086             }
2087             else if (canStop())
2088                 return 1;
2089             else if (parkTime == 0L) {
2090                 parkTime = 1L << 10;
2091                 Thread.yield();
2092             }
2093             else if ((System.nanoTime() - startTime) > nanos)
2094                 return 0;
2095             else if (interruptible && Thread.interrupted())
2096                 return -1;
2097             else {
2098                 LockSupport.parkNanos(this, parkTime);
2099                 if (parkTime < nanos >>> 8 && parkTime < 1L << 20)
2100                     parkTime <<= 1;
2101             }
2102         }
2103     }
2104 
2105     /**
2106      * Gets and removes a local or stolen task for the given worker.
2107      *
2108      * @return a task, if available
2109      */
2110     final ForkJoinTask<?> nextTaskFor(WorkQueue w) {
2111         ForkJoinTask<?> t;
2112         if (w == null || (t = w.nextLocalTask(w.config)) == null)
2113             t = pollScan(false);
2114         return t;
2115     }
2116 
2117     // External operations
2118 
2119     /**
2120      * Finds and locks a WorkQueue for an external submitter, or
2121      * returns null if shutdown or terminating.
2122      */
2123     final WorkQueue submissionQueue() {
2124         int r;
2125         if ((r = ThreadLocalRandom.getProbe()) == 0) {
2126             ThreadLocalRandom.localInit();           // initialize caller's probe
2127             r = ThreadLocalRandom.getProbe();
2128         }
2129         for (int id = r << 1;;) {                    // even indices only
2130             int md = mode, n, i; WorkQueue q; ReentrantLock lock;
2131             WorkQueue[] qs = queues;
2132             if ((md & SHUTDOWN) != 0 || qs == null || (n = qs.length) <= 0)
2133                 return null;
2134             else if ((q = qs[i = (n - 1) & id]) == null) {
2135                 if ((lock = registrationLock) != null) {
2136                     WorkQueue w = new WorkQueue(id | SRC);
2137                     lock.lock();                    // install under lock
2138                     if (qs[i] == null)
2139                         qs[i] = w;                  // else lost race; discard
2140                     lock.unlock();
2141                 }
2142             }
2143             else if (!q.tryLock())                  // move and restart
2144                 id = (r = ThreadLocalRandom.advanceProbe(r)) << 1;
2145             else
2146                 return q;
2147         }
2148     }
2149 
2150     /**
2151      * Adds the given task to an external submission queue, or throws
2152      * exception if shutdown or terminating.
2153      *
2154      * @param task the task. Caller must ensure non-null.
2155      */
2156     final void externalPush(ForkJoinTask<?> task) {
2157         WorkQueue q;
2158         if ((q = submissionQueue()) == null)
2159             throw new RejectedExecutionException(); // shutdown or disabled
2160         else if (q.lockedPush(task))
2161             signalWork();
2162     }
2163 
2164     /**
2165      * Pushes a possibly-external submission.
2166      */
2167     private <T> ForkJoinTask<T> externalSubmit(ForkJoinTask<T> task) {
2168         Thread t; ForkJoinWorkerThread wt; WorkQueue q;
2169         if (task == null)
2170             throw new NullPointerException();
2171         if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) &&
2172             (q = (wt = (ForkJoinWorkerThread)t).workQueue) != null &&
2173             wt.pool == this)
2174             q.push(task, this);
2175         else
2176             externalPush(task);
2177         return task;
2178     }
2179 
2180     /**
2181      * Pushes an external submission to the current carrier thread's work queue if
2182      * possible. This method is invoked (reflectively) by the virtual thread
2183      * implementation.
2184      *
2185      * @param signalOnEmpty true to signal a worker when the queue is empty
2186      */
2187     private void externalExecuteTask(Runnable task, boolean signalOnEmpty) {
2188         var forkJoinTask = new ForkJoinTask.RunnableExecuteAction(task);
2189         Thread t = VirtualThreads.currentCarrierThread();
2190         if (t instanceof ForkJoinWorkerThread wt && wt.pool == this) {
2191             wt.workQueue.push(forkJoinTask, this, signalOnEmpty);
2192         } else {
2193             externalPush(forkJoinTask);
2194         }
2195     }
2196 
2197     /**
2198      * Returns common pool queue for an external thread that has
2199      * possibly ever submitted a common pool task (nonzero probe), or
2200      * null if none.
2201      */
2202     static WorkQueue commonQueue() {
2203         ForkJoinPool p; WorkQueue[] qs;
2204         int r = ThreadLocalRandom.getProbe(), n;
2205         return ((p = CommonPool.common) != null && (qs = p.queues) != null &&
2206                 (n = qs.length) > 0 && r != 0) ?
2207             qs[(n - 1) & (r << 1)] : null;
2208     }
2209 
2210     /**
2211      * Returns queue for an external thread, if one exists
2212      */
2213     final WorkQueue externalQueue() {
2214         WorkQueue[] qs;
2215         int r = ThreadLocalRandom.getProbe(), n;
2216         return ((qs = queues) != null && (n = qs.length) > 0 && r != 0) ?
2217             qs[(n - 1) & (r << 1)] : null;
2218     }
2219 
2220     /**
2221      * If the given executor is a ForkJoinPool, poll and execute
2222      * AsynchronousCompletionTasks from worker's queue until none are
2223      * available or blocker is released.
2224      */
2225     static void helpAsyncBlocker(Executor e, ManagedBlocker blocker) {
2226         WorkQueue w = null; Thread t; ForkJoinWorkerThread wt;
2227         if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) {
2228             if ((wt = (ForkJoinWorkerThread)t).pool == e)
2229                 w = wt.workQueue;
2230         }
2231         else if (e instanceof ForkJoinPool)
2232             w = ((ForkJoinPool)e).externalQueue();
2233         if (w != null)
2234             w.helpAsyncBlocker(blocker);
2235     }
2236 
2237     /**
2238      * Returns a cheap heuristic guide for task partitioning when
2239      * programmers, frameworks, tools, or languages have little or no
2240      * idea about task granularity.  In essence, by offering this
2241      * method, we ask users only about tradeoffs in overhead vs
2242      * expected throughput and its variance, rather than how finely to
2243      * partition tasks.
2244      *
2245      * In a steady state strict (tree-structured) computation, each
2246      * thread makes available for stealing enough tasks for other
2247      * threads to remain active. Inductively, if all threads play by
2248      * the same rules, each thread should make available only a
2249      * constant number of tasks.
2250      *
2251      * The minimum useful constant is just 1. But using a value of 1
2252      * would require immediate replenishment upon each steal to
2253      * maintain enough tasks, which is infeasible.  Further,
2254      * partitionings/granularities of offered tasks should minimize
2255      * steal rates, which in general means that threads nearer the top
2256      * of computation tree should generate more than those nearer the
2257      * bottom. In perfect steady state, each thread is at
2258      * approximately the same level of computation tree. However,
2259      * producing extra tasks amortizes the uncertainty of progress and
2260      * diffusion assumptions.
2261      *
2262      * So, users will want to use values larger (but not much larger)
2263      * than 1 to both smooth over transient shortages and hedge
2264      * against uneven progress; as traded off against the cost of
2265      * extra task overhead. We leave the user to pick a threshold
2266      * value to compare with the results of this call to guide
2267      * decisions, but recommend values such as 3.
2268      *
2269      * When all threads are active, it is on average OK to estimate
2270      * surplus strictly locally. In steady-state, if one thread is
2271      * maintaining say 2 surplus tasks, then so are others. So we can
2272      * just use estimated queue length.  However, this strategy alone
2273      * leads to serious mis-estimates in some non-steady-state
2274      * conditions (ramp-up, ramp-down, other stalls). We can detect
2275      * many of these by further considering the number of "idle"
2276      * threads, that are known to have zero queued tasks, so
2277      * compensate by a factor of (#idle/#active) threads.
2278      */
2279     static int getSurplusQueuedTaskCount() {
2280         Thread t; ForkJoinWorkerThread wt; ForkJoinPool pool; WorkQueue q;
2281         if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) &&
2282             (pool = (wt = (ForkJoinWorkerThread)t).pool) != null &&
2283             (q = wt.workQueue) != null) {
2284             int p = pool.mode & SMASK;
2285             int a = p + (int)(pool.ctl >> RC_SHIFT);
2286             int n = q.top - q.base;
2287             return n - (a > (p >>>= 1) ? 0 :
2288                         a > (p >>>= 1) ? 1 :
2289                         a > (p >>>= 1) ? 2 :
2290                         a > (p >>>= 1) ? 4 :
2291                         8);
2292         }
2293         return 0;
2294     }
2295 
2296     // Termination
2297 
2298     /**
2299      * Possibly initiates and/or completes termination.
2300      *
2301      * @param now if true, unconditionally terminate, else only
2302      * if no work and no active workers
2303      * @param enable if true, terminate when next possible
2304      * @return true if terminating or terminated
2305      */
2306     private boolean tryTerminate(boolean now, boolean enable) {
2307         int md; // try to set SHUTDOWN, then STOP, then help terminate
2308         if (((md = mode) & SHUTDOWN) == 0) {
2309             if (!enable)
2310                 return false;
2311             md = getAndBitwiseOrMode(SHUTDOWN);
2312         }
2313         if ((md & STOP) == 0) {
2314             if (!now && !canStop())
2315                 return false;
2316             md = getAndBitwiseOrMode(STOP);
2317         }
2318         for (boolean rescan = true;;) { // repeat until no changes
2319             boolean changed = false;
2320             for (ForkJoinTask<?> t; (t = pollScan(false)) != null; ) {
2321                 changed = true;
2322                 ForkJoinTask.cancelIgnoringExceptions(t); // help cancel
2323             }
2324             WorkQueue[] qs; int n; WorkQueue q; Thread thread;
2325             if ((qs = queues) != null && (n = qs.length) > 0) {
2326                 for (int j = 1; j < n; j += 2) { // unblock other workers
2327                     if ((q = qs[j]) != null && (thread = q.owner) != null &&
2328                         !thread.isInterrupted()) {
2329                         changed = true;
2330                         try {
2331                             thread.interrupt();
2332                         } catch (Throwable ignore) {
2333                         }
2334                     }
2335                 }
2336             }
2337             ReentrantLock lock; Condition cond; // signal when no workers
2338             if (((md = mode) & TERMINATED) == 0 &&
2339                 (md & SMASK) + (short)(ctl >>> TC_SHIFT) <= 0 &&
2340                 (getAndBitwiseOrMode(TERMINATED) & TERMINATED) == 0 &&
2341                 (lock = registrationLock) != null) {
2342                 lock.lock();
2343                 if ((cond = termination) != null)
2344                     cond.signalAll();
2345                 lock.unlock();
2346                 container.close();
2347             }
2348             if (changed)
2349                 rescan = true;
2350             else if (rescan)
2351                 rescan = false;
2352             else
2353                 break;
2354         }
2355         return true;
2356     }
2357 
2358     // Exported methods
2359 
2360     // Constructors
2361 
2362     /**
2363      * Creates a {@code ForkJoinPool} with parallelism equal to {@link
2364      * java.lang.Runtime#availableProcessors}, using defaults for all
2365      * other parameters (see {@link #ForkJoinPool(int,
2366      * ForkJoinWorkerThreadFactory, UncaughtExceptionHandler, boolean,
2367      * int, int, int, Predicate, long, TimeUnit)}).
2368      *
2369      * @throws SecurityException if a security manager exists and
2370      *         the caller is not permitted to modify threads
2371      *         because it does not hold {@link
2372      *         java.lang.RuntimePermission}{@code ("modifyThread")}
2373      */
2374     public ForkJoinPool() {
2375         this(Math.min(MAX_CAP, Runtime.getRuntime().availableProcessors()),
2376              defaultForkJoinWorkerThreadFactory, null, false,
2377              0, MAX_CAP, 1, null, DEFAULT_KEEPALIVE, TimeUnit.MILLISECONDS);
2378     }
2379 
2380     /**
2381      * Creates a {@code ForkJoinPool} with the indicated parallelism
2382      * level, using defaults for all other parameters (see {@link
2383      * #ForkJoinPool(int, ForkJoinWorkerThreadFactory,
2384      * UncaughtExceptionHandler, boolean, int, int, int, Predicate,
2385      * long, TimeUnit)}).
2386      *
2387      * @param parallelism the parallelism level
2388      * @throws IllegalArgumentException if parallelism less than or
2389      *         equal to zero, or greater than implementation limit
2390      * @throws SecurityException if a security manager exists and
2391      *         the caller is not permitted to modify threads
2392      *         because it does not hold {@link
2393      *         java.lang.RuntimePermission}{@code ("modifyThread")}
2394      */
2395     public ForkJoinPool(int parallelism) {
2396         this(parallelism, defaultForkJoinWorkerThreadFactory, null, false,
2397              0, MAX_CAP, 1, null, DEFAULT_KEEPALIVE, TimeUnit.MILLISECONDS);
2398     }
2399 
2400     /**
2401      * Creates a {@code ForkJoinPool} with the given parameters (using
2402      * defaults for others -- see {@link #ForkJoinPool(int,
2403      * ForkJoinWorkerThreadFactory, UncaughtExceptionHandler, boolean,
2404      * int, int, int, Predicate, long, TimeUnit)}).
2405      *
2406      * @param parallelism the parallelism level. For default value,
2407      * use {@link java.lang.Runtime#availableProcessors}.
2408      * @param factory the factory for creating new threads. For default value,
2409      * use {@link #defaultForkJoinWorkerThreadFactory}.
2410      * @param handler the handler for internal worker threads that
2411      * terminate due to unrecoverable errors encountered while executing
2412      * tasks. For default value, use {@code null}.
2413      * @param asyncMode if true,
2414      * establishes local first-in-first-out scheduling mode for forked
2415      * tasks that are never joined. This mode may be more appropriate
2416      * than default locally stack-based mode in applications in which
2417      * worker threads only process event-style asynchronous tasks.
2418      * For default value, use {@code false}.
2419      * @throws IllegalArgumentException if parallelism less than or
2420      *         equal to zero, or greater than implementation limit
2421      * @throws NullPointerException if the factory is null
2422      * @throws SecurityException if a security manager exists and
2423      *         the caller is not permitted to modify threads
2424      *         because it does not hold {@link
2425      *         java.lang.RuntimePermission}{@code ("modifyThread")}
2426      */
2427     public ForkJoinPool(int parallelism,
2428                         ForkJoinWorkerThreadFactory factory,
2429                         UncaughtExceptionHandler handler,
2430                         boolean asyncMode) {
2431         this(parallelism, factory, handler, asyncMode,
2432              0, MAX_CAP, 1, null, DEFAULT_KEEPALIVE, TimeUnit.MILLISECONDS);
2433     }
2434 
2435     /**
2436      * Creates a {@code ForkJoinPool} with the given parameters.
2437      *
2438      * @param parallelism the parallelism level. For default value,
2439      * use {@link java.lang.Runtime#availableProcessors}.
2440      *
2441      * @param factory the factory for creating new threads. For
2442      * default value, use {@link #defaultForkJoinWorkerThreadFactory}.
2443      *
2444      * @param handler the handler for internal worker threads that
2445      * terminate due to unrecoverable errors encountered while
2446      * executing tasks. For default value, use {@code null}.
2447      *
2448      * @param asyncMode if true, establishes local first-in-first-out
2449      * scheduling mode for forked tasks that are never joined. This
2450      * mode may be more appropriate than default locally stack-based
2451      * mode in applications in which worker threads only process
2452      * event-style asynchronous tasks.  For default value, use {@code
2453      * false}.
2454      *
2455      * @param corePoolSize the number of threads to keep in the pool
2456      * (unless timed out after an elapsed keep-alive). Normally (and
2457      * by default) this is the same value as the parallelism level,
2458      * but may be set to a larger value to reduce dynamic overhead if
2459      * tasks regularly block. Using a smaller value (for example
2460      * {@code 0}) has the same effect as the default.
2461      *
2462      * @param maximumPoolSize the maximum number of threads allowed.
2463      * When the maximum is reached, attempts to replace blocked
2464      * threads fail.  (However, because creation and termination of
2465      * different threads may overlap, and may be managed by the given
2466      * thread factory, this value may be transiently exceeded.)  To
2467      * arrange the same value as is used by default for the common
2468      * pool, use {@code 256} plus the {@code parallelism} level. (By
2469      * default, the common pool allows a maximum of 256 spare
2470      * threads.)  Using a value (for example {@code
2471      * Integer.MAX_VALUE}) larger than the implementation's total
2472      * thread limit has the same effect as using this limit (which is
2473      * the default).
2474      *
2475      * @param minimumRunnable the minimum allowed number of core
2476      * threads not blocked by a join or {@link ManagedBlocker}.  To
2477      * ensure progress, when too few unblocked threads exist and
2478      * unexecuted tasks may exist, new threads are constructed, up to
2479      * the given maximumPoolSize.  For the default value, use {@code
2480      * 1}, that ensures liveness.  A larger value might improve
2481      * throughput in the presence of blocked activities, but might
2482      * not, due to increased overhead.  A value of zero may be
2483      * acceptable when submitted tasks cannot have dependencies
2484      * requiring additional threads.
2485      *
2486      * @param saturate if non-null, a predicate invoked upon attempts
2487      * to create more than the maximum total allowed threads.  By
2488      * default, when a thread is about to block on a join or {@link
2489      * ManagedBlocker}, but cannot be replaced because the
2490      * maximumPoolSize would be exceeded, a {@link
2491      * RejectedExecutionException} is thrown.  But if this predicate
2492      * returns {@code true}, then no exception is thrown, so the pool
2493      * continues to operate with fewer than the target number of
2494      * runnable threads, which might not ensure progress.
2495      *
2496      * @param keepAliveTime the elapsed time since last use before
2497      * a thread is terminated (and then later replaced if needed).
2498      * For the default value, use {@code 60, TimeUnit.SECONDS}.
2499      *
2500      * @param unit the time unit for the {@code keepAliveTime} argument
2501      *
2502      * @throws IllegalArgumentException if parallelism is less than or
2503      *         equal to zero, or is greater than implementation limit,
2504      *         or if maximumPoolSize is less than parallelism,
2505      *         of if the keepAliveTime is less than or equal to zero.
2506      * @throws NullPointerException if the factory is null
2507      * @throws SecurityException if a security manager exists and
2508      *         the caller is not permitted to modify threads
2509      *         because it does not hold {@link
2510      *         java.lang.RuntimePermission}{@code ("modifyThread")}
2511      * @since 9
2512      */
2513     public ForkJoinPool(int parallelism,
2514                         ForkJoinWorkerThreadFactory factory,
2515                         UncaughtExceptionHandler handler,
2516                         boolean asyncMode,
2517                         int corePoolSize,
2518                         int maximumPoolSize,
2519                         int minimumRunnable,
2520                         Predicate<? super ForkJoinPool> saturate,
2521                         long keepAliveTime,
2522                         TimeUnit unit) {
2523         checkPermission();
2524         int p = parallelism;
2525         if (p <= 0 || p > MAX_CAP || p > maximumPoolSize || keepAliveTime <= 0L)
2526             throw new IllegalArgumentException();
2527         if (factory == null || unit == null)
2528             throw new NullPointerException();
2529         this.factory = factory;
2530         this.ueh = handler;
2531         this.saturate = saturate;
2532         this.keepAlive = Math.max(unit.toMillis(keepAliveTime), TIMEOUT_SLOP);
2533         int size = 1 << (33 - Integer.numberOfLeadingZeros(p - 1));
2534         int corep = Math.min(Math.max(corePoolSize, p), MAX_CAP);
2535         int maxSpares = Math.min(maximumPoolSize, MAX_CAP) - p;
2536         int minAvail = Math.min(Math.max(minimumRunnable, 0), MAX_CAP);
2537         this.bounds = ((minAvail - p) & SMASK) | (maxSpares << SWIDTH);
2538         this.mode = p | (asyncMode ? FIFO : 0);
2539         this.ctl = ((((long)(-corep) << TC_SHIFT) & TC_MASK) |
2540                     (((long)(-p)     << RC_SHIFT) & RC_MASK));
2541         this.registrationLock = new ReentrantLock();
2542         this.queues = new WorkQueue[size];
2543         String pid = Integer.toString(getAndAddPoolIds(1) + 1);
2544         this.workerNamePrefix = "ForkJoinPool-" + pid + "-worker-";
2545 
2546         String name = "ForkJoinPool-" + pid;
2547         this.container = SharedThreadContainer.create(name);
2548     }
2549 
2550     // helper method for commonPool constructor
2551     private static Object newInstanceFromSystemProperty(String property)
2552         throws ReflectiveOperationException {
2553         String className = System.getProperty(property);
2554         return (className == null)
2555             ? null
2556             : ClassLoader.getSystemClassLoader().loadClass(className)
2557             .getConstructor().newInstance();
2558     }
2559 
2560     /**
2561      * Constructor for common pool using parameters possibly
2562      * overridden by system properties
2563      */
2564     private ForkJoinPool(byte forCommonPoolOnly) {
2565         int parallelism = Math.max(1, Runtime.getRuntime().availableProcessors() - 1);
2566         ForkJoinWorkerThreadFactory fac = null;
2567         UncaughtExceptionHandler handler = null;
2568         try {  // ignore exceptions in accessing/parsing properties
2569             fac = (ForkJoinWorkerThreadFactory) newInstanceFromSystemProperty(
2570                 "java.util.concurrent.ForkJoinPool.common.threadFactory");
2571             handler = (UncaughtExceptionHandler) newInstanceFromSystemProperty(
2572                 "java.util.concurrent.ForkJoinPool.common.exceptionHandler");
2573             String pp = System.getProperty
2574                 ("java.util.concurrent.ForkJoinPool.common.parallelism");
2575             if (pp != null)
2576                 parallelism = Integer.parseInt(pp);
2577         } catch (Exception ignore) {
2578         }
2579         this.ueh = handler;
2580         this.keepAlive = DEFAULT_KEEPALIVE;
2581         this.saturate = null;
2582         this.workerNamePrefix = null;
2583         int p = Math.min(Math.max(parallelism, 0), MAX_CAP), size;
2584         this.mode = p;
2585         if (p > 0) {
2586             size = 1 << (33 - Integer.numberOfLeadingZeros(p - 1));
2587             this.bounds = ((1 - p) & SMASK) | (CommonPool.COMMON_MAX_SPARES << SWIDTH);
2588             this.ctl = ((((long)(-p) << TC_SHIFT) & TC_MASK) |
2589                         (((long)(-p) << RC_SHIFT) & RC_MASK));
2590         } else {  // zero min, max, spare counts, 1 slot
2591             size = 1;
2592             this.bounds = 0;
2593             this.ctl = 0L;
2594         }
2595         this.factory = (fac != null) ? fac :
2596             new DefaultCommonPoolForkJoinWorkerThreadFactory();
2597         this.queues = new WorkQueue[size];
2598         this.registrationLock = new ReentrantLock();
2599 
2600         String name = "ForkJoinPool.commonPool";
2601         this.container = SharedThreadContainer.create(name);
2602     }
2603 
2604     /**
2605      * Returns the common pool instance. This pool is statically
2606      * constructed; its run state is unaffected by attempts to {@link
2607      * #shutdown} or {@link #shutdownNow}. However this pool and any
2608      * ongoing processing are automatically terminated upon program
2609      * {@link System#exit}.  Any program that relies on asynchronous
2610      * task processing to complete before program termination should
2611      * invoke {@code commonPool().}{@link #awaitQuiescence awaitQuiescence},
2612      * before exit.
2613      *
2614      * @return the common pool instance
2615      * @since 1.8
2616      */
2617     public static ForkJoinPool commonPool() {
2618         // assert common != null : "static init error";
2619         return CommonPool.common;
2620     }
2621 
2622     // Execution methods
2623 
2624     /**
2625      * Performs the given task, returning its result upon completion.
2626      * If the computation encounters an unchecked Exception or Error,
2627      * it is rethrown as the outcome of this invocation.  Rethrown
2628      * exceptions behave in the same way as regular exceptions, but,
2629      * when possible, contain stack traces (as displayed for example
2630      * using {@code ex.printStackTrace()}) of both the current thread
2631      * as well as the thread actually encountering the exception;
2632      * minimally only the latter.
2633      *
2634      * @param task the task
2635      * @param <T> the type of the task's result
2636      * @return the task's result
2637      * @throws NullPointerException if the task is null
2638      * @throws RejectedExecutionException if the task cannot be
2639      *         scheduled for execution
2640      */
2641     public <T> T invoke(ForkJoinTask<T> task) {
2642         externalSubmit(task);
2643         return task.joinForPoolInvoke(this);
2644     }
2645 
2646     /**
2647      * Arranges for (asynchronous) execution of the given task.
2648      *
2649      * @param task the task
2650      * @throws NullPointerException if the task is null
2651      * @throws RejectedExecutionException if the task cannot be
2652      *         scheduled for execution
2653      */
2654     public void execute(ForkJoinTask<?> task) {
2655         externalSubmit(task);
2656     }
2657 
2658     // AbstractExecutorService methods
2659 
2660     /**
2661      * @throws NullPointerException if the task is null
2662      * @throws RejectedExecutionException if the task cannot be
2663      *         scheduled for execution
2664      */
2665     @Override
2666     @SuppressWarnings("unchecked")
2667     public void execute(Runnable task) {
2668         externalSubmit((task instanceof ForkJoinTask<?>)
2669                        ? (ForkJoinTask<Void>) task // avoid re-wrap
2670                        : new ForkJoinTask.RunnableExecuteAction(task));
2671     }
2672 
2673     /**
2674      * Submits a ForkJoinTask for execution.
2675      *
2676      * @param task the task to submit
2677      * @param <T> the type of the task's result
2678      * @return the task
2679      * @throws NullPointerException if the task is null
2680      * @throws RejectedExecutionException if the task cannot be
2681      *         scheduled for execution
2682      */
2683     public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) {
2684         return externalSubmit(task);
2685     }
2686 
2687     /**
2688      * @throws NullPointerException if the task is null
2689      * @throws RejectedExecutionException if the task cannot be
2690      *         scheduled for execution
2691      */
2692     @Override
2693     public <T> ForkJoinTask<T> submit(Callable<T> task) {
2694         return externalSubmit(new ForkJoinTask.AdaptedCallable<T>(task));
2695     }
2696 
2697     /**
2698      * @throws NullPointerException if the task is null
2699      * @throws RejectedExecutionException if the task cannot be
2700      *         scheduled for execution
2701      */
2702     @Override
2703     public <T> ForkJoinTask<T> submit(Runnable task, T result) {
2704         return externalSubmit(new ForkJoinTask.AdaptedRunnable<T>(task, result));
2705     }
2706 
2707     /**
2708      * @throws NullPointerException if the task is null
2709      * @throws RejectedExecutionException if the task cannot be
2710      *         scheduled for execution
2711      */
2712     @Override
2713     @SuppressWarnings("unchecked")
2714     public ForkJoinTask<?> submit(Runnable task) {
2715         return externalSubmit((task instanceof ForkJoinTask<?>)
2716             ? (ForkJoinTask<Void>) task // avoid re-wrap
2717             : new ForkJoinTask.AdaptedRunnableAction(task));
2718     }
2719 
2720     /**
2721      * @throws NullPointerException       {@inheritDoc}
2722      * @throws RejectedExecutionException {@inheritDoc}
2723      */
2724     @Override
2725     public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) {
2726         ArrayList<Future<T>> futures = new ArrayList<>(tasks.size());
2727         try {
2728             for (Callable<T> t : tasks) {
2729                 ForkJoinTask<T> f =
2730                     new ForkJoinTask.AdaptedInterruptibleCallable<T>(t);
2731                 futures.add(f);
2732                 externalSubmit(f);
2733             }
2734             for (int i = futures.size() - 1; i >= 0; --i)
2735                 ((ForkJoinTask<?>)futures.get(i)).awaitPoolInvoke(this);
2736             return futures;
2737         } catch (Throwable t) {
2738             for (Future<T> e : futures)
2739                 ForkJoinTask.cancelIgnoringExceptions(e);
2740             throw t;
2741         }
2742     }
2743 
2744     @Override
2745     public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks,
2746                                          long timeout, TimeUnit unit)
2747         throws InterruptedException {
2748         long nanos = unit.toNanos(timeout);
2749         ArrayList<Future<T>> futures = new ArrayList<>(tasks.size());
2750         try {
2751             for (Callable<T> t : tasks) {
2752                 ForkJoinTask<T> f =
2753                     new ForkJoinTask.AdaptedInterruptibleCallable<T>(t);
2754                 futures.add(f);
2755                 externalSubmit(f);
2756             }
2757             long startTime = System.nanoTime(), ns = nanos;
2758             boolean timedOut = (ns < 0L);
2759             for (int i = futures.size() - 1; i >= 0; --i) {
2760                 Future<T> f = futures.get(i);
2761                 if (!f.isDone()) {
2762                     if (timedOut)
2763                         ForkJoinTask.cancelIgnoringExceptions(f);
2764                     else {
2765                         ((ForkJoinTask<T>)f).awaitPoolInvoke(this, ns);
2766                         if ((ns = nanos - (System.nanoTime() - startTime)) < 0L)
2767                             timedOut = true;
2768                     }
2769                 }
2770             }
2771             return futures;
2772         } catch (Throwable t) {
2773             for (Future<T> e : futures)
2774                 ForkJoinTask.cancelIgnoringExceptions(e);
2775             throw t;
2776         }
2777     }
2778 
2779     // Task to hold results from InvokeAnyTasks
2780     static final class InvokeAnyRoot<E> extends ForkJoinTask<E> {
2781         private static final long serialVersionUID = 2838392045355241008L;
2782         @SuppressWarnings("serial") // Conditionally serializable
2783         volatile E result;
2784         final AtomicInteger count;  // in case all throw
2785         @SuppressWarnings("serial")
2786         final ForkJoinPool pool;    // to check shutdown while collecting
2787         InvokeAnyRoot(int n, ForkJoinPool p) {
2788             pool = p;
2789             count = new AtomicInteger(n);
2790         }
2791         final void tryComplete(Callable<E> c) { // called by InvokeAnyTasks
2792             Throwable ex = null;
2793             boolean failed;
2794             if (c == null || Thread.interrupted() ||
2795                 (pool != null && pool.mode < 0))
2796                 failed = true;
2797             else if (isDone())
2798                 failed = false;
2799             else {
2800                 try {
2801                     complete(c.call());
2802                     failed = false;
2803                 } catch (Throwable tx) {
2804                     ex = tx;
2805                     failed = true;
2806                 }
2807             }
2808             if ((pool != null && pool.mode < 0) ||
2809                 (failed && count.getAndDecrement() <= 1))
2810                 trySetThrown(ex != null ? ex : new CancellationException());
2811         }
2812         public final boolean exec()         { return false; } // never forked
2813         public final E getRawResult()       { return result; }
2814         public final void setRawResult(E v) { result = v; }
2815     }
2816 
2817     // Variant of AdaptedInterruptibleCallable with results in InvokeAnyRoot
2818     static final class InvokeAnyTask<E> extends ForkJoinTask<E> {
2819         private static final long serialVersionUID = 2838392045355241008L;
2820         final InvokeAnyRoot<E> root;
2821         @SuppressWarnings("serial") // Conditionally serializable
2822         final Callable<E> callable;
2823         transient volatile Thread runner;
2824         InvokeAnyTask(InvokeAnyRoot<E> root, Callable<E> callable) {
2825             this.root = root;
2826             this.callable = callable;
2827         }
2828         public final boolean exec() {
2829             Thread.interrupted();
2830             runner = Thread.currentThread();
2831             root.tryComplete(callable);
2832             runner = null;
2833             Thread.interrupted();
2834             return true;
2835         }
2836         public final boolean cancel(boolean mayInterruptIfRunning) {
2837             Thread t;
2838             boolean stat = super.cancel(false);
2839             if (mayInterruptIfRunning && (t = runner) != null) {
2840                 try {
2841                     t.interrupt();
2842                 } catch (Throwable ignore) {
2843                 }
2844             }
2845             return stat;
2846         }
2847         public final void setRawResult(E v) {} // unused
2848         public final E getRawResult()       { return null; }
2849     }
2850 
2851     @Override
2852     public <T> T invokeAny(Collection<? extends Callable<T>> tasks)
2853         throws InterruptedException, ExecutionException {
2854         int n = tasks.size();
2855         if (n <= 0)
2856             throw new IllegalArgumentException();
2857         InvokeAnyRoot<T> root = new InvokeAnyRoot<T>(n, this);
2858         ArrayList<InvokeAnyTask<T>> fs = new ArrayList<>(n);
2859         try {
2860             for (Callable<T> c : tasks) {
2861                 if (c == null)
2862                     throw new NullPointerException();
2863                 InvokeAnyTask<T> f = new InvokeAnyTask<T>(root, c);
2864                 fs.add(f);
2865                 externalSubmit(f);
2866                 if (root.isDone())
2867                     break;
2868             }
2869             return root.getForPoolInvoke(this);
2870         } finally {
2871             for (InvokeAnyTask<T> f : fs)
2872                 ForkJoinTask.cancelIgnoringExceptions(f);
2873         }
2874     }
2875 
2876     @Override
2877     public <T> T invokeAny(Collection<? extends Callable<T>> tasks,
2878                            long timeout, TimeUnit unit)
2879         throws InterruptedException, ExecutionException, TimeoutException {
2880         long nanos = unit.toNanos(timeout);
2881         int n = tasks.size();
2882         if (n <= 0)
2883             throw new IllegalArgumentException();
2884         InvokeAnyRoot<T> root = new InvokeAnyRoot<T>(n, this);
2885         ArrayList<InvokeAnyTask<T>> fs = new ArrayList<>(n);
2886         try {
2887             for (Callable<T> c : tasks) {
2888                 if (c == null)
2889                     throw new NullPointerException();
2890                 InvokeAnyTask<T> f = new InvokeAnyTask<T>(root, c);
2891                 fs.add(f);
2892                 externalSubmit(f);
2893                 if (root.isDone())
2894                     break;
2895             }
2896             return root.getForPoolInvoke(this, nanos);
2897         } finally {
2898             for (InvokeAnyTask<T> f : fs)
2899                 ForkJoinTask.cancelIgnoringExceptions(f);
2900         }
2901     }
2902 
2903     /**
2904      * Returns the factory used for constructing new workers.
2905      *
2906      * @return the factory used for constructing new workers
2907      */
2908     public ForkJoinWorkerThreadFactory getFactory() {
2909         return factory;
2910     }
2911 
2912     /**
2913      * Returns the handler for internal worker threads that terminate
2914      * due to unrecoverable errors encountered while executing tasks.
2915      *
2916      * @return the handler, or {@code null} if none
2917      */
2918     public UncaughtExceptionHandler getUncaughtExceptionHandler() {
2919         return ueh;
2920     }
2921 
2922     /**
2923      * Returns the targeted parallelism level of this pool.
2924      *
2925      * @return the targeted parallelism level of this pool
2926      */
2927     public int getParallelism() {
2928         int par = mode & SMASK;
2929         return (par > 0) ? par : 1;
2930     }
2931 
2932     /**
2933      * Returns the targeted parallelism level of the common pool.
2934      *
2935      * @return the targeted parallelism level of the common pool
2936      * @since 1.8
2937      */
2938     public static int getCommonPoolParallelism() {
2939         return CommonPool.COMMON_PARALLELISM;
2940     }
2941 
2942     /**
2943      * Returns the number of worker threads that have started but not
2944      * yet terminated.  The result returned by this method may differ
2945      * from {@link #getParallelism} when threads are created to
2946      * maintain parallelism when others are cooperatively blocked.
2947      *
2948      * @return the number of worker threads
2949      */
2950     public int getPoolSize() {
2951         return ((mode & SMASK) + (short)(ctl >>> TC_SHIFT));
2952     }
2953 
2954     /**
2955      * Returns {@code true} if this pool uses local first-in-first-out
2956      * scheduling mode for forked tasks that are never joined.
2957      *
2958      * @return {@code true} if this pool uses async mode
2959      */
2960     public boolean getAsyncMode() {
2961         return (mode & FIFO) != 0;
2962     }
2963 
2964     /**
2965      * Returns an estimate of the number of worker threads that are
2966      * not blocked waiting to join tasks or for other managed
2967      * synchronization. This method may overestimate the
2968      * number of running threads.
2969      *
2970      * @return the number of worker threads
2971      */
2972     public int getRunningThreadCount() {
2973         VarHandle.acquireFence();
2974         WorkQueue[] qs; WorkQueue q;
2975         int rc = 0;
2976         if ((qs = queues) != null) {
2977             for (int i = 1; i < qs.length; i += 2) {
2978                 if ((q = qs[i]) != null && q.isApparentlyUnblocked())
2979                     ++rc;
2980             }
2981         }
2982         return rc;
2983     }
2984 
2985     /**
2986      * Returns an estimate of the number of threads that are currently
2987      * stealing or executing tasks. This method may overestimate the
2988      * number of active threads.
2989      *
2990      * @return the number of active threads
2991      */
2992     public int getActiveThreadCount() {
2993         int r = (mode & SMASK) + (int)(ctl >> RC_SHIFT);
2994         return (r <= 0) ? 0 : r; // suppress momentarily negative values
2995     }
2996 
2997     /**
2998      * Returns {@code true} if all worker threads are currently idle.
2999      * An idle worker is one that cannot obtain a task to execute
3000      * because none are available to steal from other threads, and
3001      * there are no pending submissions to the pool. This method is
3002      * conservative; it might not return {@code true} immediately upon
3003      * idleness of all threads, but will eventually become true if
3004      * threads remain inactive.
3005      *
3006      * @return {@code true} if all threads are currently idle
3007      */
3008     public boolean isQuiescent() {
3009         return canStop();
3010     }
3011 
3012     /**
3013      * Returns an estimate of the total number of completed tasks that
3014      * were executed by a thread other than their submitter. The
3015      * reported value underestimates the actual total number of steals
3016      * when the pool is not quiescent. This value may be useful for
3017      * monitoring and tuning fork/join programs: in general, steal
3018      * counts should be high enough to keep threads busy, but low
3019      * enough to avoid overhead and contention across threads.
3020      *
3021      * @return the number of steals
3022      */
3023     public long getStealCount() {
3024         long count = stealCount;
3025         WorkQueue[] qs; WorkQueue q;
3026         if ((qs = queues) != null) {
3027             for (int i = 1; i < qs.length; i += 2) {
3028                 if ((q = qs[i]) != null)
3029                     count += (long)q.nsteals & 0xffffffffL;
3030             }
3031         }
3032         return count;
3033     }
3034 
3035     /**
3036      * Returns an estimate of the total number of tasks currently held
3037      * in queues by worker threads (but not including tasks submitted
3038      * to the pool that have not begun executing). This value is only
3039      * an approximation, obtained by iterating across all threads in
3040      * the pool. This method may be useful for tuning task
3041      * granularities.
3042      *
3043      * @return the number of queued tasks
3044      */
3045     public long getQueuedTaskCount() {
3046         VarHandle.acquireFence();
3047         WorkQueue[] qs; WorkQueue q;
3048         int count = 0;
3049         if ((qs = queues) != null) {
3050             for (int i = 1; i < qs.length; i += 2) {
3051                 if ((q = qs[i]) != null)
3052                     count += q.queueSize();
3053             }
3054         }
3055         return count;
3056     }
3057 
3058     /**
3059      * Returns an estimate of the number of tasks submitted to this
3060      * pool that have not yet begun executing.  This method may take
3061      * time proportional to the number of submissions.
3062      *
3063      * @return the number of queued submissions
3064      */
3065     public int getQueuedSubmissionCount() {
3066         VarHandle.acquireFence();
3067         WorkQueue[] qs; WorkQueue q;
3068         int count = 0;
3069         if ((qs = queues) != null) {
3070             for (int i = 0; i < qs.length; i += 2) {
3071                 if ((q = qs[i]) != null)
3072                     count += q.queueSize();
3073             }
3074         }
3075         return count;
3076     }
3077 
3078     /**
3079      * Returns {@code true} if there are any tasks submitted to this
3080      * pool that have not yet begun executing.
3081      *
3082      * @return {@code true} if there are any queued submissions
3083      */
3084     public boolean hasQueuedSubmissions() {
3085         VarHandle.acquireFence();
3086         WorkQueue[] qs; WorkQueue q;
3087         if ((qs = queues) != null) {
3088             for (int i = 0; i < qs.length; i += 2) {
3089                 if ((q = qs[i]) != null && !q.isEmpty())
3090                     return true;
3091             }
3092         }
3093         return false;
3094     }
3095 
3096     /**
3097      * Removes and returns the next unexecuted submission if one is
3098      * available.  This method may be useful in extensions to this
3099      * class that re-assign work in systems with multiple pools.
3100      *
3101      * @return the next submission, or {@code null} if none
3102      */
3103     protected ForkJoinTask<?> pollSubmission() {
3104         return pollScan(true);
3105     }
3106 
3107     /**
3108      * Removes all available unexecuted submitted and forked tasks
3109      * from scheduling queues and adds them to the given collection,
3110      * without altering their execution status. These may include
3111      * artificially generated or wrapped tasks. This method is
3112      * designed to be invoked only when the pool is known to be
3113      * quiescent. Invocations at other times may not remove all
3114      * tasks. A failure encountered while attempting to add elements
3115      * to collection {@code c} may result in elements being in
3116      * neither, either or both collections when the associated
3117      * exception is thrown.  The behavior of this operation is
3118      * undefined if the specified collection is modified while the
3119      * operation is in progress.
3120      *
3121      * @param c the collection to transfer elements into
3122      * @return the number of elements transferred
3123      */
3124     protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) {
3125         int count = 0;
3126         for (ForkJoinTask<?> t; (t = pollScan(false)) != null; ) {
3127             c.add(t);
3128             ++count;
3129         }
3130         return count;
3131     }
3132 
3133     /**
3134      * Returns a string identifying this pool, as well as its state,
3135      * including indications of run state, parallelism level, and
3136      * worker and task counts.
3137      *
3138      * @return a string identifying this pool, as well as its state
3139      */
3140     public String toString() {
3141         // Use a single pass through queues to collect counts
3142         int md = mode; // read volatile fields first
3143         long c = ctl;
3144         long st = stealCount;
3145         long qt = 0L, ss = 0L; int rc = 0;
3146         WorkQueue[] qs; WorkQueue q;
3147         if ((qs = queues) != null) {
3148             for (int i = 0; i < qs.length; ++i) {
3149                 if ((q = qs[i]) != null) {
3150                     int size = q.queueSize();
3151                     if ((i & 1) == 0)
3152                         ss += size;
3153                     else {
3154                         qt += size;
3155                         st += (long)q.nsteals & 0xffffffffL;
3156                         if (q.isApparentlyUnblocked())
3157                             ++rc;
3158                     }
3159                 }
3160             }
3161         }
3162 
3163         int pc = (md & SMASK);
3164         int tc = pc + (short)(c >>> TC_SHIFT);
3165         int ac = pc + (int)(c >> RC_SHIFT);
3166         if (ac < 0) // ignore transient negative
3167             ac = 0;
3168         String level = ((md & TERMINATED) != 0 ? "Terminated" :
3169                         (md & STOP)       != 0 ? "Terminating" :
3170                         (md & SHUTDOWN)   != 0 ? "Shutting down" :
3171                         "Running");
3172         return super.toString() +
3173             "[" + level +
3174             ", parallelism = " + pc +
3175             ", size = " + tc +
3176             ", active = " + ac +
3177             ", running = " + rc +
3178             ", steals = " + st +
3179             ", tasks = " + qt +
3180             ", submissions = " + ss +
3181             "]";
3182     }
3183 
3184     /**
3185      * Possibly initiates an orderly shutdown in which previously
3186      * submitted tasks are executed, but no new tasks will be
3187      * accepted. Invocation has no effect on execution state if this
3188      * is the {@link #commonPool()}, and no additional effect if
3189      * already shut down.  Tasks that are in the process of being
3190      * submitted concurrently during the course of this method may or
3191      * may not be rejected.
3192      *
3193      * @throws SecurityException if a security manager exists and
3194      *         the caller is not permitted to modify threads
3195      *         because it does not hold {@link
3196      *         java.lang.RuntimePermission}{@code ("modifyThread")}
3197      */
3198     public void shutdown() {
3199         checkPermission();
3200         if (this != CommonPool.common)
3201             tryTerminate(false, true);
3202     }
3203 
3204     /**
3205      * Possibly attempts to cancel and/or stop all tasks, and reject
3206      * all subsequently submitted tasks.  Invocation has no effect on
3207      * execution state if this is the {@link #commonPool()}, and no
3208      * additional effect if already shut down. Otherwise, tasks that
3209      * are in the process of being submitted or executed concurrently
3210      * during the course of this method may or may not be
3211      * rejected. This method cancels both existing and unexecuted
3212      * tasks, in order to permit termination in the presence of task
3213      * dependencies. So the method always returns an empty list
3214      * (unlike the case for some other Executors).
3215      *
3216      * @return an empty list
3217      * @throws SecurityException if a security manager exists and
3218      *         the caller is not permitted to modify threads
3219      *         because it does not hold {@link
3220      *         java.lang.RuntimePermission}{@code ("modifyThread")}
3221      */
3222     public List<Runnable> shutdownNow() {
3223         checkPermission();
3224         if (this != CommonPool.common)
3225             tryTerminate(true, true);
3226         return Collections.emptyList();
3227     }
3228 
3229     /**
3230      * Returns {@code true} if all tasks have completed following shut down.
3231      *
3232      * @return {@code true} if all tasks have completed following shut down
3233      */
3234     public boolean isTerminated() {
3235         return (mode & TERMINATED) != 0;
3236     }
3237 
3238     /**
3239      * Returns {@code true} if the process of termination has
3240      * commenced but not yet completed.  This method may be useful for
3241      * debugging. A return of {@code true} reported a sufficient
3242      * period after shutdown may indicate that submitted tasks have
3243      * ignored or suppressed interruption, or are waiting for I/O,
3244      * causing this executor not to properly terminate. (See the
3245      * advisory notes for class {@link ForkJoinTask} stating that
3246      * tasks should not normally entail blocking operations.  But if
3247      * they do, they must abort them on interrupt.)
3248      *
3249      * @return {@code true} if terminating but not yet terminated
3250      */
3251     public boolean isTerminating() {
3252         return (mode & (STOP | TERMINATED)) == STOP;
3253     }
3254 
3255     /**
3256      * Returns {@code true} if this pool has been shut down.
3257      *
3258      * @return {@code true} if this pool has been shut down
3259      */
3260     public boolean isShutdown() {
3261         return (mode & SHUTDOWN) != 0;
3262     }
3263 
3264     /**
3265      * Blocks until all tasks have completed execution after a
3266      * shutdown request, or the timeout occurs, or the current thread
3267      * is interrupted, whichever happens first. Because the {@link
3268      * #commonPool()} never terminates until program shutdown, when
3269      * applied to the common pool, this method is equivalent to {@link
3270      * #awaitQuiescence(long, TimeUnit)} but always returns {@code false}.
3271      *
3272      * @param timeout the maximum time to wait
3273      * @param unit the time unit of the timeout argument
3274      * @return {@code true} if this executor terminated and
3275      *         {@code false} if the timeout elapsed before termination
3276      * @throws InterruptedException if interrupted while waiting
3277      */
3278     public boolean awaitTermination(long timeout, TimeUnit unit)
3279         throws InterruptedException {
3280         ReentrantLock lock; Condition cond;
3281         long nanos = unit.toNanos(timeout);
3282         boolean terminated = false;
3283         if (this == CommonPool.common) {
3284             Thread t; ForkJoinWorkerThread wt; int q;
3285             if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread &&
3286                 (wt = (ForkJoinWorkerThread)t).pool == this)
3287                 q = helpQuiescePool(wt.workQueue, nanos, true);
3288             else
3289                 q = externalHelpQuiescePool(nanos, true);
3290             if (q < 0)
3291                 throw new InterruptedException();
3292         }
3293         else if (!(terminated = ((mode & TERMINATED) != 0)) &&
3294                  (lock = registrationLock) != null) {
3295             lock.lock();
3296             try {
3297                 if ((cond = termination) == null)
3298                     termination = cond = lock.newCondition();
3299                 while (!(terminated = ((mode & TERMINATED) != 0)) && nanos > 0L)
3300                     nanos = cond.awaitNanos(nanos);
3301             } finally {
3302                 lock.unlock();
3303             }
3304         }
3305         return terminated;
3306     }
3307 
3308     /**
3309      * If called by a ForkJoinTask operating in this pool, equivalent
3310      * in effect to {@link ForkJoinTask#helpQuiesce}. Otherwise,
3311      * waits and/or attempts to assist performing tasks until this
3312      * pool {@link #isQuiescent} or the indicated timeout elapses.
3313      *
3314      * @param timeout the maximum time to wait
3315      * @param unit the time unit of the timeout argument
3316      * @return {@code true} if quiescent; {@code false} if the
3317      * timeout elapsed.
3318      */
3319     public boolean awaitQuiescence(long timeout, TimeUnit unit) {
3320         Thread t; ForkJoinWorkerThread wt; int q;
3321         long nanos = unit.toNanos(timeout);
3322         if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread &&
3323             (wt = (ForkJoinWorkerThread)t).pool == this)
3324             q = helpQuiescePool(wt.workQueue, nanos, false);
3325         else
3326             q = externalHelpQuiescePool(nanos, false);
3327         return (q > 0);
3328     }
3329 
3330     /**
3331      * Interface for extending managed parallelism for tasks running
3332      * in {@link ForkJoinPool}s.
3333      *
3334      * <p>A {@code ManagedBlocker} provides two methods.  Method
3335      * {@link #isReleasable} must return {@code true} if blocking is
3336      * not necessary. Method {@link #block} blocks the current thread
3337      * if necessary (perhaps internally invoking {@code isReleasable}
3338      * before actually blocking). These actions are performed by any
3339      * thread invoking {@link
3340      * ForkJoinPool#managedBlock(ManagedBlocker)}.  The unusual
3341      * methods in this API accommodate synchronizers that may, but
3342      * don't usually, block for long periods. Similarly, they allow
3343      * more efficient internal handling of cases in which additional
3344      * workers may be, but usually are not, needed to ensure
3345      * sufficient parallelism.  Toward this end, implementations of
3346      * method {@code isReleasable} must be amenable to repeated
3347      * invocation. Neither method is invoked after a prior invocation
3348      * of {@code isReleasable} or {@code block} returns {@code true}.
3349      *
3350      * <p>For example, here is a ManagedBlocker based on a
3351      * ReentrantLock:
3352      * <pre> {@code
3353      * class ManagedLocker implements ManagedBlocker {
3354      *   final ReentrantLock lock;
3355      *   boolean hasLock = false;
3356      *   ManagedLocker(ReentrantLock lock) { this.lock = lock; }
3357      *   public boolean block() {
3358      *     if (!hasLock)
3359      *       lock.lock();
3360      *     return true;
3361      *   }
3362      *   public boolean isReleasable() {
3363      *     return hasLock || (hasLock = lock.tryLock());
3364      *   }
3365      * }}</pre>
3366      *
3367      * <p>Here is a class that possibly blocks waiting for an
3368      * item on a given queue:
3369      * <pre> {@code
3370      * class QueueTaker<E> implements ManagedBlocker {
3371      *   final BlockingQueue<E> queue;
3372      *   volatile E item = null;
3373      *   QueueTaker(BlockingQueue<E> q) { this.queue = q; }
3374      *   public boolean block() throws InterruptedException {
3375      *     if (item == null)
3376      *       item = queue.take();
3377      *     return true;
3378      *   }
3379      *   public boolean isReleasable() {
3380      *     return item != null || (item = queue.poll()) != null;
3381      *   }
3382      *   public E getItem() { // call after pool.managedBlock completes
3383      *     return item;
3384      *   }
3385      * }}</pre>
3386      */
3387     public static interface ManagedBlocker {
3388         /**
3389          * Possibly blocks the current thread, for example waiting for
3390          * a lock or condition.
3391          *
3392          * @return {@code true} if no additional blocking is necessary
3393          * (i.e., if isReleasable would return true)
3394          * @throws InterruptedException if interrupted while waiting
3395          * (the method is not required to do so, but is allowed to)
3396          */
3397         boolean block() throws InterruptedException;
3398 
3399         /**
3400          * Returns {@code true} if blocking is unnecessary.
3401          * @return {@code true} if blocking is unnecessary
3402          */
3403         boolean isReleasable();
3404     }
3405 
3406     /**
3407      * Runs the given possibly blocking task.  When {@linkplain
3408      * ForkJoinTask#inForkJoinPool() running in a ForkJoinPool}, this
3409      * method possibly arranges for a spare thread to be activated if
3410      * necessary to ensure sufficient parallelism while the current
3411      * thread is blocked in {@link ManagedBlocker#block blocker.block()}.
3412      *
3413      * <p>This method repeatedly calls {@code blocker.isReleasable()} and
3414      * {@code blocker.block()} until either method returns {@code true}.
3415      * Every call to {@code blocker.block()} is preceded by a call to
3416      * {@code blocker.isReleasable()} that returned {@code false}.
3417      *
3418      * <p>If not running in a ForkJoinPool, this method is
3419      * behaviorally equivalent to
3420      * <pre> {@code
3421      * while (!blocker.isReleasable())
3422      *   if (blocker.block())
3423      *     break;}</pre>
3424      *
3425      * If running in a ForkJoinPool, the pool may first be expanded to
3426      * ensure sufficient parallelism available during the call to
3427      * {@code blocker.block()}.
3428      *
3429      * @param blocker the blocker task
3430      * @throws InterruptedException if {@code blocker.block()} did so
3431      */
3432     public static void managedBlock(ManagedBlocker blocker)
3433         throws InterruptedException {
3434         Thread t; ForkJoinPool p;
3435         if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread &&
3436             (p = ((ForkJoinWorkerThread)t).pool) != null)
3437             p.compensatedBlock(blocker);
3438         else
3439             unmanagedBlock(blocker);
3440     }
3441 
3442     /** ManagedBlock for ForkJoinWorkerThreads */
3443     private void compensatedBlock(ManagedBlocker blocker)
3444         throws InterruptedException {
3445         if (blocker == null) throw new NullPointerException();
3446         for (;;) {
3447             int comp; boolean done;
3448             long c = ctl;
3449             if (blocker.isReleasable())
3450                 break;
3451             if ((comp = tryCompensate(c)) >= 0) {
3452                 long post = (comp == 0) ? 0L : RC_UNIT;
3453                 try {
3454                     done = blocker.block();
3455                 } finally {
3456                     getAndAddCtl(post);
3457                 }
3458                 if (done)
3459                     break;
3460             }
3461         }
3462     }
3463 
3464     /** ManagedBlock for external threads */
3465     private static void unmanagedBlock(ManagedBlocker blocker)
3466         throws InterruptedException {
3467         if (blocker == null) throw new NullPointerException();
3468         do {} while (!blocker.isReleasable() && !blocker.block());
3469     }
3470 
3471     // AbstractExecutorService.newTaskFor overrides rely on
3472     // undocumented fact that ForkJoinTask.adapt returns ForkJoinTasks
3473     // that also implement RunnableFuture.
3474 
3475     @Override
3476     protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
3477         return new ForkJoinTask.AdaptedRunnable<T>(runnable, value);
3478     }
3479 
3480     @Override
3481     protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) {
3482         return new ForkJoinTask.AdaptedCallable<T>(callable);
3483     }
3484 
3485     static {
3486         try {
3487             MethodHandles.Lookup l = MethodHandles.lookup();
3488             CTL = l.findVarHandle(ForkJoinPool.class, "ctl", long.class);
3489             MODE = l.findVarHandle(ForkJoinPool.class, "mode", int.class);
3490             THREADIDS = l.findVarHandle(ForkJoinPool.class, "threadIds", int.class);
3491             POOLIDS = l.findStaticVarHandle(ForkJoinPool.class, "poolIds", int.class);
3492         } catch (ReflectiveOperationException e) {
3493             throw new ExceptionInInitializerError(e);
3494         }
3495 
3496         // Reduce the risk of rare disastrous classloading in first call to
3497         // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
3498         Class<?> ensureLoaded = LockSupport.class;
3499 
3500         defaultForkJoinWorkerThreadFactory = new DefaultForkJoinWorkerThreadFactory();










3501         modifyThreadPermission = new RuntimePermission("modifyThread");
3502     }
3503 
3504     static class CommonPool {
3505         /**
3506          * Common (static) pool. Non-null for public use unless a static
3507          * construction exception, but internal usages null-check on use
3508          * to paranoically avoid potential initialization circularities
3509          * as well as to simplify generated code.
3510          */
3511         static final ForkJoinPool common;
3512 
3513         /**
3514          * Common pool parallelism. To allow simpler use and management
3515          * when common pool threads are disabled, we allow the underlying
3516          * common.parallelism field to be zero, but in that case still report
3517          * parallelism as 1 to reflect resulting caller-runs mechanics.
3518          */
3519         static final int COMMON_PARALLELISM;
3520 
3521         /**
3522          * Limit on spare thread construction in tryCompensate.
3523          */
3524         private static final int COMMON_MAX_SPARES;
3525 
3526         /**
3527          * The default value for COMMON_MAX_SPARES.  Overridable using the
3528          * "java.util.concurrent.ForkJoinPool.common.maximumSpares" system
3529          * property.  The default value is far in excess of normal
3530          * requirements, but also far short of MAX_CAP and typical OS
3531          * thread limits, so allows JVMs to catch misuse/abuse before
3532          * running out of resources needed to do so.
3533          */
3534         private static final int DEFAULT_COMMON_MAX_SPARES = 256;
3535 
3536         static {
3537             int commonMaxSpares = DEFAULT_COMMON_MAX_SPARES;
3538             try {
3539                 String p = System.getProperty
3540                     ("java.util.concurrent.ForkJoinPool.common.maximumSpares");
3541                 if (p != null)
3542                     commonMaxSpares = Integer.parseInt(p);
3543             } catch (Exception ignore) {}
3544             COMMON_MAX_SPARES = commonMaxSpares;
3545 
3546             @SuppressWarnings("removal")
3547             ForkJoinPool tmp = AccessController.doPrivileged(new PrivilegedAction<>() {
3548                 public ForkJoinPool run() {
3549                     return new ForkJoinPool((byte)0); }});
3550             common = tmp;
3551 
3552             COMMON_PARALLELISM = Math.max(common.mode & SMASK, 1);
3553         }
3554     }
3555 }
--- EOF ---