/*
 * Copyright 2016-2020 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license.
 */

package kotlinx.coroutines.scheduling

import kotlinx.atomicfu.*
import kotlinx.coroutines.*
import kotlinx.coroutines.internal.*
import java.io.*
import java.util.concurrent.*
import java.util.concurrent.atomic.*
import java.util.concurrent.locks.*
import kotlin.math.*
import kotlin.random.*

/**
 * Coroutine scheduler (pool of shared threads) whose primary target is to distribute dispatched coroutines
 * over worker threads, including both CPU-intensive and blocking tasks, in the most efficient manner.
 *
 * The current scheduler implementation has two optimization targets:
 * * Efficiency in the face of communication patterns (e.g., actors communicating via channel).
 * * Dynamic resizing to support blocking calls without re-dispatching coroutines to a separate "blocking" thread pool.
 *
 * ### Structural overview
 *
 * The scheduler consists of [corePoolSize] worker threads to execute CPU-bound tasks and up to
 * [maxPoolSize] lazily created threads to execute blocking tasks.
 * Every worker has a local queue in addition to a global scheduler queue,
 * and the global queue has priority over local queues to avoid starvation of externally-submitted
 * (e.g., from an Android UI thread) tasks.
 * Work-stealing is implemented on top of these queues to provide
 * even load distribution and the illusion of a centralized run queue.
 *
 * ### Scheduling policy
 *
 * When a coroutine is dispatched from within a scheduler worker, it is placed into the head of the worker's run queue.
 * If the head is not empty, the previous head task is moved to the tail. While this is an unfair scheduling policy,
 * it effectively couples communicating coroutines into one and eliminates the scheduling latency
 * that arises from placing tasks at the end of the queue.
 * Placing the former head at the tail is necessary to provide semi-FIFO order; otherwise, the queue degenerates into a stack.
 * When a coroutine is dispatched from an external thread, it is put into the global queue.
 * The original idea of a single-slot LIFO buffer comes from the Golang runtime scheduler by D. Vyukov.
 * It was proven to be "fair enough", performant, and generally well accepted, and it was initially a significant
 * source of inspiration for the coroutine scheduler.
 *
 * ### Work stealing and affinity
 *
 * To provide an even distribution of tasks, a worker tries to steal tasks from other workers' queues
 * before parking when its local queue is empty.
 * A non-standard solution is implemented to provide task affinity: a task from the FIFO buffer may be stolen
 * only if it is stale enough, based on the value of [WORK_STEALING_TIME_RESOLUTION_NS].
 * For this purpose, a monotonic global clock is used, and every task has its submission time associated with it.
 * This approach shows outstanding results when coroutines are cooperative,
 * but as a downside, the scheduler now depends on a high-resolution global clock,
 * which may limit scalability on NUMA machines. Tasks from the LIFO buffer can be stolen on a regular basis.
 *
 * ### Thread management
 * One of the hardest parts of the scheduler is decentralized management of the threads with progress guarantees
 * similar to those of regular centralized executors.
 * The state of the threads consists of the [controlState] and [parkedWorkersStack] fields.
 * The former field incorporates the number of created threads, CPU tokens, and blocking tasks
 * that require thread compensation,
 * while the latter represents an intrusive versioned Treiber stack of idle workers.
 * When a worker cannot find any work, it first adds itself to the stack,
 * then re-scans the queues to avoid missing signals, and then attempts to park
 * with an additional rendezvous against unnecessary parking.
 * If a worker finds a task that it cannot yet steal due to time constraints, it stores this fact in its state
 * (to be uncounted when additional work is signalled) and parks for that duration.
 *
 * When a new task arrives at the scheduler (whether to a local or the global queue),
 * either an idle worker is signalled, or a new worker is attempted to be created.
 * (Only [corePoolSize] workers can be created for regular CPU tasks.)
 *
 * ### Support for blocking tasks
 * The scheduler also supports the notion of [blocking][TASK_PROBABLY_BLOCKING] tasks.
 * When executing or enqueuing blocking tasks, the scheduler notifies or creates one more worker in
 * addition to the core pool size, so at any given moment it has [corePoolSize] threads (potentially not yet created)
 * to serve CPU-bound tasks. To properly guarantee liveness, the scheduler maintains
 * "CPU permits" -- [corePoolSize] special tokens that permit an arbitrary worker to execute and steal CPU-bound tasks.
 * When a worker encounters a blocking task, it effectively hands off its permit to another thread (though not directly) to
 * keep the invariant "the scheduler always has at least min(pending CPU tasks, core pool size)
 * and at most core pool size threads to execute CPU tasks".
 * To avoid overprovision, workers without a CPU permit are allowed to scan [globalBlockingQueue]
 * and steal **only** blocking tasks from other workers.
 *
 * The scheduler does not limit the count of pending blocking tasks, potentially creating up to [maxPoolSize] threads.
 * End users do not have access to the scheduler directly and can dispatch blocking tasks only through
 * [LimitingDispatcher], which controls the concurrency level with its own mechanism.
 */
@Suppress("NOTHING_TO_INLINE")
internal class CoroutineScheduler(
    @JvmField val corePoolSize: Int,
    @JvmField val maxPoolSize: Int,
    @JvmField val idleWorkerKeepAliveNs: Long = IDLE_WORKER_KEEP_ALIVE_NS,
    @JvmField val schedulerName: String = DEFAULT_SCHEDULER_NAME
) : Executor, Closeable {
    init {
        require(corePoolSize >= MIN_SUPPORTED_POOL_SIZE) {
            "Core pool size $corePoolSize should be at least $MIN_SUPPORTED_POOL_SIZE"
        }
        require(maxPoolSize >= corePoolSize) {
            "Max pool size $maxPoolSize should be greater than or equal to core pool size $corePoolSize"
        }
        require(maxPoolSize <= MAX_SUPPORTED_POOL_SIZE) {
            "Max pool size $maxPoolSize should not exceed maximal supported number of threads $MAX_SUPPORTED_POOL_SIZE"
        }
        require(idleWorkerKeepAliveNs > 0) {
            "Idle worker keep alive time $idleWorkerKeepAliveNs must be positive"
        }
    }
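
    /*
     * A minimal usage sketch (hypothetical: this class is internal and is normally
     * constructed by the default dispatcher machinery rather than by user code;
     * `doCpuWork` is a placeholder for any CPU-bound runnable):
     *
     *   val scheduler = CoroutineScheduler(corePoolSize = 4, maxPoolSize = 128)
     *   scheduler.dispatch(Runnable { doCpuWork() }) // lands in a local or the global queue
     *   scheduler.close()                            // shutdown(10_000L), joins the workers
     */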

    @JvmField
    val globalCpuQueue = GlobalQueue()
    @JvmField
    val globalBlockingQueue = GlobalQueue()

    private fun addToGlobalQueue(task: Task): Boolean {
        return if (task.isBlocking) {
            globalBlockingQueue.addLast(task)
        } else {
            globalCpuQueue.addLast(task)
        }
    }

    /**
     * The stack of parked workers.
     * Every worker registers itself in the stack before parking (if it was not previously registered),
     * so that it can be signalled when new tasks arrive.
     * This is a form of intrusive garbage-free Treiber stack where [Worker] is also a stack node.
     *
     * The stack is better than a queue (even with contention on the top) because it unparks threads
     * in most-recently used order, improving both performance and locality.
     * Moreover, it decreases thread thrashing: if the pool has n threads when only n / 2 are required,
     * the latter half will never be unparked and will terminate itself after [IDLE_WORKER_KEEP_ALIVE_NS].
     *
     * This long value consists of version bits with [PARKED_VERSION_MASK]
     * and the top worker thread index bits with [PARKED_INDEX_MASK].
     */
    private val parkedWorkersStack = atomic(0L)
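
    /*
     * A worked example of the packing above (the values are illustrative only),
     * assuming the masks defined in the companion object below with BLOCKING_SHIFT = 21:
     *
     *   val top = (2L shl BLOCKING_SHIFT) or 5L  // version = 2, top worker index = 5
     *   (top and PARKED_INDEX_MASK).toInt()      // == 5, an index into [workers]
     *   top and PARKED_VERSION_MASK              // == 2L shl BLOCKING_SHIFT
     *
     * Every push/pop/update CASes in `updVersion or updIndex`, where the version is always
     * advanced by PARKED_VERSION_INC, so a stale top value never passes the CAS.
     */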

    /**
     * Updates the index of the worker at the top of [parkedWorkersStack].
     * It always updates the version, so that a concurrent [parkedWorkersStackPop] operation
     * that might have already decided to put this index at the top fails its CAS and retries.
     *
     * Note that [newIndex] can be zero for a worker that is being terminated (removed from [workers]).
     */
    internal fun parkedWorkersStackTopUpdate(worker: Worker, oldIndex: Int, newIndex: Int) {
        parkedWorkersStack.loop { top ->
            val index = (top and PARKED_INDEX_MASK).toInt()
            val updVersion = (top + PARKED_VERSION_INC) and PARKED_VERSION_MASK
            val updIndex = if (index == oldIndex) {
                if (newIndex == 0) {
                    parkedWorkersStackNextIndex(worker)
                } else {
                    newIndex
                }
            } else {
                index // no change to index, but update version
            }
            if (updIndex < 0) return@loop // retry
            if (parkedWorkersStack.compareAndSet(top, updVersion or updIndex.toLong())) return
        }
    }

    /**
     * Pushes a worker into [parkedWorkersStack].
     * It does nothing if this worker is already physically linked to the stack.
     * This method is invoked only from the worker thread itself.
     * This invocation always precedes [LockSupport.parkNanos].
     * See [Worker.doPark].
     *
     * Returns `true` if the worker was added to the stack by this invocation, `false` if it was already
     * registered in the stack.
     */
    internal fun parkedWorkersStackPush(worker: Worker): Boolean {
        if (worker.nextParkedWorker !== NOT_IN_STACK) return false // already in stack, bail out
        /*
         * The below loop can be entered only if this worker was not in the stack and, since no other thread
         * can add it to the stack (only the worker itself), this invariant holds while this loop executes.
         */
        parkedWorkersStack.loop { top ->
            val index = (top and PARKED_INDEX_MASK).toInt()
            val updVersion = (top + PARKED_VERSION_INC) and PARKED_VERSION_MASK
            val updIndex = worker.indexInArray
            assert { updIndex != 0 } // only this worker can push itself, cannot be terminated
            worker.nextParkedWorker = workers[index]
            /*
             * Another thread can be changing this worker's index at this point, but it
             * also invokes parkedWorkersStackTopUpdate, which updates the version to make the next CAS fail.
             * A successful CAS of the stack top completes a successful push.
             */
            if (parkedWorkersStack.compareAndSet(top, updVersion or updIndex.toLong())) return true
        }
    }

    /**
     * Pops a worker from [parkedWorkersStack].
     * It can be invoked concurrently from any thread that is looking for help and needs to unpark some worker.
     * This invocation is always followed by an attempt to [LockSupport.unpark] the resulting worker.
     * See [tryUnpark].
     */
    private fun parkedWorkersStackPop(): Worker? {
        parkedWorkersStack.loop { top ->
            val index = (top and PARKED_INDEX_MASK).toInt()
            val worker = workers[index] ?: return null // stack is empty
            val updVersion = (top + PARKED_VERSION_INC) and PARKED_VERSION_MASK
            val updIndex = parkedWorkersStackNextIndex(worker)
            if (updIndex < 0) return@loop // retry
            /*
             * Another thread can be changing this worker's index at this point, but it
             * also invokes parkedWorkersStackTopUpdate, which updates the version to make the next CAS fail.
             * A successful CAS of the stack top completes a successful pop.
             */
            if (parkedWorkersStack.compareAndSet(top, updVersion or updIndex.toLong())) {
                /*
                 * We've just taken a worker out of the stack, but its nextParkedWorker is not reset yet, so if
                 * that worker is currently invoking parkedWorkersStackPush it would think it is in the stack
                 * and bail out without adding itself again. It does not matter, since we are going to invoke
                 * unpark on the thread that was popped out of parkedWorkersStack anyway.
                 */
                worker.nextParkedWorker = NOT_IN_STACK
                return worker
            }
        }
    }
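
    /*
     * Why the version increment matters, sketched on a concrete interleaving:
     *
     *   1. Thread A reads top = (version = v, index = 5) and prepares its CAS.
     *   2. Meanwhile, worker 5 is popped, runs, and pushes itself back; without the
     *      version bits the top would again read as "index = 5" (the classic ABA problem)
     *      and A's CAS would succeed while linking a stale nextParkedWorker chain.
     *   3. With the version bits, every intervening push/pop bumped the version past v,
     *      so A's CAS fails and A retries against the fresh top.
     */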

    /**
     * Finds the next usable index for [parkedWorkersStack]. The problem is that workers can
     * be terminated, at which point their [Worker.indexInArray] becomes zero, so they cannot be
     * put at the top of the stack. In that case, we look for the next usable worker.
     *
     * Returns `index >= 0` or `-1` for retry.
     */
    private fun parkedWorkersStackNextIndex(worker: Worker): Int {
        var next = worker.nextParkedWorker
        findNext@ while (true) {
            when {
                next === NOT_IN_STACK -> return -1 // we are too late -- another thread popped this element, retry
                next === null -> return 0 // stack becomes empty
                else -> {
                    val nextWorker = next as Worker
                    val updIndex = nextWorker.indexInArray
                    if (updIndex != 0) return updIndex // found a good index for the next worker
                    // Otherwise, this worker was terminated and we cannot put it at the top anymore, check the next one
                    next = nextWorker.nextParkedWorker
                }
            }
        }
    }

    /**
     * State of the worker threads.
     * [workers] is an array of lazily created workers up to [maxPoolSize] workers.
     * [createdWorkers] is the count of already created workers (a worker with an index less than [createdWorkers] exists).
     * [blockingTasks] is the count of pending (either enqueued or being executed) blocking tasks.
     *
     * **NOTE**: `workers[0]` is always `null` (never used, works as a sentinel value), so
     * workers are 1-indexed; the code path in [Worker.trySteal] is a bit faster and the index swap
     * during termination works properly.
     */
    @JvmField
    val workers = AtomicReferenceArray<Worker?>(maxPoolSize + 1)

    /**
     * Long describing the state of workers in this pool.
     * Currently includes the number of created workers, the number of blocking tasks, and the number
     * of available CPU permits, each occupying [BLOCKING_SHIFT] bits.
     */
    private val controlState = atomic(corePoolSize.toLong() shl CPU_PERMITS_SHIFT)
    private val createdWorkers: Int inline get() = (controlState.value and CREATED_MASK).toInt()
    private val availableCpuPermits: Int inline get() = availableCpuPermits(controlState.value)

    private inline fun createdWorkers(state: Long): Int = (state and CREATED_MASK).toInt()
    private inline fun blockingTasks(state: Long): Int = (state and BLOCKING_MASK shr BLOCKING_SHIFT).toInt()
    public inline fun availableCpuPermits(state: Long): Int = (state and CPU_PERMITS_MASK shr CPU_PERMITS_SHIFT).toInt()
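
    /*
     * A worked example of the control state layout, assuming BLOCKING_SHIFT = 21
     * and corePoolSize = 4 (the numbers are illustrative only):
     *
     *   bits  0..20 -- created workers        (CREATED_MASK)
     *   bits 21..41 -- blocking tasks         (BLOCKING_MASK)
     *   bits 42..62 -- available CPU permits  (CPU_PERMITS_MASK)
     *
     *   initial value = 4L shl CPU_PERMITS_SHIFT  // 4 permits, 0 created, 0 blocking
     *   after incrementCreatedWorkers(), incrementBlockingTasks() and a successful
     *   tryAcquireCpuPermit(): created = 1, blocking = 1, permits = 3.
     */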

    // Guarded by synchronization
    private inline fun incrementCreatedWorkers(): Int = createdWorkers(controlState.incrementAndGet())
    private inline fun decrementCreatedWorkers(): Int = createdWorkers(controlState.getAndDecrement())

    private inline fun incrementBlockingTasks() = controlState.addAndGet(1L shl BLOCKING_SHIFT)

    private inline fun decrementBlockingTasks() {
        controlState.addAndGet(-(1L shl BLOCKING_SHIFT))
    }

    private inline fun tryAcquireCpuPermit(): Boolean = controlState.loop { state ->
        val available = availableCpuPermits(state)
        if (available == 0) return false
        val update = state - (1L shl CPU_PERMITS_SHIFT)
        if (controlState.compareAndSet(state, update)) return true
    }

    private inline fun releaseCpuPermit() = controlState.addAndGet(1L shl CPU_PERMITS_SHIFT)

    // This is used as a "stop signal" for close and shutdown functions
    private val _isTerminated = atomic(false)
    val isTerminated: Boolean get() = _isTerminated.value

    companion object {
        // A symbol to mark workers that are not in parkedWorkersStack
        @JvmField
        val NOT_IN_STACK = Symbol("NOT_IN_STACK")

        // Worker ctl states
        private const val PARKED = -1
        private const val CLAIMED = 0
        private const val TERMINATED = 1

        // Masks of control state
        private const val BLOCKING_SHIFT = 21 // 2M threads max
        private const val CREATED_MASK: Long = (1L shl BLOCKING_SHIFT) - 1
        private const val BLOCKING_MASK: Long = CREATED_MASK shl BLOCKING_SHIFT
        private const val CPU_PERMITS_SHIFT = BLOCKING_SHIFT * 2
        private const val CPU_PERMITS_MASK = CREATED_MASK shl CPU_PERMITS_SHIFT

        internal const val MIN_SUPPORTED_POOL_SIZE = 1 // we support 1 for test purposes, but it is not usually used
        internal const val MAX_SUPPORTED_POOL_SIZE = (1 shl BLOCKING_SHIFT) - 2

        // Masks of parkedWorkersStack
        private const val PARKED_INDEX_MASK = CREATED_MASK
        private const val PARKED_VERSION_MASK = CREATED_MASK.inv()
        private const val PARKED_VERSION_INC = 1L shl BLOCKING_SHIFT
    }

    override fun execute(command: Runnable) = dispatch(command)

    override fun close() = shutdown(10_000L)

    // Shuts down the current scheduler and waits until all work is done and all threads are stopped.
    fun shutdown(timeout: Long) {
        // atomically set termination flag which is checked when workers are added or removed
        if (!_isTerminated.compareAndSet(false, true)) return
        // make sure we are not waiting for the current thread
        val currentWorker = currentWorker()
        // Capture # of created workers that cannot change anymore (mind the synchronized block!)
        val created = synchronized(workers) { createdWorkers }
        // Shutdown all workers with the only exception of the current thread
        for (i in 1..created) {
            val worker = workers[i]!!
            if (worker !== currentWorker) {
                while (worker.isAlive) {
                    LockSupport.unpark(worker)
                    worker.join(timeout)
                }
                val state = worker.state
                assert { state === WorkerState.TERMINATED } // Expected TERMINATED state
                worker.localQueue.offloadAllWorkTo(globalBlockingQueue) // Doesn't actually matter which queue to use
            }
        }
        // Make sure no more work is added to GlobalQueue from anywhere
        globalBlockingQueue.close()
        globalCpuQueue.close()
        // Finish processing tasks from globalQueue and/or from this worker's local queue
        while (true) {
            val task = currentWorker?.findTask(true)
                ?: globalCpuQueue.removeFirstOrNull()
                ?: globalBlockingQueue.removeFirstOrNull()
                ?: break
            runSafely(task)
        }
        // Shutdown the current thread
        currentWorker?.tryReleaseCpu(WorkerState.TERMINATED)
        // check & cleanup state
        assert { availableCpuPermits == corePoolSize }
        parkedWorkersStack.value = 0L
        controlState.value = 0L
    }

    /**
     * Dispatches execution of a runnable [block] with a hint to the scheduler about whether
     * this [block] may execute blocking operations (IO, system calls, locking primitives, etc.).
     *
     * [taskContext] -- concurrency context of the given [block].
     * [tailDispatch] -- whether this [dispatch] call is the last action the (presumably) worker thread does in its current task.
     * If `true`, then the task will be dispatched in a FIFO manner and no additional workers will be requested,
     * but only if the current thread is a corresponding worker thread.
     * Note that the caller cannot be sure that it is being executed on a worker thread for the following reasons:
     *   * [CoroutineStart.UNDISPATCHED]
     *   * A concurrent [close] that effectively shuts down the worker thread
     */
    fun dispatch(block: Runnable, taskContext: TaskContext = NonBlockingContext, tailDispatch: Boolean = false) {
        trackTask() // this is needed for virtual time support
        val task = createTask(block, taskContext)
        // try to submit the task to the local queue and act depending on the result
        val currentWorker = currentWorker()
        val notAdded = currentWorker.submitToLocalQueue(task, tailDispatch)
        if (notAdded != null) {
            if (!addToGlobalQueue(notAdded)) {
                // Global queue is closed in the last step of close/shutdown -- no more tasks should be accepted
                throw RejectedExecutionException("$schedulerName was terminated")
            }
        }
        val skipUnpark = tailDispatch && currentWorker != null
        // Checking 'task' instead of 'notAdded' is completely okay
        if (task.mode == TASK_NON_BLOCKING) {
            if (skipUnpark) return
            signalCpuWork()
        } else {
            // Increment blocking tasks anyway
            signalBlockingWork(skipUnpark = skipUnpark)
        }
    }
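
    /*
     * For illustration: a dispatch from inside a worker of this scheduler with
     * tailDispatch = true enqueues to the tail of the local queue (fair order in
     * submitToLocalQueue) and skips the unpark signal, e.g.:
     *
     *   // running inside a task on this scheduler; `followUp` is a placeholder Runnable
     *   scheduler.dispatch(followUp, tailDispatch = true) // FIFO, no extra wakeup
     */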

    internal fun createTask(block: Runnable, taskContext: TaskContext): Task {
        val nanoTime = schedulerTimeSource.nanoTime()
        if (block is Task) {
            block.submissionTime = nanoTime
            block.taskContext = taskContext
            return block
        }
        return TaskImpl(block, nanoTime, taskContext)
    }

    private fun signalBlockingWork(skipUnpark: Boolean) {
        // Use state snapshot to avoid thread overprovision
        val stateSnapshot = incrementBlockingTasks()
        if (skipUnpark) return
        if (tryUnpark()) return
        if (tryCreateWorker(stateSnapshot)) return
        tryUnpark() // Try unpark again in case there was a race between permit release and parking
    }

    internal fun signalCpuWork() {
        if (tryUnpark()) return
        if (tryCreateWorker()) return
        tryUnpark()
    }

    private fun tryCreateWorker(state: Long = controlState.value): Boolean {
        val created = createdWorkers(state)
        val blocking = blockingTasks(state)
        val cpuWorkers = (created - blocking).coerceAtLeast(0)
        /*
         * We check how many threads there are to handle non-blocking work,
         * and create one more if we do not have enough of them.
         */
        if (cpuWorkers < corePoolSize) {
            val newCpuWorkers = createNewWorker()
            // If we've created the first CPU worker and corePoolSize > 1, then create
            // one more (second) CPU worker, so that stealing between them is operational
            if (newCpuWorkers == 1 && corePoolSize > 1) createNewWorker()
            if (newCpuWorkers > 0) return true
        }
        return false
    }

    private fun tryUnpark(): Boolean {
        while (true) {
            val worker = parkedWorkersStackPop() ?: return false
            if (worker.workerCtl.compareAndSet(PARKED, CLAIMED)) {
                LockSupport.unpark(worker)
                return true
            }
        }
    }

    /*
     * Returns the number of CPU workers after this function (including the new worker),
     * `0` if no worker was created, or `-1` if the scheduler was terminated.
     */
    private fun createNewWorker(): Int {
        synchronized(workers) {
            // Make sure we're not trying to resurrect a terminated scheduler
            if (isTerminated) return -1
            val state = controlState.value
            val created = createdWorkers(state)
            val blocking = blockingTasks(state)
            val cpuWorkers = (created - blocking).coerceAtLeast(0)
            // Double check for overprovision
            if (cpuWorkers >= corePoolSize) return 0
            if (created >= maxPoolSize) return 0
            // start & register new worker, commit index only after successful creation
            val newIndex = createdWorkers + 1
            require(newIndex > 0 && workers[newIndex] == null)
            /*
             * 1) Claim the slot (under a lock) by the newly created worker
             * 2) Make it observable by incrementing the created workers count
             * 3) Only then start the worker, otherwise it may miss its own creation
             */
            val worker = Worker(newIndex)
            workers[newIndex] = worker
            require(newIndex == incrementCreatedWorkers())
            worker.start()
            return cpuWorkers + 1
        }
    }

    /**
     * Returns `null` if the task was successfully added or an instance of the
     * task that was not added or was replaced (and thus should be added to the global queue).
     */
    private fun Worker?.submitToLocalQueue(task: Task, tailDispatch: Boolean): Task? {
        if (this == null) return task
        /*
         * This worker could have been already terminated from this thread by close/shutdown and it should not
         * accept any more tasks into its local queue.
         */
        if (state === WorkerState.TERMINATED) return task
        // Do not add CPU tasks to the local queue if we are not able to execute them
        if (task.mode == TASK_NON_BLOCKING && state === WorkerState.BLOCKING) {
            return task
        }
        mayHaveLocalTasks = true
        return localQueue.add(task, fair = tailDispatch)
    }

    private fun currentWorker(): Worker? = (Thread.currentThread() as? Worker)?.takeIf { it.scheduler == this }

    /**
     * Returns a string identifying the state of this scheduler for nicer debugging.
     * Note that this method is not atomic and represents a rough state of the pool.
     *
     * State of the queues:
     * b for blocking, c for CPU, d for dormant.
     * E.g. [1b, 1b, 2c, 1d] means that the pool has
     * two blocking workers with queue size 1, one worker with a CPU permit and queue size 2,
     * and one dormant worker (executing its local queue before parking) with queue size 1.
     */
    override fun toString(): String {
        var parkedWorkers = 0
        var blockingWorkers = 0
        var cpuWorkers = 0
        var dormant = 0
        var terminated = 0
        val queueSizes = arrayListOf<String>()
        for (index in 1 until workers.length()) {
            val worker = workers[index] ?: continue
            val queueSize = worker.localQueue.size
            when (worker.state) {
                WorkerState.PARKING -> ++parkedWorkers
                WorkerState.BLOCKING -> {
                    ++blockingWorkers
                    queueSizes += queueSize.toString() + "b" // Blocking
                }
                WorkerState.CPU_ACQUIRED -> {
                    ++cpuWorkers
                    queueSizes += queueSize.toString() + "c" // CPU
                }
                WorkerState.DORMANT -> {
                    ++dormant
                    if (queueSize > 0) queueSizes += queueSize.toString() + "d" // Dormant
                }
                WorkerState.TERMINATED -> ++terminated
            }
        }
        val state = controlState.value
        return "$schedulerName@$hexAddress[" +
                "Pool Size {" +
                    "core = $corePoolSize, " +
                    "max = $maxPoolSize}, " +
                "Worker States {" +
                    "CPU = $cpuWorkers, " +
                    "blocking = $blockingWorkers, " +
                    "parked = $parkedWorkers, " +
                    "dormant = $dormant, " +
                    "terminated = $terminated}, " +
                "running workers queues = $queueSizes, " +
                "global CPU queue size = ${globalCpuQueue.size}, " +
                "global blocking queue size = ${globalBlockingQueue.size}, " +
                "Control State {" +
                    "created workers = ${createdWorkers(state)}, " +
                    "blocking tasks = ${blockingTasks(state)}, " +
                    "CPUs acquired = ${corePoolSize - availableCpuPermits(state)}" +
                "}]"
    }

    fun runSafely(task: Task) {
        try {
            task.run()
        } catch (e: Throwable) {
            val thread = Thread.currentThread()
            thread.uncaughtExceptionHandler.uncaughtException(thread, e)
        } finally {
            unTrackTask()
        }
    }

    internal inner class Worker private constructor() : Thread() {
        init {
            isDaemon = true
        }

        // guarded by scheduler lock, index in workers array, 0 when not in array (terminated)
        @Volatile // volatile for push/pop operation into parkedWorkersStack
        var indexInArray = 0
            set(index) {
                name = "$schedulerName-worker-${if (index == 0) "TERMINATED" else index.toString()}"
                field = index
            }

        constructor(index: Int) : this() {
            indexInArray = index
        }

        inline val scheduler get() = this@CoroutineScheduler

        @JvmField
        val localQueue: WorkQueue = WorkQueue()

        /**
         * Worker state. **Updated only by this worker thread**.
         * By default, a worker is in the DORMANT state after it was created, but all CPU tokens or tasks were taken.
         * It is used locally by the worker to maintain its own invariants.
         */
        @JvmField
        var state = WorkerState.DORMANT

        /**
         * Worker control state responsible for worker claiming, parking and termination.
         * List of states:
         * [PARKED] -- worker is parked and can self-terminate after a termination deadline.
         * [CLAIMED] -- worker is claimed by an external submitter.
         * [TERMINATED] -- worker is terminated and no longer usable.
         */
        val workerCtl = atomic(CLAIMED)

        /**
         * It is set to the termination deadline when the worker starts parking (see [park])
         * and is reset when a task is found. It serves as protection against spurious wakeups of parkNanos.
         */
        private var terminationDeadline = 0L

        /**
         * Reference to the next worker in the [parkedWorkersStack].
         * It may be `null` if there is no next parked worker.
         * This reference is set to [NOT_IN_STACK] when the worker is physically not in the stack.
         */
        @Volatile
        var nextParkedWorker: Any? = NOT_IN_STACK

        /*
         * The delay until at least one task in other worker queues will become stealable.
         */
        private var minDelayUntilStealableTaskNs = 0L

        private var rngState = Random.nextInt()

        /**
         * Tries to acquire a CPU token if the worker doesn't have one.
         * @return whether the worker acquired (or already had) a CPU token
         */
        private fun tryAcquireCpuPermit(): Boolean = when {
            state == WorkerState.CPU_ACQUIRED -> true
            this@CoroutineScheduler.tryAcquireCpuPermit() -> {
                state = WorkerState.CPU_ACQUIRED
                true
            }
            else -> false
        }

        /**
         * Releases the CPU token if the worker has any and changes its state to [newState].
         * Returns `true` if the CPU permit was returned to the pool.
         */
        internal fun tryReleaseCpu(newState: WorkerState): Boolean {
            val previousState = state
            val hadCpu = previousState == WorkerState.CPU_ACQUIRED
            if (hadCpu) releaseCpuPermit()
            if (previousState != newState) state = newState
            return hadCpu
        }

        override fun run() = runWorker()

        @JvmField
        var mayHaveLocalTasks = false

        private fun runWorker() {
            var rescanned = false
            while (!isTerminated && state != WorkerState.TERMINATED) {
                val task = findTask(mayHaveLocalTasks)
                // Task found. Execute and repeat
                if (task != null) {
                    rescanned = false
                    minDelayUntilStealableTaskNs = 0L
                    executeTask(task)
                    continue
                } else {
                    mayHaveLocalTasks = false
                }
                /*
                 * No tasks were found:
                 * 1) Either at least one of the workers has a stealable task in its FIFO-buffer with a stealing deadline.
                 *    Then its deadline is stored in [minDelayUntilStealableTaskNs].
                 *
                 * Then just rescan once and, if nothing is found, park for that duration.
                 * While it could potentially lead to short (up to WORK_STEALING_TIME_RESOLUTION_NS ns) starvations,
                 * excess unparks and managing the "one unpark per signalling" invariant become infeasible;
                 * instead we resolve it with a "spinning via scans" mechanism.
                 * NB: this short potential parking does not interfere with `tryUnpark`.
                 */
                if (minDelayUntilStealableTaskNs != 0L) {
                    if (!rescanned) {
                        rescanned = true
                    } else {
                        rescanned = false
                        tryReleaseCpu(WorkerState.PARKING)
                        interrupted()
                        LockSupport.parkNanos(minDelayUntilStealableTaskNs)
                        minDelayUntilStealableTaskNs = 0L
                    }
                    continue
                }
                /*
                 * 2) Or no tasks are available, so it is time to park and, potentially, shut down the thread.
                 * The worker adds itself to the stack of parked workers, re-scans all the queues
                 * to avoid missing a wake-up (see [signalCpuWork]) and either starts executing discovered tasks
                 * or parks itself awaiting new tasks.
                 */
                tryPark()
            }
            tryReleaseCpu(WorkerState.TERMINATED)
        }

        // Counterpart to "tryUnpark"
        private fun tryPark() {
            if (!inStack()) {
                parkedWorkersStackPush(this)
                return
            }
            assert { localQueue.size == 0 }
            workerCtl.value = PARKED // Update value once
            while (inStack()) { // Prevent spurious wakeups
                if (isTerminated || state == WorkerState.TERMINATED) break
                tryReleaseCpu(WorkerState.PARKING)
                interrupted() // Cleanup interruptions
                park()
            }
        }

        private fun inStack(): Boolean = nextParkedWorker !== NOT_IN_STACK

        private fun executeTask(task: Task) {
            val taskMode = task.mode
            idleReset(taskMode)
            beforeTask(taskMode)
            runSafely(task)
            afterTask(taskMode)
        }

        private fun beforeTask(taskMode: Int) {
            if (taskMode == TASK_NON_BLOCKING) return
            // Always notify about new work when releasing CPU-permit to execute some blocking task
            if (tryReleaseCpu(WorkerState.BLOCKING)) {
                signalCpuWork()
            }
        }

        private fun afterTask(taskMode: Int) {
            if (taskMode == TASK_NON_BLOCKING) return
            decrementBlockingTasks()
            val currentState = state
            // Shutdown sequence of blocking dispatcher
            if (currentState !== WorkerState.TERMINATED) {
                assert { currentState == WorkerState.BLOCKING } // "Expected BLOCKING state, but has $currentState"
                state = WorkerState.DORMANT
            }
        }
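
        /*
         * Lifecycle of a blocking task on a worker, traced through the two hooks above:
         *
         *   beforeTask: release the CPU permit (state -> BLOCKING) and signalCpuWork(),
         *               so another thread can pick up pending CPU work in the meantime;
         *   runSafely:  execute the potentially blocking body;
         *   afterTask:  decrementBlockingTasks(), state -> DORMANT, and the worker
         *               competes for a CPU permit again on its next findTask call.
         */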

        /*
         * Marsaglia xorshift RNG with period 2^32 - 1 for work stealing purposes.
         * ThreadLocalRandom cannot be used to support Android, and ThreadLocal<Random> is up to 15% slower on Ktor benchmarks.
         */
        internal fun nextInt(upperBound: Int): Int {
            var r = rngState
            r = r xor (r shl 13)
            r = r xor (r shr 17)
            r = r xor (r shl 5)
            rngState = r
            val mask = upperBound - 1
            // Fast path for power of two bound
            if (mask and upperBound == 0) {
                return r and mask
            }
            return (r and Int.MAX_VALUE) % upperBound
        }
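
        /*
         * A worked example of the bound handling above: for upperBound = 8, mask = 7 and
         * (7 and 8) == 0, so the fast path returns r and 7 -- the low three bits.
         * For upperBound = 6, (5 and 6) == 4 != 0, so the slow path clears the sign bit
         * with (r and Int.MAX_VALUE) before taking the modulo, keeping the result in 0..5.
         */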

        private fun park() {
            // set termination deadline the first time we are here (it is reset in idleReset)
            if (terminationDeadline == 0L) terminationDeadline = System.nanoTime() + idleWorkerKeepAliveNs
            // actually park
            LockSupport.parkNanos(idleWorkerKeepAliveNs)
            // try to terminate when we are idle past the termination deadline
            // note that the comparison is written like this to protect against potential nanoTime wraparound
            if (System.nanoTime() - terminationDeadline >= 0) {
                terminationDeadline = 0L // if the attempt to terminate the worker fails, we'd extend the deadline again
                tryTerminateWorker()
            }
        }
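
        /*
         * Why `now - deadline >= 0` instead of `now >= deadline`: nanoTime() values may
         * wrap around Long.MAX_VALUE, so an absolute comparison can be wrong near the wrap,
         * while the signed difference of two readings taken less than ~292 years apart
         * stays correct under two's-complement overflow (see the System.nanoTime docs).
         */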

        /**
         * Stops execution of the current thread and removes it from [createdWorkers].
         */
        private fun tryTerminateWorker() {
            synchronized(workers) {
                // Make sure we're not racing with the termination of the scheduler
                if (isTerminated) return
                // Don't terminate if it would bring the pool below the core size
                if (createdWorkers <= corePoolSize) return
                /*
                 * See tryUnpark for state reasoning.
                 * If this CAS fails, then we were successfully unparked by another worker and cannot terminate.
                 */
                if (!workerCtl.compareAndSet(PARKED, TERMINATED)) return
                /*
                 * At this point this thread is no longer considered as usable for scheduling.
                 * We need multi-step choreography to reindex workers.
                 *
                 * 1) Read the current worker's index and reset it to zero.
                 */
                val oldIndex = indexInArray
                indexInArray = 0
                /*
                 * Now this worker cannot become the top of parkedWorkersStack, but it can
                 * still be at the stack top via oldIndex.
                 *
                 * 2) Update the top of the stack if it was pointing to oldIndex and make sure no
                 *    pending push/pop operation that might have already retrieved oldIndex could complete.
                 */
                parkedWorkersStackTopUpdate(this, oldIndex, 0)
                /*
                 * 3) Move the last worker into the index in the array that was previously occupied by this worker,
                 *    if the last worker was a different one (sic!).
                 */
                val lastIndex = decrementCreatedWorkers()
                if (lastIndex != oldIndex) {
                    val lastWorker = workers[lastIndex]!!
                    workers[oldIndex] = lastWorker
                    lastWorker.indexInArray = oldIndex
                    /*
                     * Now lastWorker is available at both indices in the array, but it can
                     * still be at the stack top via its lastIndex.
                     *
                     * 4) Update the top of the stack lastIndex -> oldIndex and make sure no
                     *    pending push/pop operation that might have already retrieved lastIndex could complete.
                     */
                    parkedWorkersStackTopUpdate(lastWorker, lastIndex, oldIndex)
                }
                /*
                 * 5) It is safe to clear the reference from the workers array now.
                 */
                workers[lastIndex] = null
            }
            state = WorkerState.TERMINATED
        }

        // It is invoked by this worker when it finds a task
        private fun idleReset(mode: Int) {
            terminationDeadline = 0L // reset deadline for termination
            if (state == WorkerState.PARKING) {
                assert { mode == TASK_PROBABLY_BLOCKING }
                state = WorkerState.BLOCKING
            }
        }

        fun findTask(scanLocalQueue: Boolean): Task? {
            if (tryAcquireCpuPermit()) return findAnyTask(scanLocalQueue)
            // If we can't acquire a CPU permit -- attempt to find a blocking task
            val task = if (scanLocalQueue) {
                localQueue.poll() ?: globalBlockingQueue.removeFirstOrNull()
            } else {
                globalBlockingQueue.removeFirstOrNull()
            }
            return task ?: trySteal(blockingOnly = true)
        }

        private fun findAnyTask(scanLocalQueue: Boolean): Task? {
            /*
             * Anti-starvation mechanism: probabilistically poll either the local
             * or the global queue to ensure progress for both external and internal tasks.
             */
            if (scanLocalQueue) {
                val globalFirst = nextInt(2 * corePoolSize) == 0
                if (globalFirst) pollGlobalQueues()?.let { return it }
                localQueue.poll()?.let { return it }
                if (!globalFirst) pollGlobalQueues()?.let { return it }
            } else {
                pollGlobalQueues()?.let { return it }
            }
            return trySteal(blockingOnly = false)
        }
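
        /*
         * For example, with corePoolSize = 4 the global queues are polled first with
         * probability 1/8 (nextInt(8) == 0); otherwise the local queue has priority.
         * This bounds the delay of externally submitted tasks without giving up
         * locality on the common path.
         */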

        private fun pollGlobalQueues(): Task? {
            if (nextInt(2) == 0) {
                globalCpuQueue.removeFirstOrNull()?.let { return it }
                return globalBlockingQueue.removeFirstOrNull()
            } else {
                globalBlockingQueue.removeFirstOrNull()?.let { return it }
                return globalCpuQueue.removeFirstOrNull()
            }
        }

        private fun trySteal(blockingOnly: Boolean): Task? {
            assert { localQueue.size == 0 }
            val created = createdWorkers
            // created == 0 means initialization is still in progress; created == 1 avoids excess stealing on single-core machines
            if (created < 2) {
                return null
            }

            var currentIndex = nextInt(created)
            var minDelay = Long.MAX_VALUE
            repeat(created) {
                ++currentIndex
                if (currentIndex > created) currentIndex = 1
                val worker = workers[currentIndex]
                if (worker !== null && worker !== this) {
                    assert { localQueue.size == 0 }
                    val stealResult = if (blockingOnly) {
                        localQueue.tryStealBlockingFrom(victim = worker.localQueue)
                    } else {
                        localQueue.tryStealFrom(victim = worker.localQueue)
                    }
                    if (stealResult == TASK_STOLEN) {
                        return localQueue.poll()
                    } else if (stealResult > 0) {
                        minDelay = min(minDelay, stealResult)
                    }
                }
            }
            minDelayUntilStealableTaskNs = if (minDelay != Long.MAX_VALUE) minDelay else 0
            return null
        }
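
        /*
         * Scan order sketch: with created = 4 and a random start of nextInt(4) == 2,
         * the loop above visits worker indices 3, 4, 1, 2 (wrapping past `created`
         * back to 1, since workers are 1-indexed), skipping null slots and `this`.
         */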
    }

    enum class WorkerState {
        /**
         * Has a CPU token and either executes a [TASK_NON_BLOCKING] task or tries to find one.
         */
        CPU_ACQUIRED,

        /**
         * Executing a task with [TASK_PROBABLY_BLOCKING] mode.
         */
        BLOCKING,

        /**
         * Currently parked.
         */
        PARKING,

        /**
         * Tries to execute its local work and then parks as a worker that is no longer needed.
         */
        DORMANT,

        /**
         * Terminal state, will no longer be used.
         */
        TERMINATED
    }
}

/**
 * Checks if the thread is part of a thread pool that supports coroutines.
 * This function is needed for integration with BlockHound.
 */
@Suppress("UNUSED")
@JvmName("isSchedulerWorker")
internal fun isSchedulerWorker(thread: Thread) = thread is CoroutineScheduler.Worker

/**
 * Checks if the thread is running a CPU-bound task.
 * This function is needed for integration with BlockHound.
 */
@Suppress("UNUSED")
@JvmName("mayNotBlock")
internal fun mayNotBlock(thread: Thread) = thread is CoroutineScheduler.Worker &&
    thread.state == CoroutineScheduler.WorkerState.CPU_ACQUIRED