/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv-common.h"

#if !defined(_WIN32)
# include "unix/internal.h"
#endif

#include <stdlib.h>
#ifdef USE_FFRT
#include <assert.h>
#include "ffrt_inner.h"
#endif

#define MAX_THREADPOOL_SIZE 1024

static uv_cond_t cond;
static uv_mutex_t mutex;
static unsigned int idle_threads;
static unsigned int nthreads;
static uv_thread_t* threads;
static uv_thread_t default_threads[4];
static QUEUE exit_message;
static QUEUE wq;
static QUEUE run_slow_work_message;
static QUEUE slow_io_pending_wq;

static void uv__cancelled(struct uv__work* w) {
  abort();
}

static uv_once_t once = UV_ONCE_INIT;
#ifndef USE_FFRT
static unsigned int slow_io_work_running;

static unsigned int slow_work_thread_threshold(void) {
  return (nthreads + 1) / 2;
}
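
/* With the default pool of 4 threads, (4 + 1) / 2 == 2, so at most two
 * workers may be tied up in slow I/O at any time while the rest stay
 * available for CPU-bound and fast I/O work. */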


/* To avoid deadlock with uv_cancel() it's crucial that the worker
 * never holds the global mutex and the loop-local mutex at the same time.
 */
static void worker(void* arg) {
  struct uv__work* w;
  QUEUE* q;
  int is_slow_work;

  uv_sem_post((uv_sem_t*) arg);
  arg = NULL;

  uv_mutex_lock(&mutex);
  for (;;) {
    /* `mutex` should always be locked at this point. */

    /* Keep waiting while either no work is present or only slow I/O
       and we're at the threshold for that. */
    while (QUEUE_EMPTY(&wq) ||
           (QUEUE_HEAD(&wq) == &run_slow_work_message &&
            QUEUE_NEXT(&run_slow_work_message) == &wq &&
            slow_io_work_running >= slow_work_thread_threshold())) {
      idle_threads += 1;
      uv_cond_wait(&cond, &mutex);
      idle_threads -= 1;
    }

    q = QUEUE_HEAD(&wq);
    if (q == &exit_message) {
      uv_cond_signal(&cond);
      uv_mutex_unlock(&mutex);
      break;
    }

    QUEUE_REMOVE(q);
    QUEUE_INIT(q);  /* Signal uv_cancel() that the work req is executing. */

    is_slow_work = 0;
    if (q == &run_slow_work_message) {
      /* If we're at the slow I/O threshold, re-schedule until after all
         other work in the queue is done. */
      if (slow_io_work_running >= slow_work_thread_threshold()) {
        QUEUE_INSERT_TAIL(&wq, q);
        continue;
      }

      /* If we encountered a request to run slow I/O work but there is none
         to run, that means it's cancelled => Start over. */
      if (QUEUE_EMPTY(&slow_io_pending_wq))
        continue;

      is_slow_work = 1;
      slow_io_work_running++;

      q = QUEUE_HEAD(&slow_io_pending_wq);
      QUEUE_REMOVE(q);
      QUEUE_INIT(q);

      /* If there is more slow I/O work, schedule it to be run as well. */
      if (!QUEUE_EMPTY(&slow_io_pending_wq)) {
        QUEUE_INSERT_TAIL(&wq, &run_slow_work_message);
        if (idle_threads > 0)
          uv_cond_signal(&cond);
      }
    }

    uv_mutex_unlock(&mutex);

    w = QUEUE_DATA(q, struct uv__work, wq);
    w->work(w);

    uv_mutex_lock(&w->loop->wq_mutex);
    w->work = NULL;  /* Signal uv_cancel() that the work req is done
                        executing. */
    QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
    uv_async_send(&w->loop->wq_async);
    uv_mutex_unlock(&w->loop->wq_mutex);

    /* Lock `mutex` since that is expected at the start of the next
     * iteration. */
    uv_mutex_lock(&mutex);
    if (is_slow_work) {
      /* `slow_io_work_running` is protected by `mutex`. */
      slow_io_work_running--;
    }
  }
}
#endif


static void post(QUEUE* q, enum uv__work_kind kind) {
  uv_mutex_lock(&mutex);
  if (kind == UV__WORK_SLOW_IO) {
    /* Insert into a separate queue. */
    QUEUE_INSERT_TAIL(&slow_io_pending_wq, q);
    if (!QUEUE_EMPTY(&run_slow_work_message)) {
      /* Running slow I/O tasks is already scheduled => Nothing to do here.
         The worker that runs said other task will schedule this one as well. */
      uv_mutex_unlock(&mutex);
      return;
    }
    q = &run_slow_work_message;
  }

  QUEUE_INSERT_TAIL(&wq, q);
  if (idle_threads > 0)
    uv_cond_signal(&cond);
  uv_mutex_unlock(&mutex);
}


#ifdef __MVS__
/* TODO(itodorov) - zos: revisit when Woz compiler is available. */
__attribute__((destructor))
#endif
void uv__threadpool_cleanup(void) {
  unsigned int i;

  if (nthreads == 0)
    return;

#ifndef __MVS__
  /* TODO(gabylb) - zos: revisit when Woz compiler is available. */
  post(&exit_message, UV__WORK_CPU);
#endif

  for (i = 0; i < nthreads; i++)
    if (uv_thread_join(threads + i))
      abort();

  if (threads != default_threads)
    uv__free(threads);

  uv_mutex_destroy(&mutex);
  uv_cond_destroy(&cond);

  threads = NULL;
  nthreads = 0;
}


#ifndef USE_FFRT
static void init_threads(void) {
  unsigned int i;
  const char* val;
  uv_sem_t sem;

  nthreads = ARRAY_SIZE(default_threads);
  val = getenv("UV_THREADPOOL_SIZE");
  if (val != NULL)
    nthreads = atoi(val);
  if (nthreads == 0)
    nthreads = 1;
  if (nthreads > MAX_THREADPOOL_SIZE)
    nthreads = MAX_THREADPOOL_SIZE;

  threads = default_threads;
  if (nthreads > ARRAY_SIZE(default_threads)) {
    threads = uv__malloc(nthreads * sizeof(threads[0]));
    if (threads == NULL) {
      nthreads = ARRAY_SIZE(default_threads);
      threads = default_threads;
    }
  }

  if (uv_cond_init(&cond))
    abort();

  if (uv_mutex_init(&mutex))
    abort();

  QUEUE_INIT(&wq);
  QUEUE_INIT(&slow_io_pending_wq);
  QUEUE_INIT(&run_slow_work_message);

  if (uv_sem_init(&sem, 0))
    abort();

  for (i = 0; i < nthreads; i++)
    if (uv_thread_create(threads + i, worker, &sem))
      abort();

  for (i = 0; i < nthreads; i++)
    uv_sem_wait(&sem);

  uv_sem_destroy(&sem);
}
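
/* Usage note (illustrative, not from the original sources): the pool size is
 * read from UV_THREADPOOL_SIZE only once, when the first work request is
 * submitted, so the variable must be set before anything touches the pool.
 * A minimal sketch, assuming a POSIX environment:
 *
 *   #include <stdlib.h>
 *   #include <uv.h>
 *
 *   int main(void) {
 *     setenv("UV_THREADPOOL_SIZE", "8", 1);   // before the first submission
 *     uv_loop_t* loop = uv_default_loop();
 *     // ... queue fs/dns/work requests here ...
 *     return uv_run(loop, UV_RUN_DEFAULT);
 *   }
 */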


#ifndef _WIN32
static void reset_once(void) {
  uv_once_t child_once = UV_ONCE_INIT;
  memcpy(&once, &child_once, sizeof(child_once));
}
#endif


static void init_once(void) {
#ifndef _WIN32
  /* Re-initialize the threadpool after fork.
   * Note that this discards the global mutex and condition as well
   * as the work queue.
   */
  if (pthread_atfork(NULL, NULL, &reset_once))
    abort();
#endif
  init_threads();
}


void uv__work_submit(uv_loop_t* loop,
                     struct uv__work* w,
                     enum uv__work_kind kind,
                     void (*work)(struct uv__work* w),
                     void (*done)(struct uv__work* w, int status)) {
  uv_once(&once, init_once);
  w->loop = loop;
  w->work = work;
  w->done = done;
  post(&w->wq, kind);
}
#endif
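
/* Submission path on non-FFRT builds: uv__work_submit() hands the request to
 * post(), a worker thread runs w->work() and moves the finished request onto
 * loop->wq, and uv_async_send(&loop->wq_async) wakes the event loop so that
 * uv__work_done() can run w->done() on the loop thread. */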


static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
  int cancelled;

#ifndef USE_FFRT
  uv_mutex_lock(&mutex);
  uv_mutex_lock(&w->loop->wq_mutex);

  cancelled = !QUEUE_EMPTY(&w->wq) && w->work != NULL;
  if (cancelled)
    QUEUE_REMOVE(&w->wq);

  uv_mutex_unlock(&w->loop->wq_mutex);
  uv_mutex_unlock(&mutex);
#else
  uv_mutex_lock(&w->loop->wq_mutex);
  cancelled = !QUEUE_EMPTY(&w->wq) && w->work != NULL
    && ffrt_executor_task_cancel(w, (ffrt_qos_t)(intptr_t)req->reserved[0]);
  uv_mutex_unlock(&w->loop->wq_mutex);
#endif

  if (!cancelled)
    return UV_EBUSY;

  w->work = uv__cancelled;
  uv_mutex_lock(&loop->wq_mutex);
#ifndef USE_FFRT
  QUEUE_INSERT_TAIL(&loop->wq, &w->wq);
#else
  uv__loop_internal_fields_t* lfields = uv__get_internal_fields(w->loop);
  int qos = (ffrt_qos_t)(intptr_t)req->reserved[0];
  QUEUE_INSERT_TAIL(&(lfields->wq_sub[qos]), &w->wq);
#endif
  uv_async_send(&loop->wq_async);
  uv_mutex_unlock(&loop->wq_mutex);

  return 0;
}


void uv__work_done(uv_async_t* handle) {
  struct uv__work* w;
  uv_loop_t* loop;
  QUEUE* q;
  QUEUE wq;
  int err;

  loop = container_of(handle, uv_loop_t, wq_async);
  uv_mutex_lock(&loop->wq_mutex);
#ifndef USE_FFRT
  QUEUE_MOVE(&loop->wq, &wq);
#else
  uv__loop_internal_fields_t* lfields = uv__get_internal_fields(loop);
  int i;
  QUEUE_INIT(&wq);
  /* Drain every per-QoS sub-queue into the local queue, higher QoS
   * indexes first. */
  for (i = 3; i >= 0; i--) {
    if (!QUEUE_EMPTY(&lfields->wq_sub[i])) {
      QUEUE_APPEND(&lfields->wq_sub[i], &wq);
    }
  }
#endif
  uv_mutex_unlock(&loop->wq_mutex);

  while (!QUEUE_EMPTY(&wq)) {
    q = QUEUE_HEAD(&wq);
    QUEUE_REMOVE(q);

    w = container_of(q, struct uv__work, wq);
    err = (w->work == uv__cancelled) ? UV_ECANCELED : 0;
    w->done(w, err);
  }
}


static void uv__queue_work(struct uv__work* w) {
  uv_work_t* req = container_of(w, uv_work_t, work_req);

  req->work_cb(req);
}


static void uv__queue_done(struct uv__work* w, int err) {
  uv_work_t* req;

  req = container_of(w, uv_work_t, work_req);
  uv__req_unregister(req->loop, req);

  if (req->after_work_cb == NULL)
    return;

  req->after_work_cb(req, err);
}


#ifdef USE_FFRT
void uv__ffrt_work(ffrt_executor_task_t* data, ffrt_qos_t qos)
{
  struct uv__work* w = (struct uv__work *)data;
  w->work(w);
  uv__loop_internal_fields_t* lfields = uv__get_internal_fields(w->loop);

  /* Skip posting the result if this QoS sub-queue was never initialized. */
  if (lfields->wq_sub[qos][0] == NULL || lfields->wq_sub[qos][1] == NULL)
    return;

  uv_mutex_lock(&w->loop->wq_mutex);
  w->work = NULL; /* Signal uv_cancel() that the work req is done executing. */
  QUEUE_INSERT_TAIL(&(lfields->wq_sub[qos]), &w->wq);
  uv_async_send(&w->loop->wq_async);
  uv_mutex_unlock(&w->loop->wq_mutex);
}

static void init_once(void)
{
  ffrt_executor_task_register_func(uv__ffrt_work, ffrt_uv_task);
}


/* FFRT variant of uv__work_submit(). */
void uv__work_submit(uv_loop_t* loop,
                     uv_req_t* req,
                     struct uv__work* w,
                     enum uv__work_kind kind,
                     void (*work)(struct uv__work *w),
                     void (*done)(struct uv__work *w, int status)) {
    uv_once(&once, init_once);
    ffrt_task_attr_t attr;
    ffrt_task_attr_init(&attr);

    switch (kind) {
      case UV__WORK_CPU:
        ffrt_task_attr_set_qos(&attr, ffrt_qos_default);
        break;
      case UV__WORK_FAST_IO:
        ffrt_task_attr_set_qos(&attr, ffrt_qos_default);
        break;
      case UV__WORK_SLOW_IO:
        ffrt_task_attr_set_qos(&attr, ffrt_qos_background);
        break;
      default:
        /* Unknown work kind: release the attr before bailing out. */
        ffrt_task_attr_destroy(&attr);
        return;
    }

    w->loop = loop;
    w->work = work;
    w->done = done;

    req->reserved[0] = (void *)(intptr_t)ffrt_task_attr_get_qos(&attr);
    ffrt_executor_task_submit((ffrt_executor_task_t *)w, &attr);
    ffrt_task_attr_destroy(&attr);
}


/* FFRT variant of uv__work_submit() with an explicit QoS. */
void uv__work_submit_with_qos(uv_loop_t* loop,
                     uv_req_t* req,
                     struct uv__work* w,
                     ffrt_qos_t qos,
                     void (*work)(struct uv__work *w),
                     void (*done)(struct uv__work *w, int status)) {
    uv_once(&once, init_once);
    ffrt_task_attr_t attr;
    ffrt_task_attr_init(&attr);
    ffrt_task_attr_set_qos(&attr, qos);

    w->loop = loop;
    w->work = work;
    w->done = done;

    req->reserved[0] = (void *)(intptr_t)ffrt_task_attr_get_qos(&attr);
    ffrt_executor_task_submit((ffrt_executor_task_t *)w, &attr);
    ffrt_task_attr_destroy(&attr);
}
#endif


int uv_queue_work(uv_loop_t* loop,
                  uv_work_t* req,
                  uv_work_cb work_cb,
                  uv_after_work_cb after_work_cb) {
  if (work_cb == NULL)
    return UV_EINVAL;

  uv__req_init(loop, req, UV_WORK);
  req->loop = loop;
  req->work_cb = work_cb;
  req->after_work_cb = after_work_cb;
  uv__work_submit(loop,
#ifdef USE_FFRT
                  (uv_req_t*)req,
#endif
                  &req->work_req,
                  UV__WORK_CPU,
                  uv__queue_work,
                  uv__queue_done);
  return 0;
}
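
/* Usage sketch (illustrative only, not part of the original file): submit a
 * blocking job to the pool and collect the result on the loop thread. The
 * fib() helper and the request layout are hypothetical.
 *
 *   static void work_cb(uv_work_t* req) {
 *     // Runs on a pool thread; do the blocking or CPU-heavy part here.
 *     *(int*) req->data = fib(30);
 *   }
 *
 *   static void after_cb(uv_work_t* req, int status) {
 *     // Runs on the loop thread; status is UV_ECANCELED if cancelled.
 *     printf("result=%d status=%d\n", *(int*) req->data, status);
 *   }
 *
 *   int result;
 *   uv_work_t req;
 *   req.data = &result;
 *   uv_queue_work(uv_default_loop(), &req, work_cb, after_cb);
 *   uv_run(uv_default_loop(), UV_RUN_DEFAULT);
 */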


int uv_queue_work_with_qos(uv_loop_t* loop,
                  uv_work_t* req,
                  uv_work_cb work_cb,
                  uv_after_work_cb after_work_cb,
                  uv_qos_t qos) {
#ifdef USE_FFRT
  if (work_cb == NULL)
    return UV_EINVAL;

  STATIC_ASSERT(uv_qos_background == ffrt_qos_background);
  STATIC_ASSERT(uv_qos_utility == ffrt_qos_utility);
  STATIC_ASSERT(uv_qos_default == ffrt_qos_default);
  STATIC_ASSERT(uv_qos_user_initiated == ffrt_qos_user_initiated);
  if (qos < ffrt_qos_background || qos > ffrt_qos_user_initiated) {
    return UV_EINVAL;
  }

  uv__req_init(loop, req, UV_WORK);
  req->loop = loop;
  req->work_cb = work_cb;
  req->after_work_cb = after_work_cb;
  uv__work_submit_with_qos(loop,
                  (uv_req_t*)req,
                  &req->work_req,
                  (ffrt_qos_t)qos,
                  uv__queue_work,
                  uv__queue_done);
  return 0;
#else
  return uv_queue_work(loop, req, work_cb, after_work_cb);
#endif
}
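
/* Usage sketch (illustrative only): same contract as uv_queue_work(), plus a
 * QoS hint that is mapped onto FFRT in USE_FFRT builds; elsewhere the hint is
 * ignored and the call falls back to uv_queue_work().
 *
 *   uv_queue_work_with_qos(uv_default_loop(), &req, work_cb, after_cb,
 *                          uv_qos_background);
 */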


int uv_cancel(uv_req_t* req) {
  struct uv__work* wreq;
  uv_loop_t* loop;

  switch (req->type) {
  case UV_FS:
    loop =  ((uv_fs_t*) req)->loop;
    wreq = &((uv_fs_t*) req)->work_req;
    break;
  case UV_GETADDRINFO:
    loop =  ((uv_getaddrinfo_t*) req)->loop;
    wreq = &((uv_getaddrinfo_t*) req)->work_req;
    break;
  case UV_GETNAMEINFO:
    loop = ((uv_getnameinfo_t*) req)->loop;
    wreq = &((uv_getnameinfo_t*) req)->work_req;
    break;
  case UV_RANDOM:
    loop = ((uv_random_t*) req)->loop;
    wreq = &((uv_random_t*) req)->work_req;
    break;
  case UV_WORK:
    loop =  ((uv_work_t*) req)->loop;
    wreq = &((uv_work_t*) req)->work_req;
    break;
  default:
    return UV_EINVAL;
  }

  return uv__work_cancel(loop, req, wreq);
}
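
/* Usage sketch (illustrative only): cancellation succeeds only while the
 * request is still waiting in the queue; once a worker has picked it up,
 * uv_cancel() returns UV_EBUSY. A successfully cancelled request still gets
 * its after-work callback, with status set to UV_ECANCELED.
 *
 *   int r = uv_cancel((uv_req_t*) &req);
 *   if (r == 0) {
 *     // after_cb will run with status == UV_ECANCELED
 *   } else if (r == UV_EBUSY) {
 *     // too late: the work callback is already running or has finished
 *   }
 */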