1 /* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2  *
3  * Permission is hereby granted, free of charge, to any person obtaining a copy
4  * of this software and associated documentation files (the "Software"), to
5  * deal in the Software without restriction, including without limitation the
6  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7  * sell copies of the Software, and to permit persons to whom the Software is
8  * furnished to do so, subject to the following conditions:
9  *
10  * The above copyright notice and this permission notice shall be included in
11  * all copies or substantial portions of the Software.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19  * IN THE SOFTWARE.
20  */
21 
22 #include "uv.h"
23 #include "internal.h"
24 
25 #include <assert.h>
26 #include <string.h>
27 #include <errno.h>
28 #include <stdlib.h>
29 #include <unistd.h>
30 #if defined(__MVS__)
31 #include <xti.h>
32 #endif
33 #include <sys/un.h>
34 
35 #if defined(IPV6_JOIN_GROUP) && !defined(IPV6_ADD_MEMBERSHIP)
36 # define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP
37 #endif
38 
39 #if defined(IPV6_LEAVE_GROUP) && !defined(IPV6_DROP_MEMBERSHIP)
40 # define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
41 #endif
42 
43 union uv__sockaddr {
44   struct sockaddr_in6 in6;
45   struct sockaddr_in in;
46   struct sockaddr addr;
47 };
48 
49 static void uv__udp_run_completed(uv_udp_t* handle);
50 static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
51 static void uv__udp_recvmsg(uv_udp_t* handle);
52 static void uv__udp_sendmsg(uv_udp_t* handle);
53 static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
54                                        int domain,
55                                        unsigned int flags);
56 
57 #if HAVE_MMSG
58 
59 #define UV__MMSG_MAXWIDTH 20
60 
61 static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf);
62 static void uv__udp_sendmmsg(uv_udp_t* handle);
63 
64 static int uv__recvmmsg_avail;
65 static int uv__sendmmsg_avail;
66 static uv_once_t once = UV_ONCE_INIT;
67 
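/* Probe once for kernel sendmmsg()/recvmmsg() support: call them with an
 * empty vector on a throwaway socket and check for ENOSYS.
 */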
68 static void uv__udp_mmsg_init(void) {
69   int ret;
70   int s;
71   s = uv__socket(AF_INET, SOCK_DGRAM, 0);
72   if (s < 0)
73     return;
74   ret = uv__sendmmsg(s, NULL, 0);
75   if (ret == 0 || errno != ENOSYS) {
76     uv__sendmmsg_avail = 1;
77     uv__recvmmsg_avail = 1;
78   } else {
79     ret = uv__recvmmsg(s, NULL, 0);
80     if (ret == 0 || errno != ENOSYS)
81       uv__recvmmsg_avail = 1;
82   }
83   uv__close(s);
84 }
85 
86 #endif
87 
88 void uv__udp_close(uv_udp_t* handle) {
89   uv__io_close(handle->loop, &handle->io_watcher);
90   uv__handle_stop(handle);
91 
92   if (handle->io_watcher.fd != -1) {
93     uv__close(handle->io_watcher.fd);
94     handle->io_watcher.fd = -1;
95   }
96 }
97 
98 
99 void uv__udp_finish_close(uv_udp_t* handle) {
100   uv_udp_send_t* req;
101   QUEUE* q;
102 
103   assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT));
104   assert(handle->io_watcher.fd == -1);
105 
106   while (!QUEUE_EMPTY(&handle->write_queue)) {
107     q = QUEUE_HEAD(&handle->write_queue);
108     QUEUE_REMOVE(q);
109 
110     req = QUEUE_DATA(q, uv_udp_send_t, queue);
111     req->status = UV_ECANCELED;
112     QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
113   }
114 
115   uv__udp_run_completed(handle);
116 
117   assert(handle->send_queue_size == 0);
118   assert(handle->send_queue_count == 0);
119 
120   /* Now tear down the handle. */
121   handle->recv_cb = NULL;
122   handle->alloc_cb = NULL;
123   /* but _do not_ touch close_cb */
124 }
125 
126 
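/* Drain the write_completed_queue: release request buffers, invoke the send
 * callbacks and stop watching for POLLOUT once both queues are empty.
 */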
127 static void uv__udp_run_completed(uv_udp_t* handle) {
128   uv_udp_send_t* req;
129   QUEUE* q;
130 
131   assert(!(handle->flags & UV_HANDLE_UDP_PROCESSING));
132   handle->flags |= UV_HANDLE_UDP_PROCESSING;
133 
134   while (!QUEUE_EMPTY(&handle->write_completed_queue)) {
135     q = QUEUE_HEAD(&handle->write_completed_queue);
136     QUEUE_REMOVE(q);
137 
138     req = QUEUE_DATA(q, uv_udp_send_t, queue);
139     uv__req_unregister(handle->loop, req);
140 
141     handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
142     handle->send_queue_count--;
143 
144     if (req->bufs != req->bufsml)
145       uv__free(req->bufs);
146     req->bufs = NULL;
147 
148     if (req->send_cb == NULL)
149       continue;
150 
151     /* req->status >= 0 == bytes written
152      * req->status <  0 == errno
153      */
154     if (req->status >= 0)
155       req->send_cb(req, 0);
156     else
157       req->send_cb(req, req->status);
158   }
159 
160   if (QUEUE_EMPTY(&handle->write_queue)) {
161     /* Pending queue and completion queue empty, stop watcher. */
162     uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT);
163     if (!uv__io_active(&handle->io_watcher, POLLIN))
164       uv__handle_stop(handle);
165   }
166 
167   handle->flags &= ~UV_HANDLE_UDP_PROCESSING;
168 }
169 
170 
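/* I/O watcher callback: POLLIN drives the receive path, POLLOUT flushes the
 * write queue and then runs the completed send requests.
 */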
171 static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) {
172   uv_udp_t* handle;
173 
174   handle = container_of(w, uv_udp_t, io_watcher);
175   assert(handle->type == UV_UDP);
176 
177   if (revents & POLLIN)
178     uv__udp_recvmsg(handle);
179 
180   if (revents & POLLOUT) {
181     uv__udp_sendmsg(handle);
182     uv__udp_run_completed(handle);
183   }
184 }
185 
186 #if HAVE_MMSG
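/* Receive up to UV__MMSG_MAXWIDTH datagrams in one recvmmsg() call by slicing
 * the user-supplied buffer into UV__UDP_DGRAM_MAXSIZE chunks; recv_cb runs
 * once per datagram, then once more with UV_UDP_MMSG_FREE to release the buffer.
 */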
187 static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
188   struct sockaddr_in6 peers[UV__MMSG_MAXWIDTH];
189   struct iovec iov[UV__MMSG_MAXWIDTH];
190   struct uv__mmsghdr msgs[UV__MMSG_MAXWIDTH];
191   ssize_t nread;
192   uv_buf_t chunk_buf;
193   size_t chunks;
194   int flags;
195   size_t k;
196 
197   /* prepare structures for recvmmsg */
198   chunks = buf->len / UV__UDP_DGRAM_MAXSIZE;
199   if (chunks > ARRAY_SIZE(iov))
200     chunks = ARRAY_SIZE(iov);
201   for (k = 0; k < chunks; ++k) {
202     iov[k].iov_base = buf->base + k * UV__UDP_DGRAM_MAXSIZE;
203     iov[k].iov_len = UV__UDP_DGRAM_MAXSIZE;
204     memset(&msgs[k].msg_hdr, 0, sizeof(msgs[k].msg_hdr));
205     msgs[k].msg_hdr.msg_iov = iov + k;
206     msgs[k].msg_hdr.msg_iovlen = 1;
207     msgs[k].msg_hdr.msg_name = peers + k;
208     msgs[k].msg_hdr.msg_namelen = sizeof(peers[0]);
209     msgs[k].msg_hdr.msg_control = NULL;
210     msgs[k].msg_hdr.msg_controllen = 0;
211     msgs[k].msg_hdr.msg_flags = 0;
212   }
213 
214   do
215     nread = uv__recvmmsg(handle->io_watcher.fd, msgs, chunks);
216   while (nread == -1 && errno == EINTR);
217 
218   if (nread < 1) {
219     if (nread == 0 || errno == EAGAIN || errno == EWOULDBLOCK)
220       handle->recv_cb(handle, 0, buf, NULL, 0);
221     else
222       handle->recv_cb(handle, UV__ERR(errno), buf, NULL, 0);
223   } else {
224     /* pass each chunk to the application */
225     for (k = 0; k < (size_t) nread && handle->recv_cb != NULL; k++) {
226       flags = UV_UDP_MMSG_CHUNK;
227       if (msgs[k].msg_hdr.msg_flags & MSG_TRUNC)
228         flags |= UV_UDP_PARTIAL;
229 
230       chunk_buf = uv_buf_init(iov[k].iov_base, iov[k].iov_len);
231       handle->recv_cb(handle,
232                       msgs[k].msg_len,
233                       &chunk_buf,
234                       msgs[k].msg_hdr.msg_name,
235                       flags);
236     }
237 
238     /* one last callback so the original buffer is freed */
239     if (handle->recv_cb != NULL)
240       handle->recv_cb(handle, 0, buf, NULL, UV_UDP_MMSG_FREE);
241   }
242   return nread;
243 }
244 #endif
245 
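/* Receive datagrams one recvmsg() call at a time, asking alloc_cb for a fresh
 * buffer on every iteration and handing the result to recv_cb.
 */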
246 static void uv__udp_recvmsg(uv_udp_t* handle) {
247   struct sockaddr_storage peer;
248   struct msghdr h;
249   ssize_t nread;
250   uv_buf_t buf;
251   int flags;
252   int count;
253 
254   assert(handle->recv_cb != NULL);
255   assert(handle->alloc_cb != NULL);
256 
257   /* Prevent loop starvation when the data comes in as fast as (or faster than)
258    * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
259    */
260   count = 32;
261 
262   do {
263     buf = uv_buf_init(NULL, 0);
264     handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf);
265     if (buf.base == NULL || buf.len == 0) {
266       handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
267       return;
268     }
269     assert(buf.base != NULL);
270 
271 #if HAVE_MMSG
272     if (uv_udp_using_recvmmsg(handle)) {
273       nread = uv__udp_recvmmsg(handle, &buf);
274       if (nread > 0)
275         count -= nread;
276       continue;
277     }
278 #endif
279 
280     memset(&h, 0, sizeof(h));
281     memset(&peer, 0, sizeof(peer));
282     h.msg_name = &peer;
283     h.msg_namelen = sizeof(peer);
284     h.msg_iov = (void*) &buf;
285     h.msg_iovlen = 1;
286 
287     do {
288       nread = recvmsg(handle->io_watcher.fd, &h, 0);
289     }
290     while (nread == -1 && errno == EINTR);
291 
292     if (nread == -1) {
293       if (errno == EAGAIN || errno == EWOULDBLOCK)
294         handle->recv_cb(handle, 0, &buf, NULL, 0);
295       else
296         handle->recv_cb(handle, UV__ERR(errno), &buf, NULL, 0);
297     }
298     else {
299       flags = 0;
300       if (h.msg_flags & MSG_TRUNC)
301         flags |= UV_UDP_PARTIAL;
302 
303       handle->recv_cb(handle, nread, &buf, (const struct sockaddr*) &peer, flags);
304     }
305     count--;
306   }
307   /* recv_cb callback may decide to pause or close the handle */
308   while (nread != -1
309       && count > 0
310       && handle->io_watcher.fd != -1
311       && handle->recv_cb != NULL);
312 }
313 
314 #if HAVE_MMSG
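/* Batch up to UV__MMSG_MAXWIDTH queued send requests into a single sendmmsg()
 * call, then move the transmitted requests to the write_completed_queue.
 */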
315 static void uv__udp_sendmmsg(uv_udp_t* handle) {
316   uv_udp_send_t* req;
317   struct uv__mmsghdr h[UV__MMSG_MAXWIDTH];
318   struct uv__mmsghdr *p;
319   QUEUE* q;
320   ssize_t npkts;
321   size_t pkts;
322   size_t i;
323 
324   if (QUEUE_EMPTY(&handle->write_queue))
325     return;
326 
327 write_queue_drain:
328   for (pkts = 0, q = QUEUE_HEAD(&handle->write_queue);
329        pkts < UV__MMSG_MAXWIDTH && q != &handle->write_queue;
330        ++pkts, q = QUEUE_HEAD(q)) {
331     assert(q != NULL);
332     req = QUEUE_DATA(q, uv_udp_send_t, queue);
333     assert(req != NULL);
334 
335     p = &h[pkts];
336     memset(p, 0, sizeof(*p));
337     if (req->addr.ss_family == AF_UNSPEC) {
338       p->msg_hdr.msg_name = NULL;
339       p->msg_hdr.msg_namelen = 0;
340     } else {
341       p->msg_hdr.msg_name = &req->addr;
342       if (req->addr.ss_family == AF_INET6)
343         p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in6);
344       else if (req->addr.ss_family == AF_INET)
345         p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in);
346       else if (req->addr.ss_family == AF_UNIX)
347         p->msg_hdr.msg_namelen = sizeof(struct sockaddr_un);
348       else {
349         assert(0 && "unsupported address family");
350         abort();
351       }
352     }
353     h[pkts].msg_hdr.msg_iov = (struct iovec*) req->bufs;
354     h[pkts].msg_hdr.msg_iovlen = req->nbufs;
355   }
356 
357   do
358     npkts = uv__sendmmsg(handle->io_watcher.fd, h, pkts);
359   while (npkts == -1 && errno == EINTR);
360 
361   if (npkts < 1) {
362     if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
363       return;
364     for (i = 0, q = QUEUE_HEAD(&handle->write_queue);
365          i < pkts && q != &handle->write_queue;
366          ++i, q = QUEUE_HEAD(&handle->write_queue)) {
367       assert(q != NULL);
368       req = QUEUE_DATA(q, uv_udp_send_t, queue);
369       assert(req != NULL);
370 
371       req->status = UV__ERR(errno);
372       QUEUE_REMOVE(&req->queue);
373       QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
374     }
375     uv__io_feed(handle->loop, &handle->io_watcher);
376     return;
377   }
378 
379   /* Safety: npkts is known to be > 0 below, so the cast from ssize_t
380    * to size_t is safe.
381    */
382   for (i = 0, q = QUEUE_HEAD(&handle->write_queue);
383        i < (size_t)npkts && q != &handle->write_queue;
384        ++i, q = QUEUE_HEAD(&handle->write_queue)) {
385     assert(q != NULL);
386     req = QUEUE_DATA(q, uv_udp_send_t, queue);
387     assert(req != NULL);
388 
389     req->status = req->bufs[0].len;
390 
391     /* Sending a datagram is an atomic operation: either all data
392      * is written or nothing is (and EMSGSIZE is raised). That is
393      * why we don't handle partial writes. Just pop the request
394      * off the write queue and onto the completed queue, done.
395      */
396     QUEUE_REMOVE(&req->queue);
397     QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
398   }
399 
400   /* couldn't batch everything, continue sending (jump to avoid stack growth) */
401   if (!QUEUE_EMPTY(&handle->write_queue))
402     goto write_queue_drain;
403   uv__io_feed(handle->loop, &handle->io_watcher);
404   return;
405 }
406 #endif
407 
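/* Flush the write queue one sendmsg() call per request, or hand off to the
 * sendmmsg() batch path when the kernel supports it.
 */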
408 static void uv__udp_sendmsg(uv_udp_t* handle) {
409   uv_udp_send_t* req;
410   struct msghdr h;
411   QUEUE* q;
412   ssize_t size;
413 
414 #if HAVE_MMSG
415   uv_once(&once, uv__udp_mmsg_init);
416   if (uv__sendmmsg_avail) {
417     uv__udp_sendmmsg(handle);
418     return;
419   }
420 #endif
421 
422   while (!QUEUE_EMPTY(&handle->write_queue)) {
423     q = QUEUE_HEAD(&handle->write_queue);
424     assert(q != NULL);
425 
426     req = QUEUE_DATA(q, uv_udp_send_t, queue);
427     assert(req != NULL);
428 
429     memset(&h, 0, sizeof h);
430     if (req->addr.ss_family == AF_UNSPEC) {
431       h.msg_name = NULL;
432       h.msg_namelen = 0;
433     } else {
434       h.msg_name = &req->addr;
435       if (req->addr.ss_family == AF_INET6)
436         h.msg_namelen = sizeof(struct sockaddr_in6);
437       else if (req->addr.ss_family == AF_INET)
438         h.msg_namelen = sizeof(struct sockaddr_in);
439       else if (req->addr.ss_family == AF_UNIX)
440         h.msg_namelen = sizeof(struct sockaddr_un);
441       else {
442         assert(0 && "unsupported address family");
443         abort();
444       }
445     }
446     h.msg_iov = (struct iovec*) req->bufs;
447     h.msg_iovlen = req->nbufs;
448 
449     do {
450       size = sendmsg(handle->io_watcher.fd, &h, 0);
451     } while (size == -1 && errno == EINTR);
452 
453     if (size == -1) {
454       if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
455         break;
456     }
457 
458     req->status = (size == -1 ? UV__ERR(errno) : size);
459 
460     /* Sending a datagram is an atomic operation: either all data
461      * is written or nothing is (and EMSGSIZE is raised). That is
462      * why we don't handle partial writes. Just pop the request
463      * off the write queue and onto the completed queue, done.
464      */
465     QUEUE_REMOVE(&req->queue);
466     QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
467     uv__io_feed(handle->loop, &handle->io_watcher);
468   }
469 }
470 
471 /* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional
472  * refinements for programs that use multicast.
473  *
474  * Linux as of 3.9 has a SO_REUSEPORT socket option but with semantics that
475  * are different from the BSDs: it _shares_ the port rather than stealing it
476  * from the current listener.  While useful, it's not something we can emulate
477  * on other platforms so we don't enable it.
478  *
479  * zOS does not support getsockname with SO_REUSEPORT option when using
480  * AF_UNIX.
481  */
482 static int uv__set_reuse(int fd) {
483   int yes;
484   yes = 1;
485 
486 #if defined(SO_REUSEPORT) && defined(__MVS__)
487   struct sockaddr_in sockfd;
488   unsigned int sockfd_len = sizeof(sockfd);
489   if (getsockname(fd, (struct sockaddr*) &sockfd, &sockfd_len) == -1)
490       return UV__ERR(errno);
491   if (sockfd.sin_family == AF_UNIX) {
492     if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
493       return UV__ERR(errno);
494   } else {
495     if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
496        return UV__ERR(errno);
497   }
498 #elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__)
499   if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
500     return UV__ERR(errno);
501 #else
502   if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
503     return UV__ERR(errno);
504 #endif
505 
506   return 0;
507 }
508 
509 /*
510  * The Linux kernel suppresses some ICMP error messages by default for UDP
511  * sockets. Setting IP_RECVERR/IPV6_RECVERR on the socket enables full ICMP
512  * error reporting, hopefully resulting in faster failover to working name
513  * servers.
514  */
515 static int uv__set_recverr(int fd, sa_family_t ss_family) {
516 #if defined(__linux__)
517   int yes;
518 
519   yes = 1;
520   if (ss_family == AF_INET) {
521     if (setsockopt(fd, IPPROTO_IP, IP_RECVERR, &yes, sizeof(yes)))
522       return UV__ERR(errno);
523   } else if (ss_family == AF_INET6) {
524     if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &yes, sizeof(yes)))
525        return UV__ERR(errno);
526   }
527 #endif
528   return 0;
529 }
530 
531 
532 int uv__udp_bind(uv_udp_t* handle,
533                  const struct sockaddr* addr,
534                  unsigned int addrlen,
535                  unsigned int flags) {
536   int err;
537   int yes;
538   int fd;
539 
540   /* Check for bad flags. */
541   if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR | UV_UDP_LINUX_RECVERR))
542     return UV_EINVAL;
543 
544   /* Cannot set IPv6-only mode on non-IPv6 socket. */
545   if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6)
546     return UV_EINVAL;
547 
548   fd = handle->io_watcher.fd;
549   if (fd == -1) {
550     err = uv__socket(addr->sa_family, SOCK_DGRAM, 0);
551     if (err < 0)
552       return err;
553     fd = err;
554     handle->io_watcher.fd = fd;
555   }
556 
557   if (flags & UV_UDP_LINUX_RECVERR) {
558     err = uv__set_recverr(fd, addr->sa_family);
559     if (err)
560       return err;
561   }
562 
563   if (flags & UV_UDP_REUSEADDR) {
564     err = uv__set_reuse(fd);
565     if (err)
566       return err;
567   }
568 
569   if (flags & UV_UDP_IPV6ONLY) {
570 #ifdef IPV6_V6ONLY
571     yes = 1;
572     if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &yes, sizeof yes) == -1) {
573       err = UV__ERR(errno);
574       return err;
575     }
576 #else
577     err = UV_ENOTSUP;
578     return err;
579 #endif
580   }
581 
582   if (bind(fd, addr, addrlen)) {
583     err = UV__ERR(errno);
584     if (errno == EAFNOSUPPORT)
585       /* OSX, other BSDs and SunOS fail with EAFNOSUPPORT when binding a
586        * socket created with AF_INET to an AF_INET6 address or vice versa. */
587       err = UV_EINVAL;
588     return err;
589   }
590 
591   if (addr->sa_family == AF_INET6)
592     handle->flags |= UV_HANDLE_IPV6;
593 
594   handle->flags |= UV_HANDLE_BOUND;
595   return 0;
596 }
597 
598 
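/* Bind the handle to the wildcard address of the given family if the caller
 * has not bound it explicitly yet; a no-op when a socket already exists.
 */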
599 static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
600                                        int domain,
601                                        unsigned int flags) {
602   union uv__sockaddr taddr;
603   socklen_t addrlen;
604 
605   if (handle->io_watcher.fd != -1)
606     return 0;
607 
608   switch (domain) {
609   case AF_INET:
610   {
611     struct sockaddr_in* addr = &taddr.in;
612     memset(addr, 0, sizeof *addr);
613     addr->sin_family = AF_INET;
614     addr->sin_addr.s_addr = INADDR_ANY;
615     addrlen = sizeof *addr;
616     break;
617   }
618   case AF_INET6:
619   {
620     struct sockaddr_in6* addr = &taddr.in6;
621     memset(addr, 0, sizeof *addr);
622     addr->sin6_family = AF_INET6;
623     addr->sin6_addr = in6addr_any;
624     addrlen = sizeof *addr;
625     break;
626   }
627   default:
628     assert(0 && "unsupported address family");
629     abort();
630   }
631 
632   return uv__udp_bind(handle, &taddr.addr, addrlen, flags);
633 }
634 
635 
636 int uv__udp_connect(uv_udp_t* handle,
637                     const struct sockaddr* addr,
638                     unsigned int addrlen) {
639   int err;
640 
641   err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
642   if (err)
643     return err;
644 
645   do {
646     errno = 0;
647     err = connect(handle->io_watcher.fd, addr, addrlen);
648   } while (err == -1 && errno == EINTR);
649 
650   if (err)
651     return UV__ERR(errno);
652 
653   handle->flags |= UV_HANDLE_UDP_CONNECTED;
654 
655   return 0;
656 }
657 
658 /* From https://pubs.opengroup.org/onlinepubs/9699919799/functions/connect.html
659  * The interface is standardized across the UNIXes libuv supports, but the
660  * kernel implementations differ, so the following pseudocode describes the
661  * UDP disconnect behavior on each platform:
662  *
663  * Predefined stubs for pseudocode:
664  *   1. sodisconnect: The function to perform the real udp disconnect
665  *   2. pru_connect: The function to perform the real udp connect
666  *   3. so: The kernel socket object that corresponds to the socket fd
667  *   4. addr: The sockaddr parameter from user space
668  *
669  * BSDs:
670  *   if(sodisconnect(so) == 0) { // udp disconnect succeed
671  *     if (addr->sa_len != so->addr->sa_len) return EINVAL;
672  *     if (addr->sa_family != so->addr->sa_family) return EAFNOSUPPORT;
673  *     pru_connect(so);
674  *   }
675  *   else return EISCONN;
676  *
677  * z/OS (same as Windows):
678  *   if(addr->sa_len < so->addr->sa_len) return EINVAL;
679  *   if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
680  *
681  * AIX:
682  *   if(addr->sa_len != sizeof(struct sockaddr)) return EINVAL; // ignore ip proto version
683  *   if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
684  *
685  * Linux,Others:
686  *   if(addr->sa_len < sizeof(struct sockaddr)) return EINVAL;
687  *   if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
688  */
689 int uv__udp_disconnect(uv_udp_t* handle) {
690     int r;
691 #if defined(__MVS__)
692     struct sockaddr_storage addr;
693 #else
694     struct sockaddr addr;
695 #endif
696 
697     memset(&addr, 0, sizeof(addr));
698 
699 #if defined(__MVS__)
700     addr.ss_family = AF_UNSPEC;
701 #else
702     addr.sa_family = AF_UNSPEC;
703 #endif
704 
705     do {
706       errno = 0;
707       r = connect(handle->io_watcher.fd, (struct sockaddr*) &addr, sizeof(addr));
708     } while (r == -1 && errno == EINTR);
709 
710     if (r == -1) {
711 #if defined(BSD)  /* The macro BSD is from sys/param.h */
712       if (errno != EAFNOSUPPORT && errno != EINVAL)
713         return UV__ERR(errno);
714 #else
715       return UV__ERR(errno);
716 #endif
717     }
718 
719     handle->flags &= ~UV_HANDLE_UDP_CONNECTED;
720     return 0;
721 }
722 
723 int uv__udp_send(uv_udp_send_t* req,
724                  uv_udp_t* handle,
725                  const uv_buf_t bufs[],
726                  unsigned int nbufs,
727                  const struct sockaddr* addr,
728                  unsigned int addrlen,
729                  uv_udp_send_cb send_cb) {
730   int err;
731   int empty_queue;
732 
733   assert(nbufs > 0);
734 
735   if (addr) {
736     err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
737     if (err)
738       return err;
739   }
740 
741   /* It's legal for send_queue_count > 0 even when the write_queue is empty;
742    * it means there are error-state requests in the write_completed_queue that
743    * will touch up send_queue_size/count later.
744    */
745   empty_queue = (handle->send_queue_count == 0);
746 
747   uv__req_init(handle->loop, req, UV_UDP_SEND);
748   assert(addrlen <= sizeof(req->addr));
749   if (addr == NULL)
750     req->addr.ss_family = AF_UNSPEC;
751   else
752     memcpy(&req->addr, addr, addrlen);
753   req->send_cb = send_cb;
754   req->handle = handle;
755   req->nbufs = nbufs;
756 
757   req->bufs = req->bufsml;
758   if (nbufs > ARRAY_SIZE(req->bufsml))
759     req->bufs = uv__malloc(nbufs * sizeof(bufs[0]));
760 
761   if (req->bufs == NULL) {
762     uv__req_unregister(handle->loop, req);
763     return UV_ENOMEM;
764   }
765 
766   memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
767   handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
768   handle->send_queue_count++;
769   QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue);
770   uv__handle_start(handle);
771 
772   if (empty_queue && !(handle->flags & UV_HANDLE_UDP_PROCESSING)) {
773     uv__udp_sendmsg(handle);
774 
775     /* `uv__udp_sendmsg` may not be able to do non-blocking write straight
776      * away. In such cases the `io_watcher` has to be queued for asynchronous
777      * write.
778      */
779     if (!QUEUE_EMPTY(&handle->write_queue))
780       uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
781   } else {
782     uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
783   }
784 
785   return 0;
786 }
787 
788 
789 int uv__udp_try_send(uv_udp_t* handle,
790                      const uv_buf_t bufs[],
791                      unsigned int nbufs,
792                      const struct sockaddr* addr,
793                      unsigned int addrlen) {
794   int err;
795   struct msghdr h;
796   ssize_t size;
797 
798   assert(nbufs > 0);
799 
800   /* already sending a message */
801   if (handle->send_queue_count != 0)
802     return UV_EAGAIN;
803 
804   if (addr) {
805     err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
806     if (err)
807       return err;
808   } else {
809     assert(handle->flags & UV_HANDLE_UDP_CONNECTED);
810   }
811 
812   memset(&h, 0, sizeof h);
813   h.msg_name = (struct sockaddr*) addr;
814   h.msg_namelen = addrlen;
815   h.msg_iov = (struct iovec*) bufs;
816   h.msg_iovlen = nbufs;
817 
818   do {
819     size = sendmsg(handle->io_watcher.fd, &h, 0);
820   } while (size == -1 && errno == EINTR);
821 
822   if (size == -1) {
823     if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
824       return UV_EAGAIN;
825     else
826       return UV__ERR(errno);
827   }
828 
829   return size;
830 }
831 
832 
833 static int uv__udp_set_membership4(uv_udp_t* handle,
834                                    const struct sockaddr_in* multicast_addr,
835                                    const char* interface_addr,
836                                    uv_membership membership) {
837   struct ip_mreq mreq;
838   int optname;
839   int err;
840 
841   memset(&mreq, 0, sizeof mreq);
842 
843   if (interface_addr) {
844     err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
845     if (err)
846       return err;
847   } else {
848     mreq.imr_interface.s_addr = htonl(INADDR_ANY);
849   }
850 
851   mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
852 
853   switch (membership) {
854   case UV_JOIN_GROUP:
855     optname = IP_ADD_MEMBERSHIP;
856     break;
857   case UV_LEAVE_GROUP:
858     optname = IP_DROP_MEMBERSHIP;
859     break;
860   default:
861     return UV_EINVAL;
862   }
863 
864   if (setsockopt(handle->io_watcher.fd,
865                  IPPROTO_IP,
866                  optname,
867                  &mreq,
868                  sizeof(mreq))) {
869 #if defined(__MVS__)
870   if (errno == ENXIO)
871     return UV_ENODEV;
872 #endif
873     return UV__ERR(errno);
874   }
875 
876   return 0;
877 }
878 
879 
880 static int uv__udp_set_membership6(uv_udp_t* handle,
881                                    const struct sockaddr_in6* multicast_addr,
882                                    const char* interface_addr,
883                                    uv_membership membership) {
884   int optname;
885   struct ipv6_mreq mreq;
886   struct sockaddr_in6 addr6;
887 
888   memset(&mreq, 0, sizeof mreq);
889 
890   if (interface_addr) {
891     if (uv_ip6_addr(interface_addr, 0, &addr6))
892       return UV_EINVAL;
893     mreq.ipv6mr_interface = addr6.sin6_scope_id;
894   } else {
895     mreq.ipv6mr_interface = 0;
896   }
897 
898   mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr;
899 
900   switch (membership) {
901   case UV_JOIN_GROUP:
902     optname = IPV6_ADD_MEMBERSHIP;
903     break;
904   case UV_LEAVE_GROUP:
905     optname = IPV6_DROP_MEMBERSHIP;
906     break;
907   default:
908     return UV_EINVAL;
909   }
910 
911   if (setsockopt(handle->io_watcher.fd,
912                  IPPROTO_IPV6,
913                  optname,
914                  &mreq,
915                  sizeof(mreq))) {
916 #if defined(__MVS__)
917   if (errno == ENXIO)
918     return UV_ENODEV;
919 #endif
920     return UV__ERR(errno);
921   }
922 
923   return 0;
924 }
925 
926 
927 #if !defined(__OpenBSD__) &&                                        \
928     !defined(__NetBSD__) &&                                         \
929     !defined(__ANDROID__) &&                                        \
930     !defined(__DragonFly__) &&                                      \
931     !defined(__QNX__) &&                                            \
932     !defined(__GNU__)
933 static int uv__udp_set_source_membership4(uv_udp_t* handle,
934                                           const struct sockaddr_in* multicast_addr,
935                                           const char* interface_addr,
936                                           const struct sockaddr_in* source_addr,
937                                           uv_membership membership) {
938   struct ip_mreq_source mreq;
939   int optname;
940   int err;
941 
942   err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
943   if (err)
944     return err;
945 
946   memset(&mreq, 0, sizeof(mreq));
947 
948   if (interface_addr != NULL) {
949     err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
950     if (err)
951       return err;
952   } else {
953     mreq.imr_interface.s_addr = htonl(INADDR_ANY);
954   }
955 
956   mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
957   mreq.imr_sourceaddr.s_addr = source_addr->sin_addr.s_addr;
958 
959   if (membership == UV_JOIN_GROUP)
960     optname = IP_ADD_SOURCE_MEMBERSHIP;
961   else if (membership == UV_LEAVE_GROUP)
962     optname = IP_DROP_SOURCE_MEMBERSHIP;
963   else
964     return UV_EINVAL;
965 
966   if (setsockopt(handle->io_watcher.fd,
967                  IPPROTO_IP,
968                  optname,
969                  &mreq,
970                  sizeof(mreq))) {
971     return UV__ERR(errno);
972   }
973 
974   return 0;
975 }
976 
977 
978 static int uv__udp_set_source_membership6(uv_udp_t* handle,
979                                           const struct sockaddr_in6* multicast_addr,
980                                           const char* interface_addr,
981                                           const struct sockaddr_in6* source_addr,
982                                           uv_membership membership) {
983   struct group_source_req mreq;
984   struct sockaddr_in6 addr6;
985   int optname;
986   int err;
987 
988   err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
989   if (err)
990     return err;
991 
992   memset(&mreq, 0, sizeof(mreq));
993 
994   if (interface_addr != NULL) {
995     err = uv_ip6_addr(interface_addr, 0, &addr6);
996     if (err)
997       return err;
998     mreq.gsr_interface = addr6.sin6_scope_id;
999   } else {
1000     mreq.gsr_interface = 0;
1001   }
1002 
1003   STATIC_ASSERT(sizeof(mreq.gsr_group) >= sizeof(*multicast_addr));
1004   STATIC_ASSERT(sizeof(mreq.gsr_source) >= sizeof(*source_addr));
1005   memcpy(&mreq.gsr_group, multicast_addr, sizeof(*multicast_addr));
1006   memcpy(&mreq.gsr_source, source_addr, sizeof(*source_addr));
1007 
1008   if (membership == UV_JOIN_GROUP)
1009     optname = MCAST_JOIN_SOURCE_GROUP;
1010   else if (membership == UV_LEAVE_GROUP)
1011     optname = MCAST_LEAVE_SOURCE_GROUP;
1012   else
1013     return UV_EINVAL;
1014 
1015   if (setsockopt(handle->io_watcher.fd,
1016                  IPPROTO_IPV6,
1017                  optname,
1018                  &mreq,
1019                  sizeof(mreq))) {
1020     return UV__ERR(errno);
1021   }
1022 
1023   return 0;
1024 }
1025 #endif
1026 
1027 
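/* Initialize the handle and, when an address family is given, create the
 * datagram socket eagerly instead of deferring to the first bind/send.
 */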
1028 int uv__udp_init_ex(uv_loop_t* loop,
1029                     uv_udp_t* handle,
1030                     unsigned flags,
1031                     int domain) {
1032   int fd;
1033 
1034   fd = -1;
1035   if (domain != AF_UNSPEC) {
1036     fd = uv__socket(domain, SOCK_DGRAM, 0);
1037     if (fd < 0)
1038       return fd;
1039   }
1040 
1041   uv__handle_init(loop, (uv_handle_t*)handle, UV_UDP);
1042   handle->alloc_cb = NULL;
1043   handle->recv_cb = NULL;
1044   handle->send_queue_size = 0;
1045   handle->send_queue_count = 0;
1046   uv__io_init(&handle->io_watcher, uv__udp_io, fd);
1047   QUEUE_INIT(&handle->write_queue);
1048   QUEUE_INIT(&handle->write_completed_queue);
1049 
1050   return 0;
1051 }
1052 
1053 
1054 int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
1055 #if HAVE_MMSG
1056   if (handle->flags & UV_HANDLE_UDP_RECVMMSG) {
1057     uv_once(&once, uv__udp_mmsg_init);
1058     return uv__recvmmsg_avail;
1059   }
1060 #endif
1061   return 0;
1062 }
1063 
1064 
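/* Adopt an existing datagram socket: reject fds that are already in use,
 * switch the fd to non-blocking mode and enable address reuse.
 */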
1065 int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
1066   int err;
1067 
1068   /* Check for already active socket. */
1069   if (handle->io_watcher.fd != -1)
1070     return UV_EBUSY;
1071 
1072   if (uv__fd_exists(handle->loop, sock))
1073     return UV_EEXIST;
1074 
1075   err = uv__nonblock(sock, 1);
1076   if (err)
1077     return err;
1078 
1079   err = uv__set_reuse(sock);
1080   if (err)
1081     return err;
1082 
1083   handle->io_watcher.fd = sock;
1084   if (uv__udp_is_connected(handle))
1085     handle->flags |= UV_HANDLE_UDP_CONNECTED;
1086 
1087   return 0;
1088 }
1089 
1090 
1091 int uv_udp_set_membership(uv_udp_t* handle,
1092                           const char* multicast_addr,
1093                           const char* interface_addr,
1094                           uv_membership membership) {
1095   int err;
1096   struct sockaddr_in addr4;
1097   struct sockaddr_in6 addr6;
1098 
1099   if (uv_ip4_addr(multicast_addr, 0, &addr4) == 0) {
1100     err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
1101     if (err)
1102       return err;
1103     return uv__udp_set_membership4(handle, &addr4, interface_addr, membership);
1104   } else if (uv_ip6_addr(multicast_addr, 0, &addr6) == 0) {
1105     err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
1106     if (err)
1107       return err;
1108     return uv__udp_set_membership6(handle, &addr6, interface_addr, membership);
1109   } else {
1110     return UV_EINVAL;
1111   }
1112 }
1113 
1114 
1115 int uv_udp_set_source_membership(uv_udp_t* handle,
1116                                  const char* multicast_addr,
1117                                  const char* interface_addr,
1118                                  const char* source_addr,
1119                                  uv_membership membership) {
1120 #if !defined(__OpenBSD__) &&                                        \
1121     !defined(__NetBSD__) &&                                         \
1122     !defined(__ANDROID__) &&                                        \
1123     !defined(__DragonFly__) &&                                      \
1124     !defined(__QNX__) &&                                            \
1125     !defined(__GNU__)
1126   int err;
1127   union uv__sockaddr mcast_addr;
1128   union uv__sockaddr src_addr;
1129 
1130   err = uv_ip4_addr(multicast_addr, 0, &mcast_addr.in);
1131   if (err) {
1132     err = uv_ip6_addr(multicast_addr, 0, &mcast_addr.in6);
1133     if (err)
1134       return err;
1135     err = uv_ip6_addr(source_addr, 0, &src_addr.in6);
1136     if (err)
1137       return err;
1138     return uv__udp_set_source_membership6(handle,
1139                                           &mcast_addr.in6,
1140                                           interface_addr,
1141                                           &src_addr.in6,
1142                                           membership);
1143   }
1144 
1145   err = uv_ip4_addr(source_addr, 0, &src_addr.in);
1146   if (err)
1147     return err;
1148   return uv__udp_set_source_membership4(handle,
1149                                         &mcast_addr.in,
1150                                         interface_addr,
1151                                         &src_addr.in,
1152                                         membership);
1153 #else
1154   return UV_ENOSYS;
1155 #endif
1156 }
1157 
1158 
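/* Apply the IPv6 variant of a socket option on IPv6 handles and the IPv4
 * variant otherwise.
 */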
1159 static int uv__setsockopt(uv_udp_t* handle,
1160                          int option4,
1161                          int option6,
1162                          const void* val,
1163                          socklen_t size) {
1164   int r;
1165 
1166   if (handle->flags & UV_HANDLE_IPV6)
1167     r = setsockopt(handle->io_watcher.fd,
1168                    IPPROTO_IPV6,
1169                    option6,
1170                    val,
1171                    size);
1172   else
1173     r = setsockopt(handle->io_watcher.fd,
1174                    IPPROTO_IP,
1175                    option4,
1176                    val,
1177                    size);
1178   if (r)
1179     return UV__ERR(errno);
1180 
1181   return 0;
1182 }
1183 
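/* Some platforms expect a char-sized option value, so pass the value in the
 * type the platform wants; it must fit in the 0..255 range either way.
 */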
1184 static int uv__setsockopt_maybe_char(uv_udp_t* handle,
1185                                      int option4,
1186                                      int option6,
1187                                      int val) {
1188 #if defined(__sun) || defined(_AIX) || defined(__MVS__)
1189   char arg = val;
1190 #elif defined(__OpenBSD__)
1191   unsigned char arg = val;
1192 #else
1193   int arg = val;
1194 #endif
1195 
1196   if (val < 0 || val > 255)
1197     return UV_EINVAL;
1198 
1199   return uv__setsockopt(handle, option4, option6, &arg, sizeof(arg));
1200 }
1201 
1202 
1203 int uv_udp_set_broadcast(uv_udp_t* handle, int on) {
1204   if (setsockopt(handle->io_watcher.fd,
1205                  SOL_SOCKET,
1206                  SO_BROADCAST,
1207                  &on,
1208                  sizeof(on))) {
1209     return UV__ERR(errno);
1210   }
1211 
1212   return 0;
1213 }
1214 
1215 
1216 int uv_udp_set_ttl(uv_udp_t* handle, int ttl) {
1217   if (ttl < 1 || ttl > 255)
1218     return UV_EINVAL;
1219 
1220 #if defined(__MVS__)
1221   if (!(handle->flags & UV_HANDLE_IPV6))
1222     return UV_ENOTSUP;  /* zOS does not support setting ttl for IPv4 */
1223 #endif
1224 
1225 /*
1226  * On Solaris and derivatives such as SmartOS, the length of socket options
1227  * is sizeof(int) for IP_TTL and IPV6_UNICAST_HOPS,
1228  * so hardcode the size of these options on this platform,
1229  * and use the general uv__setsockopt_maybe_char call on other platforms.
1230  */
1231 #if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
1232     defined(__MVS__) || defined(__QNX__)
1233 
1234   return uv__setsockopt(handle,
1235                         IP_TTL,
1236                         IPV6_UNICAST_HOPS,
1237                         &ttl,
1238                         sizeof(ttl));
1239 
1240 #else /* !(defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
1241            defined(__MVS__) || defined(__QNX__)) */
1242 
1243   return uv__setsockopt_maybe_char(handle,
1244                                    IP_TTL,
1245                                    IPV6_UNICAST_HOPS,
1246                                    ttl);
1247 
1248 #endif /* defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
1249           defined(__MVS__) || defined(__QNX__) */
1250 }
1251 
1252 
1253 int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) {
1254 /*
1255  * On Solaris and derivatives such as SmartOS, the length of socket options
1256  * is sizeof(int) for IPV6_MULTICAST_HOPS and sizeof(char) for
1257  * IP_MULTICAST_TTL, so hardcode the size of the option in the IPv6 case,
1258  * and use the general uv__setsockopt_maybe_char call otherwise.
1259  */
1260 #if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
1261     defined(__MVS__) || defined(__QNX__)
1262   if (handle->flags & UV_HANDLE_IPV6)
1263     return uv__setsockopt(handle,
1264                           IP_MULTICAST_TTL,
1265                           IPV6_MULTICAST_HOPS,
1266                           &ttl,
1267                           sizeof(ttl));
1268 #endif /* defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
1269     defined(__MVS__) || defined(__QNX__) */
1270 
1271   return uv__setsockopt_maybe_char(handle,
1272                                    IP_MULTICAST_TTL,
1273                                    IPV6_MULTICAST_HOPS,
1274                                    ttl);
1275 }
1276 
1277 
1278 int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) {
1279 /*
1280  * On Solaris and derivatives such as SmartOS, the length of socket options
1281  * is sizeof(int) for IPV6_MULTICAST_LOOP and sizeof(char) for
1282  * IP_MULTICAST_LOOP, so hardcode the size of the option in the IPv6 case,
1283  * and use the general uv__setsockopt_maybe_char call otherwise.
1284  */
1285 #if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
1286     defined(__MVS__) || defined(__QNX__)
1287   if (handle->flags & UV_HANDLE_IPV6)
1288     return uv__setsockopt(handle,
1289                           IP_MULTICAST_LOOP,
1290                           IPV6_MULTICAST_LOOP,
1291                           &on,
1292                           sizeof(on));
1293 #endif /* defined(__sun) || defined(_AIX) || defined(__OpenBSD__) ||
1294     defined(__MVS__) || defined(__QNX__) */
1295 
1296   return uv__setsockopt_maybe_char(handle,
1297                                    IP_MULTICAST_LOOP,
1298                                    IPV6_MULTICAST_LOOP,
1299                                    on);
1300 }
1301 
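/* Select the outgoing multicast interface from an IPv4/IPv6 address literal,
 * or fall back to the default interface when interface_addr is NULL.
 */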
1302 int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) {
1303   struct sockaddr_storage addr_st;
1304   struct sockaddr_in* addr4;
1305   struct sockaddr_in6* addr6;
1306 
1307   addr4 = (struct sockaddr_in*) &addr_st;
1308   addr6 = (struct sockaddr_in6*) &addr_st;
1309 
1310   if (!interface_addr) {
1311     memset(&addr_st, 0, sizeof addr_st);
1312     if (handle->flags & UV_HANDLE_IPV6) {
1313       addr_st.ss_family = AF_INET6;
1314       addr6->sin6_scope_id = 0;
1315     } else {
1316       addr_st.ss_family = AF_INET;
1317       addr4->sin_addr.s_addr = htonl(INADDR_ANY);
1318     }
1319   } else if (uv_ip4_addr(interface_addr, 0, addr4) == 0) {
1320     /* nothing, address was parsed */
1321   } else if (uv_ip6_addr(interface_addr, 0, addr6) == 0) {
1322     /* nothing, address was parsed */
1323   } else {
1324     return UV_EINVAL;
1325   }
1326 
1327   if (addr_st.ss_family == AF_INET) {
1328     if (setsockopt(handle->io_watcher.fd,
1329                    IPPROTO_IP,
1330                    IP_MULTICAST_IF,
1331                    (void*) &addr4->sin_addr,
1332                    sizeof(addr4->sin_addr)) == -1) {
1333       return UV__ERR(errno);
1334     }
1335   } else if (addr_st.ss_family == AF_INET6) {
1336     if (setsockopt(handle->io_watcher.fd,
1337                    IPPROTO_IPV6,
1338                    IPV6_MULTICAST_IF,
1339                    &addr6->sin6_scope_id,
1340                    sizeof(addr6->sin6_scope_id)) == -1) {
1341       return UV__ERR(errno);
1342     }
1343   } else {
1344     assert(0 && "unexpected address family");
1345     abort();
1346   }
1347 
1348   return 0;
1349 }
1350 
1351 int uv_udp_getpeername(const uv_udp_t* handle,
1352                        struct sockaddr* name,
1353                        int* namelen) {
1354 
1355   return uv__getsockpeername((const uv_handle_t*) handle,
1356                              getpeername,
1357                              name,
1358                              namelen);
1359 }
1360 
1361 int uv_udp_getsockname(const uv_udp_t* handle,
1362                        struct sockaddr* name,
1363                        int* namelen) {
1364 
1365   return uv__getsockpeername((const uv_handle_t*) handle,
1366                              getsockname,
1367                              name,
1368                              namelen);
1369 }
1370 
1371 
1372 int uv__udp_recv_start(uv_udp_t* handle,
1373                        uv_alloc_cb alloc_cb,
1374                        uv_udp_recv_cb recv_cb) {
1375   int err;
1376 
1377   if (alloc_cb == NULL || recv_cb == NULL)
1378     return UV_EINVAL;
1379 
1380   if (uv__io_active(&handle->io_watcher, POLLIN))
1381     return UV_EALREADY;  /* FIXME(bnoordhuis) Should be UV_EBUSY. */
1382 
1383   err = uv__udp_maybe_deferred_bind(handle, AF_INET, 0);
1384   if (err)
1385     return err;
1386 
1387   handle->alloc_cb = alloc_cb;
1388   handle->recv_cb = recv_cb;
1389 
1390   uv__io_start(handle->loop, &handle->io_watcher, POLLIN);
1391   uv__handle_start(handle);
1392 
1393   return 0;
1394 }
1395 
1396 
1397 int uv__udp_recv_stop(uv_udp_t* handle) {
1398   uv__io_stop(handle->loop, &handle->io_watcher, POLLIN);
1399 
1400   if (!uv__io_active(&handle->io_watcher, POLLOUT))
1401     uv__handle_stop(handle);
1402 
1403   handle->alloc_cb = NULL;
1404   handle->recv_cb = NULL;
1405 
1406   return 0;
1407 }
1408