/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <assert.h>
#include <string.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#if defined(__MVS__)
#include <xti.h>
#endif
#include <sys/un.h>

#define UV__UDP_DGRAM_MAXSIZE (64 * 1024)

#if defined(IPV6_JOIN_GROUP) && !defined(IPV6_ADD_MEMBERSHIP)
# define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP
#endif

#if defined(IPV6_LEAVE_GROUP) && !defined(IPV6_DROP_MEMBERSHIP)
# define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
#endif


static void uv__udp_run_completed(uv_udp_t* handle);
static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
static void uv__udp_recvmsg(uv_udp_t* handle);
static void uv__udp_sendmsg(uv_udp_t* handle);
static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
                                       int domain,
                                       unsigned int flags);

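/* Batched datagram I/O: when the kernel provides sendmmsg()/recvmmsg(),
 * up to UV__MMSG_MAXWIDTH datagrams are sent or received per system call.
 * Availability is probed once at runtime by uv__udp_mmsg_init() below.
 */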
#if HAVE_MMSG

#define UV__MMSG_MAXWIDTH 20

static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf);
static void uv__udp_sendmmsg(uv_udp_t* handle);

static int uv__recvmmsg_avail;
static int uv__sendmmsg_avail;
static uv_once_t once = UV_ONCE_INIT;

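/* Probe once for sendmmsg() and recvmmsg() support. Each call is issued
 * with an empty message vector against a throwaway socket: a return of 0,
 * or any error other than ENOSYS, means the system call exists. A working
 * sendmmsg() is taken to imply a working recvmmsg() as well.
 */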
static void uv__udp_mmsg_init(void) {
  int ret;
  int s;
  s = uv__socket(AF_INET, SOCK_DGRAM, 0);
  if (s < 0)
    return;
  ret = uv__sendmmsg(s, NULL, 0, 0);
  if (ret == 0 || errno != ENOSYS) {
    uv__sendmmsg_avail = 1;
    uv__recvmmsg_avail = 1;
  } else {
    ret = uv__recvmmsg(s, NULL, 0, 0, NULL);
    if (ret == 0 || errno != ENOSYS)
      uv__recvmmsg_avail = 1;
  }
  uv__close(s);
}

#endif

void uv__udp_close(uv_udp_t* handle) {
  uv__io_close(handle->loop, &handle->io_watcher);
  uv__handle_stop(handle);

  if (handle->io_watcher.fd != -1) {
    uv__close(handle->io_watcher.fd);
    handle->io_watcher.fd = -1;
  }
}


void uv__udp_finish_close(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;

  assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT));
  assert(handle->io_watcher.fd == -1);

  while (!QUEUE_EMPTY(&handle->write_queue)) {
    q = QUEUE_HEAD(&handle->write_queue);
    QUEUE_REMOVE(q);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    req->status = UV_ECANCELED;
    QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
  }

  uv__udp_run_completed(handle);

  assert(handle->send_queue_size == 0);
  assert(handle->send_queue_count == 0);

  /* Now tear down the handle. */
  handle->recv_cb = NULL;
  handle->alloc_cb = NULL;
  /* but _do not_ touch close_cb */
}


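/* Drain the write_completed_queue: unregister each finished request, free
 * its buffer array if it was heap-allocated and run its send callback.
 * Once both queues are empty the POLLOUT watcher is stopped (and the
 * handle too, if it is not reading). UV_HANDLE_UDP_PROCESSING keeps
 * uv__udp_send() from starting a synchronous write while callbacks run.
 */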
static void uv__udp_run_completed(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;

  assert(!(handle->flags & UV_HANDLE_UDP_PROCESSING));
  handle->flags |= UV_HANDLE_UDP_PROCESSING;

  while (!QUEUE_EMPTY(&handle->write_completed_queue)) {
    q = QUEUE_HEAD(&handle->write_completed_queue);
    QUEUE_REMOVE(q);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, req);

    handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
    handle->send_queue_count--;

    if (req->bufs != req->bufsml)
      uv__free(req->bufs);
    req->bufs = NULL;

    if (req->send_cb == NULL)
      continue;

    /* req->status >= 0 == bytes written
     * req->status < 0 == errno
     */
    if (req->status >= 0)
      req->send_cb(req, 0);
    else
      req->send_cb(req, req->status);
  }

  if (QUEUE_EMPTY(&handle->write_queue)) {
    /* Pending queue and completion queue empty, stop watcher. */
    uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT);
    if (!uv__io_active(&handle->io_watcher, POLLIN))
      uv__handle_stop(handle);
  }

  handle->flags &= ~UV_HANDLE_UDP_PROCESSING;
}


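/* I/O watcher callback: dispatches readiness events from the event loop.
 * POLLIN means datagrams can be read; POLLOUT means queued sends can make
 * progress and completed requests can have their callbacks run.
 */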
static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) {
  uv_udp_t* handle;

  handle = container_of(w, uv_udp_t, io_watcher);
  assert(handle->type == UV_UDP);

  if (revents & POLLIN)
    uv__udp_recvmsg(handle);

  if (revents & POLLOUT) {
    uv__udp_sendmsg(handle);
    uv__udp_run_completed(handle);
  }
}

#if HAVE_MMSG
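/* Receive up to UV__MMSG_MAXWIDTH datagrams with one recvmmsg() call. The
 * buffer supplied by alloc_cb is sliced into UV__UDP_DGRAM_MAXSIZE chunks,
 * one per datagram. Every chunk except the one at the buffer base is
 * delivered to recv_cb with the UV_UDP_MMSG_CHUNK flag so the application
 * knows not to free it yet.
 */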
static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
  struct sockaddr_in6 peers[UV__MMSG_MAXWIDTH];
  struct iovec iov[UV__MMSG_MAXWIDTH];
  struct uv__mmsghdr msgs[UV__MMSG_MAXWIDTH];
  ssize_t nread;
  uv_buf_t chunk_buf;
  size_t chunks;
  int flags;
  size_t k;

  /* prepare structures for recvmmsg */
  chunks = buf->len / UV__UDP_DGRAM_MAXSIZE;
  if (chunks > ARRAY_SIZE(iov))
    chunks = ARRAY_SIZE(iov);
  for (k = 0; k < chunks; ++k) {
    iov[k].iov_base = buf->base + k * UV__UDP_DGRAM_MAXSIZE;
    iov[k].iov_len = UV__UDP_DGRAM_MAXSIZE;
    msgs[k].msg_hdr.msg_iov = iov + k;
    msgs[k].msg_hdr.msg_iovlen = 1;
    msgs[k].msg_hdr.msg_name = peers + k;
    msgs[k].msg_hdr.msg_namelen = sizeof(peers[0]);
  }

  do
    nread = uv__recvmmsg(handle->io_watcher.fd, msgs, chunks, 0, NULL);
  while (nread == -1 && errno == EINTR);

  if (nread < 1) {
    if (nread == 0 || errno == EAGAIN || errno == EWOULDBLOCK)
      handle->recv_cb(handle, 0, buf, NULL, 0);
    else
      handle->recv_cb(handle, UV__ERR(errno), buf, NULL, 0);
  } else {
    /* count down to zero, so the chunk at the buffer base is delivered last */
    for (k = nread; k > 0 && handle->recv_cb != NULL;) {
      k--;
      flags = 0;
      if (msgs[k].msg_hdr.msg_flags & MSG_TRUNC)
        flags |= UV_UDP_PARTIAL;
      if (k != 0)
        flags |= UV_UDP_MMSG_CHUNK;

      chunk_buf = uv_buf_init(iov[k].iov_base, iov[k].iov_len);
      handle->recv_cb(handle,
                      msgs[k].msg_len,
                      &chunk_buf,
                      msgs[k].msg_hdr.msg_name,
                      flags);
    }
  }
  return nread;
}
#endif

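/* Read datagrams until the socket would block, the handle is closed or
 * recv is stopped, or the iteration budget runs out. Each pass asks
 * alloc_cb for a buffer; when recvmmsg() is available and the buffer can
 * hold at least two UV__UDP_DGRAM_MAXSIZE datagrams, the batched path is
 * taken instead of a single recvmsg().
 */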
static void uv__udp_recvmsg(uv_udp_t* handle) {
  struct sockaddr_storage peer;
  struct msghdr h;
  ssize_t nread;
  uv_buf_t buf;
  int flags;
  int count;

  assert(handle->recv_cb != NULL);
  assert(handle->alloc_cb != NULL);

  /* Prevent loop starvation when the data comes in as fast as (or faster than)
   * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
   */
  count = 32;

  do {
    buf = uv_buf_init(NULL, 0);
    handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf);
    if (buf.base == NULL || buf.len == 0) {
      handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
      return;
    }
    assert(buf.base != NULL);

#if HAVE_MMSG
    uv_once(&once, uv__udp_mmsg_init);
    if (uv__recvmmsg_avail) {
      /* The allocator returned space for more than one datagram; use it to
       * receive multiple datagrams. */
      if (buf.len >= 2 * UV__UDP_DGRAM_MAXSIZE) {
        nread = uv__udp_recvmmsg(handle, &buf);
        if (nread > 0)
          count -= nread;
        continue;
      }
    }
#endif

    memset(&h, 0, sizeof(h));
    memset(&peer, 0, sizeof(peer));
    h.msg_name = &peer;
    h.msg_namelen = sizeof(peer);
    h.msg_iov = (void*) &buf;
    h.msg_iovlen = 1;

    do {
      nread = recvmsg(handle->io_watcher.fd, &h, 0);
    }
    while (nread == -1 && errno == EINTR);

    if (nread == -1) {
      if (errno == EAGAIN || errno == EWOULDBLOCK)
        handle->recv_cb(handle, 0, &buf, NULL, 0);
      else
        handle->recv_cb(handle, UV__ERR(errno), &buf, NULL, 0);
    }
    else {
      flags = 0;
      if (h.msg_flags & MSG_TRUNC)
        flags |= UV_UDP_PARTIAL;

      handle->recv_cb(handle, nread, &buf, (const struct sockaddr*) &peer, flags);
    }
    count--;
  }
  /* recv_cb callback may decide to pause or close the handle */
  while (nread != -1
      && count > 0
      && handle->io_watcher.fd != -1
      && handle->recv_cb != NULL);
}

#if HAVE_MMSG
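/* Flush the write queue with sendmmsg(), batching up to UV__MMSG_MAXWIDTH
 * requests per system call. On success each request's status is set to the
 * length of its first buffer; on failure every request in the batch is
 * failed with the same errno. Completed requests are moved to the
 * write_completed_queue and their callbacks run later, via uv__io_feed().
 */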
static void uv__udp_sendmmsg(uv_udp_t* handle) {
  uv_udp_send_t* req;
  struct uv__mmsghdr h[UV__MMSG_MAXWIDTH];
  struct uv__mmsghdr* p;
  QUEUE* q;
  ssize_t npkts;
  size_t pkts;
  size_t i;

  if (QUEUE_EMPTY(&handle->write_queue))
    return;

write_queue_drain:
  for (pkts = 0, q = QUEUE_HEAD(&handle->write_queue);
       pkts < UV__MMSG_MAXWIDTH && q != &handle->write_queue;
       ++pkts, q = QUEUE_HEAD(q)) {
    assert(q != NULL);
    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    assert(req != NULL);

    p = &h[pkts];
    memset(p, 0, sizeof(*p));
    if (req->addr.ss_family == AF_UNSPEC) {
      p->msg_hdr.msg_name = NULL;
      p->msg_hdr.msg_namelen = 0;
    } else {
      p->msg_hdr.msg_name = &req->addr;
      if (req->addr.ss_family == AF_INET6)
        p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in6);
      else if (req->addr.ss_family == AF_INET)
        p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in);
      else if (req->addr.ss_family == AF_UNIX)
        p->msg_hdr.msg_namelen = sizeof(struct sockaddr_un);
      else {
        assert(0 && "unsupported address family");
        abort();
      }
    }
    h[pkts].msg_hdr.msg_iov = (struct iovec*) req->bufs;
    h[pkts].msg_hdr.msg_iovlen = req->nbufs;
  }

  do
    npkts = uv__sendmmsg(handle->io_watcher.fd, h, pkts, 0);
  while (npkts == -1 && errno == EINTR);

  if (npkts < 1) {
    if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
      return;
    for (i = 0, q = QUEUE_HEAD(&handle->write_queue);
         i < pkts && q != &handle->write_queue;
         ++i, q = QUEUE_HEAD(q)) {
      assert(q != NULL);
      req = QUEUE_DATA(q, uv_udp_send_t, queue);
      assert(req != NULL);

      req->status = UV__ERR(errno);
      QUEUE_REMOVE(&req->queue);
      QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
    }
    uv__io_feed(handle->loop, &handle->io_watcher);
    return;
  }

  for (i = 0, q = QUEUE_HEAD(&handle->write_queue);
       i < pkts && q != &handle->write_queue;
       ++i, q = QUEUE_HEAD(&handle->write_queue)) {
    assert(q != NULL);
    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    assert(req != NULL);

    req->status = req->bufs[0].len;

    /* Sending a datagram is an atomic operation: either all data
     * is written or nothing is (and EMSGSIZE is raised). That is
     * why we don't handle partial writes. Just pop the request
     * off the write queue and onto the completed queue, done.
     */
    QUEUE_REMOVE(&req->queue);
    QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
  }

  /* couldn't batch everything, continue sending (jump to avoid stack growth) */
  if (!QUEUE_EMPTY(&handle->write_queue))
    goto write_queue_drain;
  uv__io_feed(handle->loop, &handle->io_watcher);
  return;
}
#endif

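/* Flush the write queue one request at a time with sendmsg(). This is the
 * fallback used when sendmmsg() is unavailable; otherwise the batched path
 * above takes over. Stops early when the socket would block; anything
 * else, success or error, completes the request.
 */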
static void uv__udp_sendmsg(uv_udp_t* handle) {
  uv_udp_send_t* req;
  struct msghdr h;
  QUEUE* q;
  ssize_t size;

#if HAVE_MMSG
  uv_once(&once, uv__udp_mmsg_init);
  if (uv__sendmmsg_avail) {
    uv__udp_sendmmsg(handle);
    return;
  }
#endif

  while (!QUEUE_EMPTY(&handle->write_queue)) {
    q = QUEUE_HEAD(&handle->write_queue);
    assert(q != NULL);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    assert(req != NULL);

    memset(&h, 0, sizeof h);
    if (req->addr.ss_family == AF_UNSPEC) {
      h.msg_name = NULL;
      h.msg_namelen = 0;
    } else {
      h.msg_name = &req->addr;
      if (req->addr.ss_family == AF_INET6)
        h.msg_namelen = sizeof(struct sockaddr_in6);
      else if (req->addr.ss_family == AF_INET)
        h.msg_namelen = sizeof(struct sockaddr_in);
      else if (req->addr.ss_family == AF_UNIX)
        h.msg_namelen = sizeof(struct sockaddr_un);
      else {
        assert(0 && "unsupported address family");
        abort();
      }
    }
    h.msg_iov = (struct iovec*) req->bufs;
    h.msg_iovlen = req->nbufs;

    do {
      size = sendmsg(handle->io_watcher.fd, &h, 0);
    } while (size == -1 && errno == EINTR);

    if (size == -1) {
      if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
        break;
    }

    req->status = (size == -1 ? UV__ERR(errno) : size);

    /* Sending a datagram is an atomic operation: either all data
     * is written or nothing is (and EMSGSIZE is raised). That is
     * why we don't handle partial writes. Just pop the request
     * off the write queue and onto the completed queue, done.
     */
    QUEUE_REMOVE(&req->queue);
    QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
    uv__io_feed(handle->loop, &handle->io_watcher);
  }
}

/* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional
 * refinements for programs that use multicast.
 *
 * Linux as of 3.9 has a SO_REUSEPORT socket option but with semantics that
 * are different from the BSDs: it _shares_ the port rather than stealing it
 * from the current listener. While useful, it's not something we can emulate
 * on other platforms so we don't enable it.
 *
 * zOS does not support getsockname with SO_REUSEPORT option when using
 * AF_UNIX.
 */
static int uv__set_reuse(int fd) {
  int yes;
  yes = 1;

#if defined(SO_REUSEPORT) && defined(__MVS__)
  struct sockaddr_in sockfd;
  unsigned int sockfd_len = sizeof(sockfd);
  if (getsockname(fd, (struct sockaddr*) &sockfd, &sockfd_len) == -1)
    return UV__ERR(errno);
  if (sockfd.sin_family == AF_UNIX) {
    if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
      return UV__ERR(errno);
  } else {
    if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
      return UV__ERR(errno);
  }
#elif defined(SO_REUSEPORT) && !defined(__linux__)
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
    return UV__ERR(errno);
#else
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
    return UV__ERR(errno);
#endif

  return 0;
}


int uv__udp_bind(uv_udp_t* handle,
                 const struct sockaddr* addr,
                 unsigned int addrlen,
                 unsigned int flags) {
  int err;
  int yes;
  int fd;

  /* Check for bad flags. */
  if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR))
    return UV_EINVAL;

  /* Cannot set IPv6-only mode on non-IPv6 socket. */
  if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6)
    return UV_EINVAL;

  fd = handle->io_watcher.fd;
  if (fd == -1) {
    err = uv__socket(addr->sa_family, SOCK_DGRAM, 0);
    if (err < 0)
      return err;
    fd = err;
    handle->io_watcher.fd = fd;
  }

  if (flags & UV_UDP_REUSEADDR) {
    err = uv__set_reuse(fd);
    if (err)
      return err;
  }

  if (flags & UV_UDP_IPV6ONLY) {
#ifdef IPV6_V6ONLY
    yes = 1;
    if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &yes, sizeof yes) == -1) {
      err = UV__ERR(errno);
      return err;
    }
#else
    err = UV_ENOTSUP;
    return err;
#endif
  }

  if (bind(fd, addr, addrlen)) {
    err = UV__ERR(errno);
    if (errno == EAFNOSUPPORT)
      /* OSX, other BSDs and SunOS fail with EAFNOSUPPORT when binding a
       * socket created with AF_INET to an AF_INET6 address or vice versa. */
      err = UV_EINVAL;
    return err;
  }

  if (addr->sa_family == AF_INET6)
    handle->flags |= UV_HANDLE_IPV6;

  handle->flags |= UV_HANDLE_BOUND;
  return 0;
}


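/* Lazily bind the socket to the wildcard address for the given family when
 * no explicit bind has happened yet. Called on the first send, connect,
 * recv_start or membership change so the handle has a usable fd.
 */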
static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
                                       int domain,
                                       unsigned int flags) {
  union {
    struct sockaddr_in6 in6;
    struct sockaddr_in in;
    struct sockaddr addr;
  } taddr;
  socklen_t addrlen;

  if (handle->io_watcher.fd != -1)
    return 0;

  switch (domain) {
  case AF_INET:
  {
    struct sockaddr_in* addr = &taddr.in;
    memset(addr, 0, sizeof *addr);
    addr->sin_family = AF_INET;
    addr->sin_addr.s_addr = INADDR_ANY;
    addrlen = sizeof *addr;
    break;
  }
  case AF_INET6:
  {
    struct sockaddr_in6* addr = &taddr.in6;
    memset(addr, 0, sizeof *addr);
    addr->sin6_family = AF_INET6;
    addr->sin6_addr = in6addr_any;
    addrlen = sizeof *addr;
    break;
  }
  default:
    assert(0 && "unsupported address family");
    abort();
  }

  return uv__udp_bind(handle, &taddr.addr, addrlen, flags);
}


int uv__udp_connect(uv_udp_t* handle,
                    const struct sockaddr* addr,
                    unsigned int addrlen) {
  int err;

  err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
  if (err)
    return err;

  do {
    errno = 0;
    err = connect(handle->io_watcher.fd, addr, addrlen);
  } while (err == -1 && errno == EINTR);

  if (err)
    return UV__ERR(errno);

  handle->flags |= UV_HANDLE_UDP_CONNECTED;

  return 0;
}


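/* Dissolve the peer association by connect()ing to an AF_UNSPEC address,
 * the POSIX-sanctioned way to disconnect a datagram socket. Some systems
 * return EAFNOSUPPORT for it, which is harmless and ignored here.
 */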
int uv__udp_disconnect(uv_udp_t* handle) {
  int r;
  struct sockaddr addr;

  memset(&addr, 0, sizeof(addr));

  addr.sa_family = AF_UNSPEC;

  do {
    errno = 0;
    r = connect(handle->io_watcher.fd, &addr, sizeof(addr));
  } while (r == -1 && errno == EINTR);

  if (r == -1 && errno != EAFNOSUPPORT)
    return UV__ERR(errno);

  handle->flags &= ~UV_HANDLE_UDP_CONNECTED;
  return 0;
}


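/* Queue a send request. The buffer list is copied into the request (heap
 * allocated when it doesn't fit in req->bufsml). When nothing else is
 * queued and no completion callbacks are currently running, an immediate
 * non-blocking write is attempted; whatever cannot be written right away
 * stays on the write queue and the POLLOUT watcher takes over.
 */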
int uv__udp_send(uv_udp_send_t* req,
                 uv_udp_t* handle,
                 const uv_buf_t bufs[],
                 unsigned int nbufs,
                 const struct sockaddr* addr,
                 unsigned int addrlen,
                 uv_udp_send_cb send_cb) {
  int err;
  int empty_queue;

  assert(nbufs > 0);

  if (addr) {
    err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
    if (err)
      return err;
  }

  /* It's legal for send_queue_count > 0 even when the write_queue is empty;
   * it means there are error-state requests in the write_completed_queue that
   * will touch up send_queue_size/count later.
   */
  empty_queue = (handle->send_queue_count == 0);

  uv__req_init(handle->loop, req, UV_UDP_SEND);
  assert(addrlen <= sizeof(req->addr));
  if (addr == NULL)
    req->addr.ss_family = AF_UNSPEC;
  else
    memcpy(&req->addr, addr, addrlen);
  req->send_cb = send_cb;
  req->handle = handle;
  req->nbufs = nbufs;

  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(bufs[0]));

  if (req->bufs == NULL) {
    uv__req_unregister(handle->loop, req);
    return UV_ENOMEM;
  }

  memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
  handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
  handle->send_queue_count++;
  QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue);
  uv__handle_start(handle);

  if (empty_queue && !(handle->flags & UV_HANDLE_UDP_PROCESSING)) {
    uv__udp_sendmsg(handle);

    /* `uv__udp_sendmsg` may not be able to do non-blocking write straight
     * away. In such cases the `io_watcher` has to be queued for asynchronous
     * write.
     */
    if (!QUEUE_EMPTY(&handle->write_queue))
      uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
  } else {
    uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
  }

  return 0;
}


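/* Synchronous, best-effort send that bypasses the request queue. Returns
 * the number of bytes sent, UV_EAGAIN when the socket would block or when
 * queued sends are still pending, or a translated errno on failure.
 */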
int uv__udp_try_send(uv_udp_t* handle,
                     const uv_buf_t bufs[],
                     unsigned int nbufs,
                     const struct sockaddr* addr,
                     unsigned int addrlen) {
  int err;
  struct msghdr h;
  ssize_t size;

  assert(nbufs > 0);

  /* already sending a message */
  if (handle->send_queue_count != 0)
    return UV_EAGAIN;

  if (addr) {
    err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
    if (err)
      return err;
  } else {
    assert(handle->flags & UV_HANDLE_UDP_CONNECTED);
  }

  memset(&h, 0, sizeof h);
  h.msg_name = (struct sockaddr*) addr;
  h.msg_namelen = addrlen;
  h.msg_iov = (struct iovec*) bufs;
  h.msg_iovlen = nbufs;

  do {
    size = sendmsg(handle->io_watcher.fd, &h, 0);
  } while (size == -1 && errno == EINTR);

  if (size == -1) {
    if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
      return UV_EAGAIN;
    else
      return UV__ERR(errno);
  }

  return size;
}


static int uv__udp_set_membership4(uv_udp_t* handle,
                                   const struct sockaddr_in* multicast_addr,
                                   const char* interface_addr,
                                   uv_membership membership) {
  struct ip_mreq mreq;
  int optname;
  int err;

  memset(&mreq, 0, sizeof mreq);

  if (interface_addr) {
    err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
    if (err)
      return err;
  } else {
    mreq.imr_interface.s_addr = htonl(INADDR_ANY);
  }

  mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;

  switch (membership) {
  case UV_JOIN_GROUP:
    optname = IP_ADD_MEMBERSHIP;
    break;
  case UV_LEAVE_GROUP:
    optname = IP_DROP_MEMBERSHIP;
    break;
  default:
    return UV_EINVAL;
  }

  if (setsockopt(handle->io_watcher.fd,
                 IPPROTO_IP,
                 optname,
                 &mreq,
                 sizeof(mreq))) {
#if defined(__MVS__)
    if (errno == ENXIO)
      return UV_ENODEV;
#endif
    return UV__ERR(errno);
  }

  return 0;
}


static int uv__udp_set_membership6(uv_udp_t* handle,
                                   const struct sockaddr_in6* multicast_addr,
                                   const char* interface_addr,
                                   uv_membership membership) {
  int optname;
  struct ipv6_mreq mreq;
  struct sockaddr_in6 addr6;

  memset(&mreq, 0, sizeof mreq);

  if (interface_addr) {
    if (uv_ip6_addr(interface_addr, 0, &addr6))
      return UV_EINVAL;
    mreq.ipv6mr_interface = addr6.sin6_scope_id;
  } else {
    mreq.ipv6mr_interface = 0;
  }

  mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr;

  switch (membership) {
  case UV_JOIN_GROUP:
    optname = IPV6_ADD_MEMBERSHIP;
    break;
  case UV_LEAVE_GROUP:
    optname = IPV6_DROP_MEMBERSHIP;
    break;
  default:
    return UV_EINVAL;
  }

  if (setsockopt(handle->io_watcher.fd,
                 IPPROTO_IPV6,
                 optname,
                 &mreq,
                 sizeof(mreq))) {
#if defined(__MVS__)
    if (errno == ENXIO)
      return UV_ENODEV;
#endif
    return UV__ERR(errno);
  }

  return 0;
}


#if !defined(__OpenBSD__) && !defined(__NetBSD__) && !defined(__ANDROID__)
static int uv__udp_set_source_membership4(uv_udp_t* handle,
                                          const struct sockaddr_in* multicast_addr,
                                          const char* interface_addr,
                                          const struct sockaddr_in* source_addr,
                                          uv_membership membership) {
  struct ip_mreq_source mreq;
  int optname;
  int err;

  err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
  if (err)
    return err;

  memset(&mreq, 0, sizeof(mreq));

  if (interface_addr != NULL) {
    err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
    if (err)
      return err;
  } else {
    mreq.imr_interface.s_addr = htonl(INADDR_ANY);
  }

  mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
  mreq.imr_sourceaddr.s_addr = source_addr->sin_addr.s_addr;

  if (membership == UV_JOIN_GROUP)
    optname = IP_ADD_SOURCE_MEMBERSHIP;
  else if (membership == UV_LEAVE_GROUP)
    optname = IP_DROP_SOURCE_MEMBERSHIP;
  else
    return UV_EINVAL;

  if (setsockopt(handle->io_watcher.fd,
                 IPPROTO_IP,
                 optname,
                 &mreq,
                 sizeof(mreq))) {
    return UV__ERR(errno);
  }

  return 0;
}


static int uv__udp_set_source_membership6(uv_udp_t* handle,
                                          const struct sockaddr_in6* multicast_addr,
                                          const char* interface_addr,
                                          const struct sockaddr_in6* source_addr,
                                          uv_membership membership) {
  struct group_source_req mreq;
  struct sockaddr_in6 addr6;
  int optname;
  int err;

  err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
  if (err)
    return err;

  memset(&mreq, 0, sizeof(mreq));

  if (interface_addr != NULL) {
    err = uv_ip6_addr(interface_addr, 0, &addr6);
    if (err)
      return err;
    mreq.gsr_interface = addr6.sin6_scope_id;
  } else {
    mreq.gsr_interface = 0;
  }

  memcpy(&mreq.gsr_group, multicast_addr, sizeof(mreq.gsr_group));
  memcpy(&mreq.gsr_source, source_addr, sizeof(mreq.gsr_source));

  if (membership == UV_JOIN_GROUP)
    optname = MCAST_JOIN_SOURCE_GROUP;
  else if (membership == UV_LEAVE_GROUP)
    optname = MCAST_LEAVE_SOURCE_GROUP;
  else
    return UV_EINVAL;

  if (setsockopt(handle->io_watcher.fd,
                 IPPROTO_IPV6,
                 optname,
                 &mreq,
                 sizeof(mreq))) {
    return UV__ERR(errno);
  }

  return 0;
}
#endif


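/* Example (a sketch, not part of the implementation): passing a domain in
 * the low byte of `flags` creates the socket up front, e.g.
 *
 *   uv_udp_t h;
 *   uv_udp_init_ex(uv_default_loop(), &h, AF_INET);
 *
 * With AF_UNSPEC the socket is created lazily on first bind/send/recv.
 */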
int uv_udp_init_ex(uv_loop_t* loop, uv_udp_t* handle, unsigned int flags) {
  int domain;
  int err;
  int fd;

  /* Use the lower 8 bits for the domain */
  domain = flags & 0xFF;
  if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
    return UV_EINVAL;

  if (flags & ~0xFF)
    return UV_EINVAL;

  if (domain != AF_UNSPEC) {
    err = uv__socket(domain, SOCK_DGRAM, 0);
    if (err < 0)
      return err;
    fd = err;
  } else {
    fd = -1;
  }

  uv__handle_init(loop, (uv_handle_t*) handle, UV_UDP);
  handle->alloc_cb = NULL;
  handle->recv_cb = NULL;
  handle->send_queue_size = 0;
  handle->send_queue_count = 0;
  uv__io_init(&handle->io_watcher, uv__udp_io, fd);
  QUEUE_INIT(&handle->write_queue);
  QUEUE_INIT(&handle->write_completed_queue);

  return 0;
}


int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) {
  return uv_udp_init_ex(loop, handle, AF_UNSPEC);
}


int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
  int err;

  /* Check for already active socket. */
  if (handle->io_watcher.fd != -1)
    return UV_EBUSY;

  if (uv__fd_exists(handle->loop, sock))
    return UV_EEXIST;

  err = uv__nonblock(sock, 1);
  if (err)
    return err;

  err = uv__set_reuse(sock);
  if (err)
    return err;

  handle->io_watcher.fd = sock;
  if (uv__udp_is_connected(handle))
    handle->flags |= UV_HANDLE_UDP_CONNECTED;

  return 0;
}


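/* Example (a sketch, not part of the implementation): join an IPv4 group
 * on the default interface:
 *
 *   uv_udp_set_membership(&h, "239.255.0.1", NULL, UV_JOIN_GROUP);
 */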
int uv_udp_set_membership(uv_udp_t* handle,
                          const char* multicast_addr,
                          const char* interface_addr,
                          uv_membership membership) {
  int err;
  struct sockaddr_in addr4;
  struct sockaddr_in6 addr6;

  if (uv_ip4_addr(multicast_addr, 0, &addr4) == 0) {
    err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
    if (err)
      return err;
    return uv__udp_set_membership4(handle, &addr4, interface_addr, membership);
  } else if (uv_ip6_addr(multicast_addr, 0, &addr6) == 0) {
    err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
    if (err)
      return err;
    return uv__udp_set_membership6(handle, &addr6, interface_addr, membership);
  } else {
    return UV_EINVAL;
  }
}


int uv_udp_set_source_membership(uv_udp_t* handle,
                                 const char* multicast_addr,
                                 const char* interface_addr,
                                 const char* source_addr,
                                 uv_membership membership) {
#if !defined(__OpenBSD__) && !defined(__NetBSD__) && !defined(__ANDROID__)
  int err;
  struct sockaddr_storage mcast_addr;
  struct sockaddr_in* mcast_addr4;
  struct sockaddr_in6* mcast_addr6;
  struct sockaddr_storage src_addr;
  struct sockaddr_in* src_addr4;
  struct sockaddr_in6* src_addr6;

  mcast_addr4 = (struct sockaddr_in*) &mcast_addr;
  mcast_addr6 = (struct sockaddr_in6*) &mcast_addr;
  src_addr4 = (struct sockaddr_in*) &src_addr;
  src_addr6 = (struct sockaddr_in6*) &src_addr;

  err = uv_ip4_addr(multicast_addr, 0, mcast_addr4);
  if (err) {
    err = uv_ip6_addr(multicast_addr, 0, mcast_addr6);
    if (err)
      return err;
    err = uv_ip6_addr(source_addr, 0, src_addr6);
    if (err)
      return err;
    return uv__udp_set_source_membership6(handle,
                                          mcast_addr6,
                                          interface_addr,
                                          src_addr6,
                                          membership);
  }

  err = uv_ip4_addr(source_addr, 0, src_addr4);
  if (err)
    return err;
  return uv__udp_set_source_membership4(handle,
                                        mcast_addr4,
                                        interface_addr,
                                        src_addr4,
                                        membership);
#else
  return UV_ENOSYS;
#endif
}


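/* Apply a socket option at the protocol level matching the handle's
 * address family: option6 at IPPROTO_IPV6 for IPv6 handles, option4 at
 * IPPROTO_IP otherwise.
 */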
static int uv__setsockopt(uv_udp_t* handle,
                          int option4,
                          int option6,
                          const void* val,
                          socklen_t size) {
  int r;

  if (handle->flags & UV_HANDLE_IPV6)
    r = setsockopt(handle->io_watcher.fd,
                   IPPROTO_IPV6,
                   option6,
                   val,
                   size);
  else
    r = setsockopt(handle->io_watcher.fd,
                   IPPROTO_IP,
                   option4,
                   val,
                   size);
  if (r)
    return UV__ERR(errno);

  return 0;
}

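/* Like uv__setsockopt(), but passes the value with the width the platform
 * expects for these options: char on Solaris, AIX and z/OS, unsigned char
 * on OpenBSD, int elsewhere.
 */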
static int uv__setsockopt_maybe_char(uv_udp_t* handle,
                                     int option4,
                                     int option6,
                                     int val) {
#if defined(__sun) || defined(_AIX) || defined(__MVS__)
  char arg = val;
#elif defined(__OpenBSD__)
  unsigned char arg = val;
#else
  int arg = val;
#endif

  if (val < 0 || val > 255)
    return UV_EINVAL;

  return uv__setsockopt(handle, option4, option6, &arg, sizeof(arg));
}


int uv_udp_set_broadcast(uv_udp_t* handle, int on) {
  if (setsockopt(handle->io_watcher.fd,
                 SOL_SOCKET,
                 SO_BROADCAST,
                 &on,
                 sizeof(on))) {
    return UV__ERR(errno);
  }

  return 0;
}


int uv_udp_set_ttl(uv_udp_t* handle, int ttl) {
  if (ttl < 1 || ttl > 255)
    return UV_EINVAL;

#if defined(__MVS__)
  if (!(handle->flags & UV_HANDLE_IPV6))
    return UV_ENOTSUP;  /* zOS does not support setting TTL for IPv4 */
#endif

  /*
   * On Solaris and derivatives such as SmartOS, the length of socket options
   * is sizeof(int) for IP_TTL and IPV6_UNICAST_HOPS,
   * so hardcode the size of these options on this platform,
   * and use the general uv__setsockopt_maybe_char call on other platforms.
   */
#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
    defined(__MVS__)

  return uv__setsockopt(handle,
                        IP_TTL,
                        IPV6_UNICAST_HOPS,
                        &ttl,
                        sizeof(ttl));

#else /* !(defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
       defined(__MVS__)) */

  return uv__setsockopt_maybe_char(handle,
                                   IP_TTL,
                                   IPV6_UNICAST_HOPS,
                                   ttl);

#endif /* defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
        defined(__MVS__) */
}


int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) {
  /*
   * On Solaris and derivatives such as SmartOS, the length of socket options
   * is sizeof(int) for IPV6_MULTICAST_HOPS and sizeof(char) for
   * IP_MULTICAST_TTL, so hardcode the size of the option in the IPv6 case,
   * and use the general uv__setsockopt_maybe_char call otherwise.
   */
#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
    defined(__MVS__)
  if (handle->flags & UV_HANDLE_IPV6)
    return uv__setsockopt(handle,
                          IP_MULTICAST_TTL,
                          IPV6_MULTICAST_HOPS,
                          &ttl,
                          sizeof(ttl));
#endif /* defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
          defined(__MVS__) */

  return uv__setsockopt_maybe_char(handle,
                                   IP_MULTICAST_TTL,
                                   IPV6_MULTICAST_HOPS,
                                   ttl);
}


int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) {
  /*
   * On Solaris and derivatives such as SmartOS, the length of socket options
   * is sizeof(int) for IPV6_MULTICAST_LOOP and sizeof(char) for
   * IP_MULTICAST_LOOP, so hardcode the size of the option in the IPv6 case,
   * and use the general uv__setsockopt_maybe_char call otherwise.
   */
#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
    defined(__MVS__)
  if (handle->flags & UV_HANDLE_IPV6)
    return uv__setsockopt(handle,
                          IP_MULTICAST_LOOP,
                          IPV6_MULTICAST_LOOP,
                          &on,
                          sizeof(on));
#endif /* defined(__sun) || defined(_AIX) || defined(__OpenBSD__) ||
          defined(__MVS__) */

  return uv__setsockopt_maybe_char(handle,
                                   IP_MULTICAST_LOOP,
                                   IPV6_MULTICAST_LOOP,
                                   on);
}

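/* Select the default outgoing interface for multicast packets. IPv4
 * identifies the interface by one of its unicast addresses; IPv6 uses the
 * scope id parsed by uv_ip6_addr(). A NULL interface_addr resets the
 * choice to the system default.
 */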
int uv_udp_set_multicast_interface(uv_udp_t* handle,
                                   const char* interface_addr) {
  struct sockaddr_storage addr_st;
  struct sockaddr_in* addr4;
  struct sockaddr_in6* addr6;

  addr4 = (struct sockaddr_in*) &addr_st;
  addr6 = (struct sockaddr_in6*) &addr_st;

  if (!interface_addr) {
    memset(&addr_st, 0, sizeof addr_st);
    if (handle->flags & UV_HANDLE_IPV6) {
      addr_st.ss_family = AF_INET6;
      addr6->sin6_scope_id = 0;
    } else {
      addr_st.ss_family = AF_INET;
      addr4->sin_addr.s_addr = htonl(INADDR_ANY);
    }
  } else if (uv_ip4_addr(interface_addr, 0, addr4) == 0) {
    /* nothing, address was parsed */
  } else if (uv_ip6_addr(interface_addr, 0, addr6) == 0) {
    /* nothing, address was parsed */
  } else {
    return UV_EINVAL;
  }

  if (addr_st.ss_family == AF_INET) {
    if (setsockopt(handle->io_watcher.fd,
                   IPPROTO_IP,
                   IP_MULTICAST_IF,
                   (void*) &addr4->sin_addr,
                   sizeof(addr4->sin_addr)) == -1) {
      return UV__ERR(errno);
    }
  } else if (addr_st.ss_family == AF_INET6) {
    if (setsockopt(handle->io_watcher.fd,
                   IPPROTO_IPV6,
                   IPV6_MULTICAST_IF,
                   &addr6->sin6_scope_id,
                   sizeof(addr6->sin6_scope_id)) == -1) {
      return UV__ERR(errno);
    }
  } else {
    assert(0 && "unexpected address family");
    abort();
  }

  return 0;
}

int uv_udp_getpeername(const uv_udp_t* handle,
                       struct sockaddr* name,
                       int* namelen) {

  return uv__getsockpeername((const uv_handle_t*) handle,
                             getpeername,
                             name,
                             namelen);
}

int uv_udp_getsockname(const uv_udp_t* handle,
                       struct sockaddr* name,
                       int* namelen) {

  return uv__getsockpeername((const uv_handle_t*) handle,
                             getsockname,
                             name,
                             namelen);
}


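/* Start receiving: requires both callbacks, binds lazily to the IPv4
 * wildcard address when the handle has no socket yet, and arms the POLLIN
 * watcher.
 */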
int uv__udp_recv_start(uv_udp_t* handle,
                       uv_alloc_cb alloc_cb,
                       uv_udp_recv_cb recv_cb) {
  int err;

  if (alloc_cb == NULL || recv_cb == NULL)
    return UV_EINVAL;

  if (uv__io_active(&handle->io_watcher, POLLIN))
    return UV_EALREADY;  /* FIXME(bnoordhuis) Should be UV_EBUSY. */

  err = uv__udp_maybe_deferred_bind(handle, AF_INET, 0);
  if (err)
    return err;

  handle->alloc_cb = alloc_cb;
  handle->recv_cb = recv_cb;

  uv__io_start(handle->loop, &handle->io_watcher, POLLIN);
  uv__handle_start(handle);

  return 0;
}


int uv__udp_recv_stop(uv_udp_t* handle) {
  uv__io_stop(handle->loop, &handle->io_watcher, POLLIN);

  if (!uv__io_active(&handle->io_watcher, POLLOUT))
    uv__handle_stop(handle);

  handle->alloc_cb = NULL;
  handle->recv_cb = NULL;

  return 0;
}