1 /**
2 * @file
3 * Sockets BSD-Like API module
4 */
5
6 /*
7 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without modification,
11 * are permitted provided that the following conditions are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 * 3. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
24 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
26 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
29 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
30 * OF SUCH DAMAGE.
31 *
32 * This file is part of the lwIP TCP/IP stack.
33 *
34 * Author: Adam Dunkels <adam@sics.se>
35 *
36 * Improved by Marc Boucher <marc@mbsi.ca> and David Haas <dhaas@alum.rpi.edu>
37 *
38 */
39
40 #include "lwip/opt.h"
41
42 #if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
43
44 #include "lwip/sockets.h"
45 #include "lwip/priv/sockets_priv.h"
46 #include "lwip/api.h"
47 #include "lwip/igmp.h"
48 #include "lwip/inet.h"
49 #include "lwip/tcp.h"
50 #include "lwip/raw.h"
51 #include "lwip/udp.h"
52 #include "lwip/memp.h"
53 #include "lwip/pbuf.h"
54 #include "lwip/netif.h"
55 #include "lwip/priv/tcpip_priv.h"
56 #include "lwip/mld6.h"
57 #if LWIP_ENABLE_DISTRIBUTED_NET
58 #include "lwip/distributed_net/distributed_net.h"
59 #include "lwip/distributed_net/distributed_net_core.h"
60 #endif /* LWIP_ENABLE_DISTRIBUTED_NET */
61 #if LWIP_CHECKSUM_ON_COPY
62 #include "lwip/inet_chksum.h"
63 #endif
64
65 #if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
66 #include <stdarg.h>
67 #endif
68
69 #include <string.h>
70
71 #ifdef LWIP_HOOK_FILENAME
72 #include LWIP_HOOK_FILENAME
73 #endif
74
75 /* If the netconn API is not required publicly, then we include the necessary
76 files here to get the implementation */
77 #if !LWIP_NETCONN
78 #undef LWIP_NETCONN
79 #define LWIP_NETCONN 1
80 #include "api_msg.c"
81 #include "api_lib.c"
82 #include "netbuf.c"
83 #undef LWIP_NETCONN
84 #define LWIP_NETCONN 0
85 #endif
86
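/* Helper macros to declare, allocate and free a struct lwip_select_cb, either on the
   stack or from the MEMP_SELECT_CB pool, depending on LWIP_MPU_COMPATIBLE */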
87 #define API_SELECT_CB_VAR_REF(name) API_VAR_REF(name)
88 #define API_SELECT_CB_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_select_cb, name)
89 #define API_SELECT_CB_VAR_ALLOC(name, retblock) API_VAR_ALLOC_EXT(struct lwip_select_cb, MEMP_SELECT_CB, name, retblock)
90 #define API_SELECT_CB_VAR_FREE(name) API_VAR_FREE(MEMP_SELECT_CB, name)
91
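/* Conversion macros between lwIP ip_addr_t/port pairs and the BSD-style sockaddr
   structures used at the socket API boundary */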
92 #if LWIP_IPV4
93 #define IP4ADDR_PORT_TO_SOCKADDR(sin, ipaddr, port) do { \
94 (sin)->sin_len = sizeof(struct sockaddr_in); \
95 (sin)->sin_family = AF_INET; \
96 (sin)->sin_port = lwip_htons((port)); \
97 inet_addr_from_ip4addr(&(sin)->sin_addr, ipaddr); \
98 memset((sin)->sin_zero, 0, SIN_ZERO_LEN); }while(0)
99 #define SOCKADDR4_TO_IP4ADDR_PORT(sin, ipaddr, port) do { \
100 inet_addr_to_ip4addr(ip_2_ip4(ipaddr), &((sin)->sin_addr)); \
101 (port) = lwip_ntohs((sin)->sin_port); }while(0)
102 #endif /* LWIP_IPV4 */
103
104 #if LWIP_IPV6
105 #define IP6ADDR_PORT_TO_SOCKADDR(sin6, ipaddr, port) do { \
106 (sin6)->sin6_len = sizeof(struct sockaddr_in6); \
107 (sin6)->sin6_family = AF_INET6; \
108 (sin6)->sin6_port = lwip_htons((port)); \
109 (sin6)->sin6_flowinfo = 0; \
110 inet6_addr_from_ip6addr(&(sin6)->sin6_addr, ipaddr); \
111 (sin6)->sin6_scope_id = ip6_addr_zone(ipaddr); }while(0)
112 #define SOCKADDR6_TO_IP6ADDR_PORT(sin6, ipaddr, port) do { \
113 inet6_addr_to_ip6addr(ip_2_ip6(ipaddr), &((sin6)->sin6_addr)); \
114 if (ip6_addr_has_scope(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { \
115 ip6_addr_set_zone(ip_2_ip6(ipaddr), (u8_t)((sin6)->sin6_scope_id)); \
116 } \
117 (port) = lwip_ntohs((sin6)->sin6_port); }while(0)
118 #endif /* LWIP_IPV6 */
119
120 #if LWIP_IPV4 && LWIP_IPV6
121 static void sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port);
122
123 #define IS_SOCK_ADDR_LEN_VALID(namelen) (((namelen) == sizeof(struct sockaddr_in)) || \
124 ((namelen) == sizeof(struct sockaddr_in6)))
125 #define IS_SOCK_ADDR_TYPE_VALID(name) (((name)->sa_family == AF_INET) || \
126 ((name)->sa_family == AF_INET6))
127 #define SOCK_ADDR_TYPE_MATCH(name, sock) \
128 ((((name)->sa_family == AF_INET) && !(NETCONNTYPE_ISIPV6((sock)->conn->type))) || \
129 (((name)->sa_family == AF_INET6) && (NETCONNTYPE_ISIPV6((sock)->conn->type))))
130 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) do { \
131 if (IP_IS_ANY_TYPE_VAL(*ipaddr) || IP_IS_V6_VAL(*ipaddr)) { \
132 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port); \
133 } else { \
134 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port); \
135 } } while(0)
136 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) sockaddr_to_ipaddr_port(sockaddr, ipaddr, &(port))
137 #define DOMAIN_TO_NETCONN_TYPE(domain, type) (((domain) == AF_INET) ? \
138 (type) : (enum netconn_type)((type) | NETCONN_TYPE_IPV6))
139 #elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */
140 #define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in6))
141 #define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET6)
142 #define SOCK_ADDR_TYPE_MATCH(name, sock) 1
143 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
144 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port)
145 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
146 SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, port)
147 #define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
148 #else /*-> LWIP_IPV4: LWIP_IPV4 && LWIP_IPV6 */
149 #define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in))
150 #define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET)
151 #define SOCK_ADDR_TYPE_MATCH(name, sock) 1
152 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
153 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port)
154 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
155 SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, port)
156 #define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
157 #endif /* LWIP_IPV6 */
158
159 #define IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) (((name)->sa_family == AF_UNSPEC) || \
160 IS_SOCK_ADDR_TYPE_VALID(name))
161 #define SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock) (((name)->sa_family == AF_UNSPEC) || \
162 SOCK_ADDR_TYPE_MATCH(name, sock))
163 #define IS_SOCK_ADDR_ALIGNED(name) ((((mem_ptr_t)(name)) % 4) == 0)
164
165
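/* Option-length/state sanity checks used by getsockopt/setsockopt: on failure they
   release the socket and return the matching errno value from the calling function */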
166 #define LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype) do { if ((optlen) < sizeof(opttype)) { done_socket(sock); return EINVAL; }}while(0)
167 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, opttype) do { \
168 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
169 if ((sock)->conn == NULL) { done_socket(sock); return EINVAL; } }while(0)
170 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype) do { \
171 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
172 if (((sock)->conn == NULL) || ((sock)->conn->pcb.tcp == NULL)) { done_socket(sock); return EINVAL; } }while(0)
173 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, opttype, netconntype) do { \
174 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype); \
175 if (NETCONNTYPE_GROUP(netconn_type((sock)->conn)) != netconntype) { done_socket(sock); return ENOPROTOOPT; } }while(0)
176
177
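/* Helpers for the lwip_setgetsockopt_data passed to the tcpip thread: allocated from
   the MEMP_SOCKET_SETGETSOCKOPT_DATA pool when LWIP_MPU_COMPATIBLE, otherwise kept on the stack */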
178 #define LWIP_SETGETSOCKOPT_DATA_VAR_REF(name) API_VAR_REF(name)
179 #define LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_setgetsockopt_data, name)
180 #define LWIP_SETGETSOCKOPT_DATA_VAR_FREE(name) API_VAR_FREE(MEMP_SOCKET_SETGETSOCKOPT_DATA, name)
181 #if LWIP_MPU_COMPATIBLE
182 #define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) do { \
183 name = (struct lwip_setgetsockopt_data *)memp_malloc(MEMP_SOCKET_SETGETSOCKOPT_DATA); \
184 if (name == NULL) { \
185 sock_set_errno(sock, ENOMEM); \
186 done_socket(sock); \
187 return -1; \
188 } }while(0)
189 #else /* LWIP_MPU_COMPATIBLE */
190 #define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock)
191 #endif /* LWIP_MPU_COMPATIBLE */
192
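/* SO_SNDTIMEO/SO_RCVTIMEO option values: plain integer milliseconds when
   LWIP_SO_SNDRCVTIMEO_NONSTANDARD is set, standard struct timeval otherwise */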
193 #if LWIP_SO_SNDRCVTIMEO_NONSTANDARD
194 #define LWIP_SO_SNDRCVTIMEO_OPTTYPE int
195 #define LWIP_SO_SNDRCVTIMEO_SET(optval, val) (*(int *)(optval) = (val))
196 #define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((long)*(const int*)(optval))
197 #else
198 #define LWIP_SO_SNDRCVTIMEO_OPTTYPE struct timeval
199 #define LWIP_SO_SNDRCVTIMEO_SET(optval, val) do { \
200 u32_t loc = (val); \
201 ((struct timeval *)(optval))->tv_sec = (long)((loc) / 1000U); \
202 ((struct timeval *)(optval))->tv_usec = (long)(((loc) % 1000U) * 1000U); }while(0)
203 #define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((((const struct timeval *)(optval))->tv_sec * 1000) + (((const struct timeval *)(optval))->tv_usec / 1000))
204 #endif
205
206
207 /** A struct sockaddr replacement that has the same alignment as sockaddr_in/
208 * sockaddr_in6 if instantiated.
209 */
210 union sockaddr_aligned {
211 struct sockaddr sa;
212 #if LWIP_IPV6
213 struct sockaddr_in6 sin6;
214 #endif /* LWIP_IPV6 */
215 #if LWIP_IPV4
216 struct sockaddr_in sin;
217 #endif /* LWIP_IPV4 */
218 };
219
220 /* Define the number of IPv4 multicast memberships, default is one per socket */
221 #ifndef LWIP_SOCKET_MAX_MEMBERSHIPS
222 #define LWIP_SOCKET_MAX_MEMBERSHIPS NUM_SOCKETS
223 #endif
224
225 #if LWIP_IGMP
226 /* This is to keep track of IP_ADD_MEMBERSHIP calls to drop the membership when
227 a socket is closed */
228 struct lwip_socket_multicast_pair {
229 /** the socket */
230 struct lwip_sock *sock;
231 /** the interface address */
232 ip4_addr_t if_addr;
233 /** the group address */
234 ip4_addr_t multi_addr;
235 };
236
237 static struct lwip_socket_multicast_pair socket_ipv4_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
238
239 static int lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
240 static void lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
241 static void lwip_socket_drop_registered_memberships(int s);
242 #endif /* LWIP_IGMP */
243
244 #if LWIP_IPV6_MLD
245 /* This is to keep track of IP_JOIN_GROUP calls to drop the membership when
246 a socket is closed */
247 struct lwip_socket_multicast_mld6_pair {
248 /** the socket */
249 struct lwip_sock *sock;
250 /** the interface index */
251 u8_t if_idx;
252 /** the group address */
253 ip6_addr_t multi_addr;
254 };
255
256 static struct lwip_socket_multicast_mld6_pair socket_ipv6_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
257
258 static int lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
259 static void lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
260 static void lwip_socket_drop_registered_mld6_memberships(int s);
261 #endif /* LWIP_IPV6_MLD */
262
263 /** The global array of available sockets */
264 static struct lwip_sock sockets[NUM_SOCKETS];
265
266 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
267 #if LWIP_TCPIP_CORE_LOCKING
268 /* protect the select_cb_list using core lock */
269 #define LWIP_SOCKET_SELECT_DECL_PROTECT(lev)
270 #define LWIP_SOCKET_SELECT_PROTECT(lev) LOCK_TCPIP_CORE()
271 #define LWIP_SOCKET_SELECT_UNPROTECT(lev) UNLOCK_TCPIP_CORE()
272 #else /* LWIP_TCPIP_CORE_LOCKING */
273 /* protect the select_cb_list using SYS_LIGHTWEIGHT_PROT */
274 #define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev)
275 #define LWIP_SOCKET_SELECT_PROTECT(lev) SYS_ARCH_PROTECT(lev)
276 #define LWIP_SOCKET_SELECT_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev)
277 /** This counter is increased from lwip_select when the list is changed
278 and checked in select_check_waiters to see if it has changed. */
279 static volatile int select_cb_ctr;
280 #endif /* LWIP_TCPIP_CORE_LOCKING */
281 /** The global list of tasks waiting for select */
282 static struct lwip_select_cb *select_cb_list;
283 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
284
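/* Set errno for the current thread; the socket argument is only kept so that call
   sites document which socket the error belongs to */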
285 #define sock_set_errno(sk, e) do { \
286 const int sockerr = (e); \
287 set_errno(sockerr); \
288 } while (0)
289
290 /* Forward declaration of some functions */
291 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
292 static void event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len);
293 #define DEFAULT_SOCKET_EVENTCB event_callback
294 static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent);
295 #else
296 #define DEFAULT_SOCKET_EVENTCB NULL
297 #endif
298 #if !LWIP_TCPIP_CORE_LOCKING
299 static void lwip_getsockopt_callback(void *arg);
300 static void lwip_setsockopt_callback(void *arg);
301 #endif
302 static int lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen);
303 static int lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen);
304 static int free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn,
305 union lwip_sock_lastdata *lastdata);
306 static void free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata);
307
308 #if LWIP_IPV4 && LWIP_IPV6
309 static void
310 sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port)
311 {
312 if ((sockaddr->sa_family) == AF_INET6) {
313 SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6 *)(const void *)(sockaddr), ipaddr, *port);
314 ipaddr->type = IPADDR_TYPE_V6;
315 } else {
316 SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in *)(const void *)(sockaddr), ipaddr, *port);
317 ipaddr->type = IPADDR_TYPE_V4;
318 }
319 }
320 #endif /* LWIP_IPV4 && LWIP_IPV6 */
321
322 /** LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */
323 void
324 lwip_socket_thread_init(void)
325 {
326 netconn_thread_init();
327 }
328
329 /** LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */
330 void
331 lwip_socket_thread_cleanup(void)
332 {
333 netconn_thread_cleanup();
334 }
335
336 #if LWIP_NETCONN_FULLDUPLEX
337 /* Thread-safe increment of sock->fd_used, with overflow check */
338 static int
339 sock_inc_used(struct lwip_sock *sock)
340 {
341 int ret;
342 SYS_ARCH_DECL_PROTECT(lev);
343
344 LWIP_ASSERT("sock != NULL", sock != NULL);
345
346 SYS_ARCH_PROTECT(lev);
347 if (sock->fd_free_pending) {
348 /* prevent new usage of this socket if free is pending */
349 ret = 0;
350 } else {
351 ++sock->fd_used;
352 ret = 1;
353 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
354 }
355 SYS_ARCH_UNPROTECT(lev);
356 return ret;
357 }
358
359 /* Like sock_inc_used(), but called under SYS_ARCH_PROTECT lock. */
360 static int
361 sock_inc_used_locked(struct lwip_sock *sock)
362 {
363 LWIP_ASSERT("sock != NULL", sock != NULL);
364
365 if (sock->fd_free_pending) {
366 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
367 return 0;
368 }
369
370 ++sock->fd_used;
371 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
372 return 1;
373 }
374
375 /* In full-duplex mode, sock->fd_used != 0 prevents a socket descriptor from being
376 * released (and possibly reused) when used from more than one thread
377 * (e.g. read-while-write or close-while-write, etc)
378 * This function is called at the end of functions using (try)get_socket*().
379 */
380 static void
381 done_socket(struct lwip_sock *sock)
382 {
383 int freed = 0;
384 int is_tcp = 0;
385 struct netconn *conn = NULL;
386 union lwip_sock_lastdata lastdata;
387 SYS_ARCH_DECL_PROTECT(lev);
388 LWIP_ASSERT("sock != NULL", sock != NULL);
389
390 SYS_ARCH_PROTECT(lev);
391 LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
392 if (--sock->fd_used == 0) {
393 if (sock->fd_free_pending) {
394 /* free the socket */
395 sock->fd_used = 1;
396 is_tcp = sock->fd_free_pending & LWIP_SOCK_FD_FREE_TCP;
397 freed = free_socket_locked(sock, is_tcp, &conn, &lastdata);
398 }
399 }
400 SYS_ARCH_UNPROTECT(lev);
401
402 if (freed) {
403 free_socket_free_elements(is_tcp, conn, &lastdata);
404 }
405 }
406
407 #else /* LWIP_NETCONN_FULLDUPLEX */
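/* Without full-duplex support the reference-counting helpers collapse to no-ops */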
408 #define sock_inc_used(sock) 1
409 #define sock_inc_used_locked(sock) 1
410 #define done_socket(sock)
411 #endif /* LWIP_NETCONN_FULLDUPLEX */
412
413 /* Translate a socket 'int' into a pointer (only fails if the index is invalid) */
414 static struct lwip_sock *
415 tryget_socket_unconn_nouse(int fd)
416 {
417 int s = fd - LWIP_SOCKET_OFFSET;
418 if ((s < 0) || (s >= NUM_SOCKETS)) {
419 LWIP_DEBUGF(SOCKETS_DEBUG, ("tryget_socket_unconn(%d): invalid\n", fd));
420 return NULL;
421 }
422 return &sockets[s];
423 }
424
425 struct lwip_sock *
426 lwip_socket_dbg_get_socket(int fd)
427 {
428 return tryget_socket_unconn_nouse(fd);
429 }
430
431 /* Translate a socket 'int' into a pointer (only fails if the index is invalid) */
432 static struct lwip_sock *
433 tryget_socket_unconn(int fd)
434 {
435 struct lwip_sock *ret = tryget_socket_unconn_nouse(fd);
436 if (ret != NULL) {
437 if (!sock_inc_used(ret)) {
438 return NULL;
439 }
440 }
441 return ret;
442 }
443
444 /* Like tryget_socket_unconn(), but called under SYS_ARCH_PROTECT lock. */
445 static struct lwip_sock *
446 tryget_socket_unconn_locked(int fd)
447 {
448 struct lwip_sock *ret = tryget_socket_unconn_nouse(fd);
449 if (ret != NULL) {
450 if (!sock_inc_used_locked(ret)) {
451 return NULL;
452 }
453 }
454 return ret;
455 }
456
457 /**
458 * Same as get_socket but doesn't set errno
459 *
460 * @param fd externally used socket index
461 * @return struct lwip_sock for the socket or NULL if not found
462 */
463 static struct lwip_sock *
464 tryget_socket(int fd)
465 {
466 struct lwip_sock *sock = tryget_socket_unconn(fd);
467 if (sock != NULL) {
468 if (sock->conn) {
469 return sock;
470 }
471 done_socket(sock);
472 }
473 return NULL;
474 }
475
476 /**
477 * Map an externally used socket index to the internal socket representation.
478 *
479 * @param fd externally used socket index
480 * @return struct lwip_sock for the socket or NULL if not found
481 */
482 static struct lwip_sock *
483 get_socket(int fd)
484 {
485 struct lwip_sock *sock = tryget_socket(fd);
486 if (!sock) {
487 if ((fd < LWIP_SOCKET_OFFSET) || (fd >= (LWIP_SOCKET_OFFSET + NUM_SOCKETS))) {
488 LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): invalid\n", fd));
489 }
490 set_errno(EBADF);
491 return NULL;
492 }
493 return sock;
494 }
495
496 /**
497 * Allocate a new socket for a given netconn.
498 *
499 * @param newconn the netconn for which to allocate a socket
500 * @param accepted 1 if socket has been created by accept(),
501 * 0 if socket has been created by socket()
502 * @return the index of the new socket; -1 on error
503 */
504 static int
505 alloc_socket(struct netconn *newconn, int accepted)
506 {
507 int i;
508 SYS_ARCH_DECL_PROTECT(lev);
509 LWIP_UNUSED_ARG(accepted);
510
511 /* allocate a new socket identifier */
512 for (i = 0; i < NUM_SOCKETS; ++i) {
513 /* Protect socket array */
514 SYS_ARCH_PROTECT(lev);
515 if (!sockets[i].conn) {
516 #if LWIP_NETCONN_FULLDUPLEX
517 if (sockets[i].fd_used) {
518 SYS_ARCH_UNPROTECT(lev);
519 continue;
520 }
521 sockets[i].fd_used = 1;
522 sockets[i].fd_free_pending = 0;
523 #endif
524 sockets[i].conn = newconn;
525 /* The socket is not yet known to anyone, so no need to protect
526 after having marked it as used. */
527 SYS_ARCH_UNPROTECT(lev);
528 sockets[i].lastdata.pbuf = NULL;
529 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
530 LWIP_ASSERT("sockets[i].select_waiting == 0", sockets[i].select_waiting == 0);
531 sockets[i].rcvevent = 0;
532 /* TCP sendbuf is empty, but the socket is not yet writable until connected
533 * (unless it has been created by accept()). */
534 sockets[i].sendevent = (NETCONNTYPE_GROUP(newconn->type) == NETCONN_TCP ? (accepted != 0) : 1);
535 sockets[i].errevent = 0;
536 init_waitqueue_head(&sockets[i].wq);
537 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
538 return i + LWIP_SOCKET_OFFSET;
539 }
540 SYS_ARCH_UNPROTECT(lev);
541 }
542 return -1;
543 }
544
545 /** Free a socket (under lock)
546 *
547 * @param sock the socket to free
548 * @param is_tcp != 0 for TCP sockets, used to free lastdata
549 * @param conn the socket's netconn is stored here, must be freed externally
550 * @param lastdata lastdata is stored here, must be freed externally
551 */
552 static int
553 free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn,
554 union lwip_sock_lastdata *lastdata)
555 {
556 #if LWIP_NETCONN_FULLDUPLEX
557 LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
558 sock->fd_used--;
559 if (sock->fd_used > 0) {
560 sock->fd_free_pending = LWIP_SOCK_FD_FREE_FREE | (is_tcp ? LWIP_SOCK_FD_FREE_TCP : 0);
561 return 0;
562 }
563 #else /* LWIP_NETCONN_FULLDUPLEX */
564 LWIP_UNUSED_ARG(is_tcp);
565 #endif /* LWIP_NETCONN_FULLDUPLEX */
566
567 *lastdata = sock->lastdata;
568 sock->lastdata.pbuf = NULL;
569 *conn = sock->conn;
570 sock->conn = NULL;
571 return 1;
572 }
573
574 /** Free a socket's leftover members.
575 */
576 static void
577 free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata)
578 {
579 if (lastdata->pbuf != NULL) {
580 if (is_tcp) {
581 pbuf_free(lastdata->pbuf);
582 } else {
583 netbuf_delete(lastdata->netbuf);
584 }
585 }
586 if (conn != NULL) {
587 /* netconn_prepare_delete() has already been called, here we only free the conn */
588 netconn_delete(conn);
589 }
590 }
591
592 /** Free a socket. The socket's netconn must have been
593 * deleted before!
594 *
595 * @param sock the socket to free
596 * @param is_tcp != 0 for TCP sockets, used to free lastdata
597 */
598 static void
599 free_socket(struct lwip_sock *sock, int is_tcp)
600 {
601 int freed;
602 struct netconn *conn;
603 union lwip_sock_lastdata lastdata;
604 SYS_ARCH_DECL_PROTECT(lev);
605
606 /* Protect socket array */
607 SYS_ARCH_PROTECT(lev);
608
609 freed = free_socket_locked(sock, is_tcp, &conn, &lastdata);
610 SYS_ARCH_UNPROTECT(lev);
611 /* don't use 'sock' after this line, as another task might have allocated it */
612
613 if (freed) {
614 free_socket_free_elements(is_tcp, conn, &lastdata);
615 }
616 }
617
618 /* Below this, the well-known socket functions are implemented.
619 * Use google.com or opengroup.org to get a good description :-)
620 *
621 * Exceptions are documented!
622 */
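/* Common convention below: on failure these functions set errno (via sock_set_errno()/
 * set_errno()) and return -1; on success errno is cleared and 0 or a byte count is returned.
 *
 * A minimal usage sketch of this BSD-like API (error handling omitted; the lwip_-prefixed
 * names are the ones implemented in this file, LWIP_COMPAT_SOCKETS maps the plain BSD
 * names onto them):
 *
 *   int fd = lwip_socket(AF_INET, SOCK_STREAM, 0);
 *   struct sockaddr_in dst;
 *   memset(&dst, 0, sizeof(dst));
 *   dst.sin_family = AF_INET;
 *   dst.sin_port = lwip_htons(80);
 *   dst.sin_addr.s_addr = inet_addr("192.0.2.1");
 *   lwip_connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *   lwip_send(fd, "hello", 5, 0);
 *   lwip_close(fd);
 */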
623
624 int
625 lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
626 {
627 struct lwip_sock *sock, *nsock;
628 struct netconn *newconn;
629 ip_addr_t naddr;
630 u16_t port = 0;
631 int newsock;
632 err_t err;
633 int recvevent;
634 SYS_ARCH_DECL_PROTECT(lev);
635
636 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d)...\n", s));
637 sock = get_socket(s);
638 if (!sock) {
639 return -1;
640 }
641
642 /* wait for a new connection */
643 err = netconn_accept(sock->conn, &newconn);
644 if (err != ERR_OK) {
645 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_accept failed, err=%d\n", s, err));
646 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
647 sock_set_errno(sock, EOPNOTSUPP);
648 } else if (err == ERR_CLSD) {
649 sock_set_errno(sock, EINVAL);
650 } else {
651 sock_set_errno(sock, err_to_errno(err));
652 }
653 done_socket(sock);
654 return -1;
655 }
656 LWIP_ASSERT("newconn != NULL", newconn != NULL);
657
658 newsock = alloc_socket(newconn, 1);
659 if (newsock == -1) {
660 netconn_delete(newconn);
661 sock_set_errno(sock, ENFILE);
662 done_socket(sock);
663 return -1;
664 }
665 LWIP_ASSERT("invalid socket index", (newsock >= LWIP_SOCKET_OFFSET) && (newsock < NUM_SOCKETS + LWIP_SOCKET_OFFSET));
666 nsock = &sockets[newsock - LWIP_SOCKET_OFFSET];
667
668 /* See event_callback: If data comes in right away after an accept, even
669 * though the server task might not have created a new socket yet.
670 * In that case, newconn->socket is counted down (newconn->socket--),
671 * so nsock->rcvevent is >= 1 here!
672 */
673 SYS_ARCH_PROTECT(lev);
674 recvevent = (s16_t)(-1 - newconn->socket);
675 newconn->socket = newsock;
676 SYS_ARCH_UNPROTECT(lev);
677
678 if (newconn->callback) {
679 LOCK_TCPIP_CORE();
680 while (recvevent > 0) {
681 recvevent--;
682 newconn->callback(newconn, NETCONN_EVT_RCVPLUS, 0);
683 }
684 UNLOCK_TCPIP_CORE();
685 }
686
687 /* Note that POSIX only requires us to check addr is non-NULL. addrlen must
688 * not be NULL if addr is valid.
689 */
690 if ((addr != NULL) && (addrlen != NULL)) {
691 union sockaddr_aligned tempaddr;
692 /* get the IP address and port of the remote host */
693 err = netconn_peer(newconn, &naddr, &port);
694 if (err != ERR_OK) {
695 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_peer failed, err=%d\n", s, err));
696 free_socket(nsock, 1);
697 sock_set_errno(sock, err_to_errno(err));
698 done_socket(sock);
699 return -1;
700 }
701
702 IPADDR_PORT_TO_SOCKADDR(&tempaddr, &naddr, port);
703 if (*addrlen > tempaddr.sa.sa_len) {
704 *addrlen = tempaddr.sa.sa_len;
705 }
706 MEMCPY(addr, &tempaddr, *addrlen);
707
708 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d addr=", s, newsock));
709 ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
710 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", port));
711 } else {
712 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d", s, newsock));
713 }
714
715 sock_set_errno(sock, 0);
716 done_socket(sock);
717 done_socket(nsock);
718 return newsock;
719 }
720
721 int
722 lwip_bind(int s, const struct sockaddr *name, socklen_t namelen)
723 {
724 struct lwip_sock *sock;
725 ip_addr_t local_addr;
726 u16_t local_port;
727 err_t err;
728
729 sock = get_socket(s);
730 if (!sock) {
731 return -1;
732 }
733
734 if (!SOCK_ADDR_TYPE_MATCH(name, sock)) {
735 /* sockaddr does not match socket type (IPv4/IPv6) */
736 sock_set_errno(sock, err_to_errno(ERR_VAL));
737 done_socket(sock);
738 return -1;
739 }
740
741 /* check size, family and alignment of 'name' */
742 LWIP_ERROR("lwip_bind: invalid address", (IS_SOCK_ADDR_LEN_VALID(namelen) &&
743 IS_SOCK_ADDR_TYPE_VALID(name) && IS_SOCK_ADDR_ALIGNED(name)),
744 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
745 LWIP_UNUSED_ARG(namelen);
746
747 SOCKADDR_TO_IPADDR_PORT(name, &local_addr, local_port);
748 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d, addr=", s));
749 ip_addr_debug_print_val(SOCKETS_DEBUG, local_addr);
750 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", local_port));
751
752 #if LWIP_IPV4 && LWIP_IPV6
753 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
754 if (IP_IS_V6_VAL(local_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&local_addr))) {
755 unmap_ipv4_mapped_ipv6(ip_2_ip4(&local_addr), ip_2_ip6(&local_addr));
756 IP_SET_TYPE_VAL(local_addr, IPADDR_TYPE_V4);
757 }
758 #endif /* LWIP_IPV4 && LWIP_IPV6 */
759
760 err = netconn_bind(sock->conn, &local_addr, local_port);
761
762 if (err != ERR_OK) {
763 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) failed, err=%d\n", s, err));
764 sock_set_errno(sock, err_to_errno(err));
765 done_socket(sock);
766 return -1;
767 }
768
769 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) succeeded\n", s));
770 sock_set_errno(sock, 0);
771 done_socket(sock);
772 return 0;
773 }
774
775 int
776 lwip_close(int s)
777 {
778 #if LWIP_ENABLE_DISTRIBUTED_NET
779 if (!is_distributed_net_enabled()) {
780 return lwip_close_internal(s);
781 }
782 return distributed_net_close(s);
783 }
784
785 int
786 lwip_close_internal(int s)
787 {
788 #endif
789 struct lwip_sock *sock;
790 int is_tcp = 0;
791 err_t err;
792
793 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_close(%d)\n", s));
794
795 sock = get_socket(s);
796 if (!sock) {
797 return -1;
798 }
799
800 if (sock->conn != NULL) {
801 is_tcp = NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP;
802 } else {
803 LWIP_ASSERT("sock->lastdata == NULL", sock->lastdata.pbuf == NULL);
804 }
805
806 #if LWIP_IGMP
807 /* drop all possibly joined IGMP memberships */
808 lwip_socket_drop_registered_memberships(s);
809 #endif /* LWIP_IGMP */
810 #if LWIP_IPV6_MLD
811 /* drop all possibly joined MLD6 memberships */
812 lwip_socket_drop_registered_mld6_memberships(s);
813 #endif /* LWIP_IPV6_MLD */
814
815 err = netconn_prepare_delete(sock->conn);
816 if (err != ERR_OK) {
817 sock_set_errno(sock, err_to_errno(err));
818 done_socket(sock);
819 return -1;
820 }
821
822 free_socket(sock, is_tcp);
823 set_errno(0);
824 return 0;
825 }
826
827 int
828 lwip_connect(int s, const struct sockaddr *name, socklen_t namelen)
829 {
830 #if LWIP_ENABLE_DISTRIBUTED_NET
831 if (!is_distributed_net_enabled()) {
832 return lwip_connect_internal(s, name, namelen);
833 }
834 return distributed_net_connect(s, name, namelen);
835 }
836
837 int
838 lwip_connect_internal(int s, const struct sockaddr *name, socklen_t namelen)
839 {
840 #endif
841 struct lwip_sock *sock;
842 err_t err;
843
844 sock = get_socket(s);
845 if (!sock) {
846 return -1;
847 }
848
849 if (!SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock)) {
850 /* sockaddr does not match socket type (IPv4/IPv6) */
851 sock_set_errno(sock, err_to_errno(ERR_VAL));
852 done_socket(sock);
853 return -1;
854 }
855
856 LWIP_UNUSED_ARG(namelen);
857 if (name->sa_family == AF_UNSPEC) {
858 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, AF_UNSPEC)\n", s));
859 err = netconn_disconnect(sock->conn);
860 } else {
861 ip_addr_t remote_addr;
862 u16_t remote_port;
863
864 /* check size, family and alignment of 'name' */
865 LWIP_ERROR("lwip_connect: invalid address", IS_SOCK_ADDR_LEN_VALID(namelen) &&
866 IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) && IS_SOCK_ADDR_ALIGNED(name),
867 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
868
869 SOCKADDR_TO_IPADDR_PORT(name, &remote_addr, remote_port);
870 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, addr=", s));
871 ip_addr_debug_print_val(SOCKETS_DEBUG, remote_addr);
872 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", remote_port));
873
874 #if LWIP_IPV4 && LWIP_IPV6
875 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
876 if (IP_IS_V6_VAL(remote_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&remote_addr))) {
877 unmap_ipv4_mapped_ipv6(ip_2_ip4(&remote_addr), ip_2_ip6(&remote_addr));
878 IP_SET_TYPE_VAL(remote_addr, IPADDR_TYPE_V4);
879 }
880 #endif /* LWIP_IPV4 && LWIP_IPV6 */
881
882 err = netconn_connect(sock->conn, &remote_addr, remote_port);
883 }
884
885 if (err != ERR_OK) {
886 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) failed, err=%d\n", s, err));
887 sock_set_errno(sock, err_to_errno(err));
888 done_socket(sock);
889 return -1;
890 }
891
892 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) succeeded\n", s));
893 sock_set_errno(sock, 0);
894 done_socket(sock);
895 return 0;
896 }
897
898 /**
899 * Set a socket into listen mode.
900 * The socket may not have been used for another connection previously.
901 *
902 * @param s the socket to set to listening mode
903 * @param backlog (ATTENTION: needs TCP_LISTEN_BACKLOG=1)
904 * @return 0 on success, non-zero on failure
905 */
906 int
907 lwip_listen(int s, int backlog)
908 {
909 struct lwip_sock *sock;
910 err_t err;
911
912 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d, backlog=%d)\n", s, backlog));
913
914 sock = get_socket(s);
915 if (!sock) {
916 return -1;
917 }
918
919 /* limit the "backlog" parameter to fit in a u8_t */
920 backlog = LWIP_MIN(LWIP_MAX(backlog, 0), 0xff);
921
922 err = netconn_listen_with_backlog(sock->conn, (u8_t)backlog);
923
924 if (err != ERR_OK) {
925 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d) failed, err=%d\n", s, err));
926 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
927 sock_set_errno(sock, EOPNOTSUPP);
928 } else {
929 sock_set_errno(sock, err_to_errno(err));
930 }
931 done_socket(sock);
932 return -1;
933 }
934
935 sock_set_errno(sock, 0);
936 done_socket(sock);
937 return 0;
938 }
939
940 #if LWIP_TCP
941 /* Helper function to loop over receiving pbufs from netconn
942 * until "len" bytes are received or we're otherwise done.
943 * Keeps sock->lastdata for peeking or partly copying.
944 */
945 static ssize_t
946 lwip_recv_tcp(struct lwip_sock *sock, void *mem, size_t len, int flags)
947 {
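/* NETCONN_NOAUTORCVD: the TCP receive window is not advanced automatically; it is
   updated explicitly via netconn_tcp_recvd() once the data has been copied out */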
948 u8_t apiflags = NETCONN_NOAUTORCVD;
949 ssize_t recvd = 0;
950 ssize_t recv_left = (len <= SSIZE_MAX) ? (ssize_t)len : SSIZE_MAX;
951
952 LWIP_ASSERT("no socket given", sock != NULL);
953 LWIP_ASSERT("this should be checked internally", NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP);
954
955 if (flags & MSG_DONTWAIT) {
956 apiflags |= NETCONN_DONTBLOCK;
957 }
958
959 do {
960 struct pbuf *p;
961 err_t err;
962 u16_t copylen;
963
964 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: top while sock->lastdata=%p\n", (void *)sock->lastdata.pbuf));
965 /* Check if there is data left from the last recv operation. */
966 if (sock->lastdata.pbuf) {
967 p = sock->lastdata.pbuf;
968 } else {
969 /* No data was left from the previous operation, so we try to get
970 some from the network. */
971 err = netconn_recv_tcp_pbuf_flags(sock->conn, &p, apiflags);
972 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: netconn_recv err=%d, pbuf=%p\n",
973 err, (void *)p));
974
975 if (err != ERR_OK) {
976 if (recvd > 0) {
977 /* already received data, return that (this trusts in getting the same error from
978 netconn layer again next time netconn_recv is called) */
979 goto lwip_recv_tcp_done;
980 }
981 /* We should really do some error checking here. */
982 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: p == NULL, error is \"%s\"!\n",
983 lwip_strerr(err)));
984 sock_set_errno(sock, err_to_errno(err));
985 if (err == ERR_CLSD) {
986 return 0;
987 } else {
988 return -1;
989 }
990 }
991 LWIP_ASSERT("p != NULL", p != NULL);
992 sock->lastdata.pbuf = p;
993 }
994
995 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: buflen=%"U16_F" recv_left=%d off=%d\n",
996 p->tot_len, (int)recv_left, (int)recvd));
997
998 if (recv_left > p->tot_len) {
999 copylen = p->tot_len;
1000 } else {
1001 copylen = (u16_t)recv_left;
1002 }
1003 if (recvd + copylen < recvd) {
1004 /* overflow */
1005 copylen = (u16_t)(SSIZE_MAX - recvd);
1006 }
1007
1008 /* copy the contents of the received buffer into
1009 the supplied memory pointer mem */
1010 pbuf_copy_partial(p, (u8_t *)mem + recvd, copylen, 0);
1011
1012 recvd += copylen;
1013
1014 /* TCP combines multiple pbufs for one recv */
1015 LWIP_ASSERT("invalid copylen, len would underflow", recv_left >= copylen);
1016 recv_left -= copylen;
1017
1018 /* Unless we peek the incoming message... */
1019 if ((flags & MSG_PEEK) == 0) {
1020 /* ... check if there is data left in the pbuf */
1021 LWIP_ASSERT("invalid copylen", p->tot_len >= copylen);
1022 if (p->tot_len - copylen > 0) {
1023 /* If so, it should be saved in the sock structure for the next recv call.
1024 We store the pbuf but hide/free the consumed data: */
1025 sock->lastdata.pbuf = pbuf_free_header(p, copylen);
1026 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: lastdata now pbuf=%p\n", (void *)sock->lastdata.pbuf));
1027 } else {
1028 sock->lastdata.pbuf = NULL;
1029 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: deleting pbuf=%p\n", (void *)p));
1030 pbuf_free(p);
1031 }
1032 }
1033 /* once we have some data to return, only add more if we don't need to wait */
1034 apiflags |= NETCONN_DONTBLOCK | NETCONN_NOFIN;
1035 /* @todo: do we need to support peeking more than one pbuf? */
1036 } while ((recv_left > 0) && !(flags & MSG_PEEK));
1037 lwip_recv_tcp_done:
1038 if ((recvd > 0) && !(flags & MSG_PEEK)) {
1039 /* ensure window update after copying all data */
1040 netconn_tcp_recvd(sock->conn, (size_t)recvd);
1041 }
1042 sock_set_errno(sock, 0);
1043 return recvd;
1044 }
1045 #endif
1046
1047 /* Convert a netbuf's address data to struct sockaddr */
1048 static int
1049 lwip_sock_make_addr(struct netconn *conn, ip_addr_t *fromaddr, u16_t port,
1050 struct sockaddr *from, socklen_t *fromlen)
1051 {
1052 int truncated = 0;
1053 union sockaddr_aligned saddr;
1054
1055 LWIP_UNUSED_ARG(conn);
1056
1057 LWIP_ASSERT("fromaddr != NULL", fromaddr != NULL);
1058 LWIP_ASSERT("from != NULL", from != NULL);
1059 LWIP_ASSERT("fromlen != NULL", fromlen != NULL);
1060
1061 #if LWIP_IPV4 && LWIP_IPV6
1062 /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
1063 if (NETCONNTYPE_ISIPV6(netconn_type(conn)) && IP_IS_V4(fromaddr)) {
1064 ip4_2_ipv4_mapped_ipv6(ip_2_ip6(fromaddr), ip_2_ip4(fromaddr));
1065 IP_SET_TYPE(fromaddr, IPADDR_TYPE_V6);
1066 }
1067 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1068
1069 IPADDR_PORT_TO_SOCKADDR(&saddr, fromaddr, port);
1070 DF_NADDR(*fromaddr);
1071 if (*fromlen < saddr.sa.sa_len) {
1072 truncated = 1;
1073 } else if (*fromlen > saddr.sa.sa_len) {
1074 *fromlen = saddr.sa.sa_len;
1075 }
1076 MEMCPY(from, &saddr, *fromlen);
1077 return truncated;
1078 }
1079
1080 #if LWIP_TCP
1081 /* Helper function to get a tcp socket's remote address info */
1082 static int
1083 lwip_recv_tcp_from(struct lwip_sock *sock, struct sockaddr *from, socklen_t *fromlen, const char *dbg_fn, int dbg_s, ssize_t dbg_ret)
1084 {
1085 if (sock == NULL) {
1086 return 0;
1087 }
1088 LWIP_UNUSED_ARG(dbg_fn);
1089 LWIP_UNUSED_ARG(dbg_s);
1090 LWIP_UNUSED_ARG(dbg_ret);
1091
1092 #if !SOCKETS_DEBUG
1093 if (from && fromlen)
1094 #endif /* !SOCKETS_DEBUG */
1095 {
1096 /* get remote addr/port from tcp_pcb */
1097 u16_t port;
1098 ip_addr_t tmpaddr;
1099 err_t err = netconn_getaddr(sock->conn, &tmpaddr, &port, 0);
1100 LWIP_DEBUGF(SOCKETS_DEBUG, ("%s(%d): addr=", dbg_fn, dbg_s));
1101 ip_addr_debug_print_val(SOCKETS_DEBUG, tmpaddr);
1102 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", port, (int)dbg_ret));
1103 if (!err && from && fromlen) {
1104 return lwip_sock_make_addr(sock->conn, &tmpaddr, port, from, fromlen);
1105 }
1106 }
1107 return 0;
1108 }
1109 #endif
1110
1111 /* Helper function to receive a netbuf from a udp or raw netconn.
1112 * Keeps sock->lastdata for peeking.
1113 */
1114 static err_t
1115 lwip_recvfrom_udp_raw(struct lwip_sock *sock, int flags, struct msghdr *msg, u16_t *datagram_len, int dbg_s)
1116 {
1117 struct netbuf *buf;
1118 u8_t apiflags;
1119 err_t err;
1120 u16_t buflen, copylen, copied;
1121 int i;
1122
1123 LWIP_UNUSED_ARG(dbg_s);
1124 LWIP_ERROR("lwip_recvfrom_udp_raw: invalid arguments", (msg->msg_iov != NULL) || (msg->msg_iovlen <= 0), return ERR_ARG;);
1125
1126 if (flags & MSG_DONTWAIT) {
1127 apiflags = NETCONN_DONTBLOCK;
1128 } else {
1129 apiflags = 0;
1130 }
1131
1132 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: top sock->lastdata=%p\n", (void *)sock->lastdata.netbuf));
1133 /* Check if there is data left from the last recv operation. */
1134 buf = sock->lastdata.netbuf;
1135 if (buf == NULL) {
1136 /* No data was left from the previous operation, so we try to get
1137 some from the network. */
1138 err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &buf, apiflags);
1139 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: netconn_recv err=%d, netbuf=%p\n",
1140 err, (void *)buf));
1141
1142 if (err != ERR_OK) {
1143 return err;
1144 }
1145 LWIP_ASSERT("buf != NULL", buf != NULL);
1146 sock->lastdata.netbuf = buf;
1147 }
1148 buflen = buf->p->tot_len;
1149 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw: buflen=%"U16_F"\n", buflen));
1150
1151 copied = 0;
1152 /* copy the pbuf payload into the iovs */
1153 for (i = 0; (i < msg->msg_iovlen) && (copied < buflen); i++) {
1154 u16_t len_left = (u16_t)(buflen - copied);
1155 if (msg->msg_iov[i].iov_len > len_left) {
1156 copylen = len_left;
1157 } else {
1158 copylen = (u16_t)msg->msg_iov[i].iov_len;
1159 }
1160
1161 /* copy the contents of the received buffer into
1162 the supplied memory buffer */
1163 pbuf_copy_partial(buf->p, (u8_t *)msg->msg_iov[i].iov_base, copylen, copied);
1164 copied = (u16_t)(copied + copylen);
1165 }
1166
1167 /* Check to see where the data came from. */
1168 #if !SOCKETS_DEBUG
1169 if (msg->msg_name && msg->msg_namelen)
1170 #endif /* !SOCKETS_DEBUG */
1171 {
1172 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw(%d): addr=", dbg_s));
1173 ip_addr_debug_print_val(SOCKETS_DEBUG, *netbuf_fromaddr(buf));
1174 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", netbuf_fromport(buf), copied));
1175 if (msg->msg_name && msg->msg_namelen) {
1176 lwip_sock_make_addr(sock->conn, netbuf_fromaddr(buf), netbuf_fromport(buf),
1177 (struct sockaddr *)msg->msg_name, &msg->msg_namelen);
1178 }
1179 }
1180
1181 /* Initialize flag output */
1182 msg->msg_flags = 0;
1183
1184 if (msg->msg_control) {
1185 u8_t wrote_msg = 0;
1186 #if LWIP_NETBUF_RECVINFO
1187 /* Check if packet info was recorded */
1188 if (buf->flags & NETBUF_FLAG_DESTADDR) {
1189 if (IP_IS_V4(&buf->toaddr)) {
1190 #if LWIP_IPV4
1191 if (msg->msg_controllen >= CMSG_SPACE(sizeof(struct in_pktinfo))) {
1192 struct cmsghdr *chdr = CMSG_FIRSTHDR(msg); /* This will always return a header!! */
1193 struct in_pktinfo *pkti = (struct in_pktinfo *)CMSG_DATA(chdr);
1194 chdr->cmsg_level = IPPROTO_IP;
1195 chdr->cmsg_type = IP_PKTINFO;
1196 chdr->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
1197 pkti->ipi_ifindex = buf->p->if_idx;
1198 inet_addr_from_ip4addr(&pkti->ipi_addr, ip_2_ip4(netbuf_destaddr(buf)));
1199 msg->msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo));
1200 wrote_msg = 1;
1201 } else {
1202 msg->msg_flags |= MSG_CTRUNC;
1203 }
1204 #endif /* LWIP_IPV4 */
1205 }
1206 }
1207 #endif /* LWIP_NETBUF_RECVINFO */
1208
1209 if (!wrote_msg) {
1210 msg->msg_controllen = 0;
1211 }
1212 }
1213
1214 /* If we don't peek the incoming message: zero lastdata pointer and free the netbuf */
1215 if ((flags & MSG_PEEK) == 0) {
1216 sock->lastdata.netbuf = NULL;
1217 netbuf_delete(buf);
1218 }
1219 if (datagram_len) {
1220 *datagram_len = buflen;
1221 }
1222 return ERR_OK;
1223 }
1224
1225 ssize_t
1226 lwip_recvfrom(int s, void *mem, size_t len, int flags,
1227 struct sockaddr *from, socklen_t *fromlen)
1228 {
1229 #if LWIP_ENABLE_DISTRIBUTED_NET && LWIP_USE_GET_HOST_BY_NAME_EXTERNAL
1230 if (!is_distributed_net_enabled()) {
1231 return lwip_recvfrom_internal(s, mem, len, flags, from, fromlen);
1232 }
1233 return distributed_net_recvfrom(s, mem, len, flags, from, fromlen);
1234 }
1235
1236 ssize_t
1237 lwip_recvfrom_internal(int s, void *mem, size_t len, int flags,
1238 struct sockaddr *from, socklen_t *fromlen)
1239 {
1240 #endif
1241 struct lwip_sock *sock;
1242 ssize_t ret;
1243
1244 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d, %p, %"SZT_F", 0x%x, ..)\n", s, mem, len, flags));
1245 sock = get_socket(s);
1246 if (!sock) {
1247 return -1;
1248 }
1249 #if LWIP_TCP
1250 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1251 ret = lwip_recv_tcp(sock, mem, len, flags);
1252 lwip_recv_tcp_from(sock, from, fromlen, "lwip_recvfrom", s, ret);
1253 done_socket(sock);
1254 return ret;
1255 } else
1256 #endif
1257 {
1258 u16_t datagram_len = 0;
1259 struct iovec vec;
1260 struct msghdr msg;
1261 err_t err;
1262 vec.iov_base = mem;
1263 vec.iov_len = len;
1264 msg.msg_control = NULL;
1265 msg.msg_controllen = 0;
1266 msg.msg_flags = 0;
1267 msg.msg_iov = &vec;
1268 msg.msg_iovlen = 1;
1269 msg.msg_name = from;
1270 msg.msg_namelen = (fromlen ? *fromlen : 0);
1271 err = lwip_recvfrom_udp_raw(sock, flags, &msg, &datagram_len, s);
1272 if (err != ERR_OK) {
1273 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
1274 s, lwip_strerr(err)));
1275 sock_set_errno(sock, err_to_errno(err));
1276 done_socket(sock);
1277 return -1;
1278 }
1279 ret = (ssize_t)LWIP_MIN(LWIP_MIN(len, datagram_len), SSIZE_MAX);
1280 if (fromlen) {
1281 *fromlen = msg.msg_namelen;
1282 }
1283 }
1284
1285 sock_set_errno(sock, 0);
1286 done_socket(sock);
1287 return ret;
1288 }
1289
1290 ssize_t
1291 lwip_read(int s, void *mem, size_t len)
1292 {
1293 return lwip_recvfrom(s, mem, len, 0, NULL, NULL);
1294 }
1295
1296 ssize_t
1297 lwip_readv(int s, const struct iovec *iov, int iovcnt)
1298 {
1299 struct msghdr msg;
1300
1301 msg.msg_name = NULL;
1302 msg.msg_namelen = 0;
1303 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1304 Blame the opengroup standard for this inconsistency. */
1305 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1306 msg.msg_iovlen = iovcnt;
1307 msg.msg_control = NULL;
1308 msg.msg_controllen = 0;
1309 msg.msg_flags = 0;
1310 return lwip_recvmsg(s, &msg, 0);
1311 }
1312
1313 ssize_t
1314 lwip_recv(int s, void *mem, size_t len, int flags)
1315 {
1316 return lwip_recvfrom(s, mem, len, flags, NULL, NULL);
1317 }
1318
1319 ssize_t
1320 lwip_recvmsg(int s, struct msghdr *message, int flags)
1321 {
1322 struct lwip_sock *sock;
1323 int i;
1324 ssize_t buflen;
1325
1326 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg(%d, message=%p, flags=0x%x)\n", s, (void *)message, flags));
1327 LWIP_ERROR("lwip_recvmsg: invalid message pointer", message != NULL, return ERR_ARG;);
1328 LWIP_ERROR("lwip_recvmsg: unsupported flags", (flags & ~(MSG_PEEK|MSG_DONTWAIT)) == 0,
1329 set_errno(EOPNOTSUPP); return -1;);
1330
1331 if ((message->msg_iovlen <= 0) || (message->msg_iovlen > IOV_MAX)) {
1332 set_errno(EMSGSIZE);
1333 return -1;
1334 }
1335
1336 sock = get_socket(s);
1337 if (!sock) {
1338 return -1;
1339 }
1340
1341 /* check for valid vectors */
1342 buflen = 0;
1343 for (i = 0; i < message->msg_iovlen; i++) {
1344 if ((message->msg_iov[i].iov_base == NULL) || ((ssize_t)message->msg_iov[i].iov_len <= 0) ||
1345 ((size_t)(ssize_t)message->msg_iov[i].iov_len != message->msg_iov[i].iov_len) ||
1346 ((ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len) <= 0)) {
1347 sock_set_errno(sock, err_to_errno(ERR_VAL));
1348 done_socket(sock);
1349 return -1;
1350 }
1351 buflen = (ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len);
1352 }
1353
1354 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1355 #if LWIP_TCP
1356 int recv_flags = flags;
1357 message->msg_flags = 0;
1358 /* recv the data */
1359 buflen = 0;
1360 for (i = 0; i < message->msg_iovlen; i++) {
1361 /* try to receive into this vector's buffer */
1362 ssize_t recvd_local = lwip_recv_tcp(sock, message->msg_iov[i].iov_base, message->msg_iov[i].iov_len, recv_flags);
1363 if (recvd_local > 0) {
1364 /* sum up received bytes */
1365 buflen += recvd_local;
1366 }
1367 if ((recvd_local < 0) || (recvd_local < (int)message->msg_iov[i].iov_len) ||
1368 (flags & MSG_PEEK)) {
1369 /* returned prematurely (or peeking, which might actually be limited to the first iov) */
1370 if (buflen <= 0) {
1371 /* nothing received at all, propagate the error */
1372 buflen = recvd_local;
1373 }
1374 break;
1375 }
1376 /* pass MSG_DONTWAIT to lwip_recv_tcp() to prevent waiting for more data */
1377 recv_flags |= MSG_DONTWAIT;
1378 }
1379 if (buflen > 0) {
1380 /* reset socket error since we have received something */
1381 sock_set_errno(sock, 0);
1382 }
1383 /* " If the socket is connected, the msg_name and msg_namelen members shall be ignored." */
1384 done_socket(sock);
1385 return buflen;
1386 #else /* LWIP_TCP */
1387 sock_set_errno(sock, err_to_errno(ERR_ARG));
1388 done_socket(sock);
1389 return -1;
1390 #endif /* LWIP_TCP */
1391 }
1392 /* else, UDP and RAW NETCONNs */
1393 #if LWIP_UDP || LWIP_RAW
1394 {
1395 u16_t datagram_len = 0;
1396 err_t err;
1397 err = lwip_recvfrom_udp_raw(sock, flags, message, &datagram_len, s);
1398 if (err != ERR_OK) {
1399 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
1400 s, lwip_strerr(err)));
1401 sock_set_errno(sock, err_to_errno(err));
1402 done_socket(sock);
1403 return -1;
1404 }
1405 if (datagram_len > buflen) {
1406 message->msg_flags |= MSG_TRUNC;
1407 }
1408
1409 sock_set_errno(sock, 0);
1410 done_socket(sock);
1411 return (int)datagram_len;
1412 }
1413 #else /* LWIP_UDP || LWIP_RAW */
1414 sock_set_errno(sock, err_to_errno(ERR_ARG));
1415 done_socket(sock);
1416 return -1;
1417 #endif /* LWIP_UDP || LWIP_RAW */
1418 }
1419
1420 ssize_t
1421 lwip_send(int s, const void *data, size_t size, int flags)
1422 {
1423 struct lwip_sock *sock;
1424 err_t err;
1425 u8_t write_flags;
1426 size_t written;
1427
1428 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d, data=%p, size=%"SZT_F", flags=0x%x)\n",
1429 s, data, size, flags));
1430
1431 sock = get_socket(s);
1432 if (!sock) {
1433 return -1;
1434 }
1435
1436 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
1437 #if (LWIP_UDP || LWIP_RAW)
1438 done_socket(sock);
1439 #if LWIP_ENABLE_DISTRIBUTED_NET && LWIP_USE_GET_HOST_BY_NAME_EXTERNAL
1440 return lwip_sendto_internal(s, data, size, flags, NULL, 0);
1441 #else
1442 return lwip_sendto(s, data, size, flags, NULL, 0);
1443 #endif
1444 #else /* (LWIP_UDP || LWIP_RAW) */
1445 sock_set_errno(sock, err_to_errno(ERR_ARG));
1446 done_socket(sock);
1447 return -1;
1448 #endif /* (LWIP_UDP || LWIP_RAW) */
1449 }
1450
1451 write_flags = (u8_t)(NETCONN_COPY |
1452 ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
1453 ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));
1454 written = 0;
1455 err = netconn_write_partly(sock->conn, data, size, write_flags, &written);
1456
1457 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d) err=%d written=%"SZT_F"\n", s, err, written));
1458 sock_set_errno(sock, err_to_errno(err));
1459 done_socket(sock);
1460 /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
1461 return (err == ERR_OK ? (ssize_t)written : -1);
1462 }
1463
1464 ssize_t
1465 lwip_sendmsg(int s, const struct msghdr *msg, int flags)
1466 {
1467 #if LWIP_ENABLE_DISTRIBUTED_NET && LWIP_USE_GET_HOST_BY_NAME_EXTERNAL && LWIP_DISTRIBUTED_NET_ENABLE_SENDMSG
1468 if (!is_distributed_net_enabled()) {
1469 return lwip_sendmsg_internal(s, msg, flags);
1470 }
1471 return distributed_net_sendmsg(s, msg, flags);
1472 }
1473
1474 ssize_t
1475 lwip_sendmsg_internal(int s, const struct msghdr *msg, int flags)
1476 {
1477 #endif
1478 struct lwip_sock *sock;
1479 #if LWIP_TCP
1480 u8_t write_flags;
1481 size_t written;
1482 #endif
1483 err_t err = ERR_OK;
1484
1485 sock = get_socket(s);
1486 if (!sock) {
1487 return -1;
1488 }
1489
1490 LWIP_ERROR("lwip_sendmsg: invalid msghdr", msg != NULL,
1491 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1492 LWIP_ERROR("lwip_sendmsg: invalid msghdr iov", msg->msg_iov != NULL,
1493 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1494 LWIP_ERROR("lwip_sendmsg: maximum iovs exceeded", (msg->msg_iovlen > 0) && (msg->msg_iovlen <= IOV_MAX),
1495 sock_set_errno(sock, EMSGSIZE); done_socket(sock); return -1;);
1496 LWIP_ERROR("lwip_sendmsg: unsupported flags", (flags & ~(MSG_DONTWAIT | MSG_MORE)) == 0,
1497 sock_set_errno(sock, EOPNOTSUPP); done_socket(sock); return -1;);
1498
1499 LWIP_UNUSED_ARG(msg->msg_control);
1500 LWIP_UNUSED_ARG(msg->msg_controllen);
1501 LWIP_UNUSED_ARG(msg->msg_flags);
1502
1503 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1504 #if LWIP_TCP
1505 write_flags = (u8_t)(NETCONN_COPY |
1506 ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
1507 ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));
1508
1509 written = 0;
1510 err = netconn_write_vectors_partly(sock->conn, (struct netvector *)msg->msg_iov, (u16_t)msg->msg_iovlen, write_flags, &written);
1511 sock_set_errno(sock, err_to_errno(err));
1512 done_socket(sock);
1513 /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
1514 return (err == ERR_OK ? (ssize_t)written : -1);
1515 #else /* LWIP_TCP */
1516 sock_set_errno(sock, err_to_errno(ERR_ARG));
1517 done_socket(sock);
1518 return -1;
1519 #endif /* LWIP_TCP */
1520 }
1521 /* else, UDP and RAW NETCONNs */
1522 #if LWIP_UDP || LWIP_RAW
1523 {
1524 struct netbuf chain_buf;
1525 int i;
1526 ssize_t size = 0;
1527
1528 LWIP_UNUSED_ARG(flags);
1529 LWIP_ERROR("lwip_sendmsg: invalid msghdr name", (((msg->msg_name == NULL) && (msg->msg_namelen == 0)) ||
1530 IS_SOCK_ADDR_LEN_VALID(msg->msg_namelen)),
1531 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1532
1533 /* initialize chain buffer with destination */
1534 memset(&chain_buf, 0, sizeof(struct netbuf));
1535 if (msg->msg_name) {
1536 u16_t remote_port;
1537 SOCKADDR_TO_IPADDR_PORT((const struct sockaddr *)msg->msg_name, &chain_buf.addr, remote_port);
1538 netbuf_fromport(&chain_buf) = remote_port;
1539 }
1540 #if LWIP_NETIF_TX_SINGLE_PBUF
1541 for (i = 0; i < msg->msg_iovlen; i++) {
1542 size += msg->msg_iov[i].iov_len;
1543 if ((msg->msg_iov[i].iov_len > INT_MAX) || (size < (int)msg->msg_iov[i].iov_len)) {
1544 /* overflow */
1545 goto sendmsg_emsgsize;
1546 }
1547 }
1548 if (size > 0xFFFF) {
1549 /* overflow */
1550 goto sendmsg_emsgsize;
1551 }
1552 /* Allocate a new netbuf and copy the data into it. */
1553 if (netbuf_alloc(&chain_buf, (u16_t)size) == NULL) {
1554 err = ERR_MEM;
1555 } else {
1556 /* flatten the IO vectors */
1557 size_t offset = 0;
1558 for (i = 0; i < msg->msg_iovlen; i++) {
1559 MEMCPY(&((u8_t *)chain_buf.p->payload)[offset], msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len);
1560 offset += msg->msg_iov[i].iov_len;
1561 }
1562 #if LWIP_CHECKSUM_ON_COPY
1563 {
1564 /* This can be improved by using LWIP_CHKSUM_COPY() and aggregating the checksum for each IO vector */
1565 u16_t chksum = ~inet_chksum_pbuf(chain_buf.p);
1566 netbuf_set_chksum(&chain_buf, chksum);
1567 }
1568 #endif /* LWIP_CHECKSUM_ON_COPY */
1569 err = ERR_OK;
1570 }
1571 #else /* LWIP_NETIF_TX_SINGLE_PBUF */
1572 /* create a chained netbuf from the IO vectors. NOTE: we assemble a pbuf chain
1573 manually to avoid having to allocate, chain, and delete a netbuf for each iov */
1574 for (i = 0; i < msg->msg_iovlen; i++) {
1575 struct pbuf *p;
1576 if (msg->msg_iov[i].iov_len > 0xFFFF) {
1577 /* overflow */
1578 goto sendmsg_emsgsize;
1579 }
1580 p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF);
1581 if (p == NULL) {
1582 err = ERR_MEM; /* let netbuf_delete() cleanup chain_buf */
1583 break;
1584 }
1585 p->payload = msg->msg_iov[i].iov_base;
1586 p->len = p->tot_len = (u16_t)msg->msg_iov[i].iov_len;
1587 /* netbuf empty, add new pbuf */
1588 if (chain_buf.p == NULL) {
1589 chain_buf.p = chain_buf.ptr = p;
1590 /* add pbuf to existing pbuf chain */
1591 } else {
1592 if (chain_buf.p->tot_len + p->len > 0xffff) {
1593 /* overflow */
1594 pbuf_free(p);
1595 goto sendmsg_emsgsize;
1596 }
1597 pbuf_cat(chain_buf.p, p);
1598 }
1599 }
1600 /* save size of total chain */
1601 if (err == ERR_OK) {
1602 size = netbuf_len(&chain_buf);
1603 }
1604 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
1605
1606 if (err == ERR_OK) {
1607 #if LWIP_IPV4 && LWIP_IPV6
1608 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
1609 if (IP_IS_V6_VAL(chain_buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&chain_buf.addr))) {
1610 unmap_ipv4_mapped_ipv6(ip_2_ip4(&chain_buf.addr), ip_2_ip6(&chain_buf.addr));
1611 IP_SET_TYPE_VAL(chain_buf.addr, IPADDR_TYPE_V4);
1612 }
1613 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1614
1615 /* send the data */
1616 err = netconn_send(sock->conn, &chain_buf);
1617 }
1618
1619   /* deallocate the buffer */
1620 netbuf_free(&chain_buf);
1621
1622 sock_set_errno(sock, err_to_errno(err));
1623 done_socket(sock);
1624 return (err == ERR_OK ? size : -1);
1625 sendmsg_emsgsize:
1626 sock_set_errno(sock, EMSGSIZE);
1627 netbuf_free(&chain_buf);
1628 done_socket(sock);
1629 return -1;
1630 }
1631 #else /* LWIP_UDP || LWIP_RAW */
1632 sock_set_errno(sock, err_to_errno(ERR_ARG));
1633 done_socket(sock);
1634 return -1;
1635 #endif /* LWIP_UDP || LWIP_RAW */
1636 }
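
/*
 * Illustrative usage sketch (not part of lwIP): sending two application
 * buffers as one UDP datagram via lwip_sendmsg(). 'part1'/'part2' and their
 * lengths are placeholder application buffers; 'dst' is a filled-in
 * struct sockaddr_in as in the lwip_sendto() example further below.
 *
 *   struct iovec iov[2];
 *   struct msghdr mh;
 *   iov[0].iov_base = part1; iov[0].iov_len = part1_len;
 *   iov[1].iov_base = part2; iov[1].iov_len = part2_len;
 *   memset(&mh, 0, sizeof(mh));
 *   mh.msg_name = &dst;
 *   mh.msg_namelen = sizeof(dst);
 *   mh.msg_iov = iov;
 *   mh.msg_iovlen = 2;
 *   ssize_t sent = lwip_sendmsg(s, &mh, 0);
 */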
1637
1638 ssize_t
1639 lwip_sendto(int s, const void *data, size_t size, int flags,
1640 const struct sockaddr *to, socklen_t tolen)
1641 {
1642 #if LWIP_ENABLE_DISTRIBUTED_NET && LWIP_USE_GET_HOST_BY_NAME_EXTERNAL
1643 if (!is_distributed_net_enabled()) {
1644 return lwip_sendto_internal(s, data, size, flags, to, tolen);
1645 }
1646 return distributed_net_sendto(s, data, size, flags, to, tolen);
1647 }
1648
1649 ssize_t
1650 lwip_sendto_internal(int s, const void *data, size_t size, int flags,
1651 const struct sockaddr *to, socklen_t tolen)
1652 {
1653 #endif
1654 struct lwip_sock *sock;
1655 err_t err;
1656 u16_t short_size;
1657 u16_t remote_port;
1658 struct netbuf buf;
1659
1660 sock = get_socket(s);
1661 if (!sock) {
1662 return -1;
1663 }
1664
1665 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1666 #if LWIP_TCP
1667 done_socket(sock);
1668 return lwip_send(s, data, size, flags);
1669 #else /* LWIP_TCP */
1670 LWIP_UNUSED_ARG(flags);
1671 sock_set_errno(sock, err_to_errno(ERR_ARG));
1672 done_socket(sock);
1673 return -1;
1674 #endif /* LWIP_TCP */
1675 }
1676
1677 if (size > LWIP_MIN(0xFFFF, SSIZE_MAX)) {
1678 /* cannot fit into one datagram (at least for us) */
1679 sock_set_errno(sock, EMSGSIZE);
1680 done_socket(sock);
1681 return -1;
1682 }
1683 short_size = (u16_t)size;
1684 LWIP_ERROR("lwip_sendto: invalid address", (((to == NULL) && (tolen == 0)) ||
1685 (IS_SOCK_ADDR_LEN_VALID(tolen) &&
1686 ((to != NULL) && (IS_SOCK_ADDR_TYPE_VALID(to) && IS_SOCK_ADDR_ALIGNED(to))))),
1687 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1688 LWIP_UNUSED_ARG(tolen);
1689
1690 /* initialize a buffer */
1691 buf.p = buf.ptr = NULL;
1692 #if LWIP_CHECKSUM_ON_COPY
1693 buf.flags = 0;
1694 #endif /* LWIP_CHECKSUM_ON_COPY */
1695 if (to) {
1696 SOCKADDR_TO_IPADDR_PORT(to, &buf.addr, remote_port);
1697 } else {
1698 remote_port = 0;
1699 ip_addr_set_any(NETCONNTYPE_ISIPV6(netconn_type(sock->conn)), &buf.addr);
1700 }
1701 netbuf_fromport(&buf) = remote_port;
1702
1703
1704 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_sendto(%d, data=%p, short_size=%"U16_F", flags=0x%x to=",
1705 s, data, short_size, flags));
1706 ip_addr_debug_print_val(SOCKETS_DEBUG, buf.addr);
1707 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", remote_port));
1708
1709 /* make the buffer point to the data that should be sent */
1710 #if LWIP_NETIF_TX_SINGLE_PBUF
1711 /* Allocate a new netbuf and copy the data into it. */
1712 if (netbuf_alloc(&buf, short_size) == NULL) {
1713 err = ERR_MEM;
1714 } else {
1715 #if LWIP_CHECKSUM_ON_COPY
1716 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_RAW) {
1717 u16_t chksum = LWIP_CHKSUM_COPY(buf.p->payload, data, short_size);
1718 netbuf_set_chksum(&buf, chksum);
1719 } else
1720 #endif /* LWIP_CHECKSUM_ON_COPY */
1721 {
1722 MEMCPY(buf.p->payload, data, short_size);
1723 }
1724 err = ERR_OK;
1725 }
1726 #else /* LWIP_NETIF_TX_SINGLE_PBUF */
1727 err = netbuf_ref(&buf, data, short_size);
1728 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
1729 if (err == ERR_OK) {
1730 #if LWIP_IPV4 && LWIP_IPV6
1731 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
1732 if (IP_IS_V6_VAL(buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&buf.addr))) {
1733 unmap_ipv4_mapped_ipv6(ip_2_ip4(&buf.addr), ip_2_ip6(&buf.addr));
1734 IP_SET_TYPE_VAL(buf.addr, IPADDR_TYPE_V4);
1735 }
1736 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1737
1738 /* send the data */
1739 err = netconn_send(sock->conn, &buf);
1740 }
1741
1742   /* deallocate the buffer */
1743 netbuf_free(&buf);
1744
1745 sock_set_errno(sock, err_to_errno(err));
1746 done_socket(sock);
1747 return (err == ERR_OK ? short_size : -1);
1748 }
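
/*
 * Illustrative usage sketch (not part of lwIP): a minimal UDP send through
 * lwip_socket()/lwip_sendto(). The address 192.0.2.1 and port 7 are
 * placeholders; error checking is omitted for brevity.
 *
 *   int s = lwip_socket(AF_INET, SOCK_DGRAM, 0);
 *   struct sockaddr_in dst;
 *   memset(&dst, 0, sizeof(dst));
 *   dst.sin_family = AF_INET;
 *   dst.sin_port = lwip_htons(7);
 *   dst.sin_addr.s_addr = inet_addr("192.0.2.1");
 *   lwip_sendto(s, "hello", 5, 0, (struct sockaddr *)&dst, sizeof(dst));
 *   lwip_close(s);
 */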
1749
1750 int
1751 lwip_socket(int domain, int type, int protocol)
1752 {
1753 struct netconn *conn;
1754 int i;
1755
1756 LWIP_UNUSED_ARG(domain); /* @todo: check this */
1757
1758 /* create a netconn */
1759 switch (type) {
1760 case SOCK_RAW:
1761 conn = netconn_new_with_proto_and_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_RAW),
1762 (u8_t)protocol, DEFAULT_SOCKET_EVENTCB);
1763 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_RAW, %d) = ",
1764 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1765 break;
1766 case SOCK_DGRAM:
1767 conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain,
1768 ((protocol == IPPROTO_UDPLITE) ? NETCONN_UDPLITE : NETCONN_UDP)),
1769 DEFAULT_SOCKET_EVENTCB);
1770 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_DGRAM, %d) = ",
1771 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1772 #if LWIP_NETBUF_RECVINFO
1773 if (conn) {
1774 /* netconn layer enables pktinfo by default, sockets default to off */
1775 conn->flags &= ~NETCONN_FLAG_PKTINFO;
1776 }
1777 #endif /* LWIP_NETBUF_RECVINFO */
1778 break;
1779 case SOCK_STREAM:
1780 conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_TCP), DEFAULT_SOCKET_EVENTCB);
1781 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_STREAM, %d) = ",
1782 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1783 break;
1784 default:
1785 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%d, %d/UNKNOWN, %d) = -1\n",
1786 domain, type, protocol));
1787 set_errno(EINVAL);
1788 return -1;
1789 }
1790
1791 if (!conn) {
1792 LWIP_DEBUGF(SOCKETS_DEBUG, ("-1 / ENOBUFS (could not create netconn)\n"));
1793 set_errno(ENOBUFS);
1794 return -1;
1795 }
1796
1797 i = alloc_socket(conn, 0);
1798
1799 if (i == -1) {
1800 netconn_delete(conn);
1801 set_errno(ENFILE);
1802 return -1;
1803 }
1804 conn->socket = i;
1805 done_socket(&sockets[i - LWIP_SOCKET_OFFSET]);
1806 LWIP_DEBUGF(SOCKETS_DEBUG, ("%d\n", i));
1807 set_errno(0);
1808 return i;
1809 }
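
/*
 * Illustrative sketch (not part of lwIP): creating the three socket types
 * handled above. 'domain' is mainly used to select IPv4/IPv6; 'protocol' is
 * only evaluated for SOCK_RAW and for IPPROTO_UDPLITE datagram sockets.
 *
 *   int tcp_s = lwip_socket(AF_INET, SOCK_STREAM, 0);
 *   int udp_s = lwip_socket(AF_INET, SOCK_DGRAM, 0);
 *   int raw_s = lwip_socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
 *
 * On failure -1 is returned and errno is set to EINVAL, ENOBUFS or ENFILE
 * as in the code above.
 */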
1810
1811 ssize_t
1812 lwip_write(int s, const void *data, size_t size)
1813 {
1814 return lwip_send(s, data, size, 0);
1815 }
1816
1817 ssize_t
1818 lwip_writev(int s, const struct iovec *iov, int iovcnt)
1819 {
1820 struct msghdr msg;
1821
1822 msg.msg_name = NULL;
1823 msg.msg_namelen = 0;
1824 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1825 Blame the opengroup standard for this inconsistency. */
1826 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1827 msg.msg_iovlen = iovcnt;
1828 msg.msg_control = NULL;
1829 msg.msg_controllen = 0;
1830 msg.msg_flags = 0;
1831 return lwip_sendmsg(s, &msg, 0);
1832 }
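
/*
 * Illustrative sketch (not part of lwIP): gathering a header and a body into
 * a single lwip_writev() call on an already connected stream socket 's'
 * (placeholder descriptor).
 *
 *   char hdr[]  = "HEADER";
 *   char body[] = "payload";
 *   struct iovec iov[2];
 *   iov[0].iov_base = hdr;  iov[0].iov_len = sizeof(hdr) - 1;
 *   iov[1].iov_base = body; iov[1].iov_len = sizeof(body) - 1;
 *   ssize_t sent = lwip_writev(s, iov, 2);
 */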
1833
1834 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
1835 /* Add select_cb to select_cb_list. */
1836 static void
1837 lwip_link_select_cb(struct lwip_select_cb *select_cb)
1838 {
1839 LWIP_SOCKET_SELECT_DECL_PROTECT(lev);
1840
1841 /* Protect the select_cb_list */
1842 LWIP_SOCKET_SELECT_PROTECT(lev);
1843
1844 /* Put this select_cb on top of list */
1845 select_cb->next = select_cb_list;
1846 if (select_cb_list != NULL) {
1847 select_cb_list->prev = select_cb;
1848 }
1849 select_cb_list = select_cb;
1850 #if !LWIP_TCPIP_CORE_LOCKING
1851 /* Increasing this counter tells select_check_waiters that the list has changed. */
1852 select_cb_ctr++;
1853 #endif
1854
1855 /* Now we can safely unprotect */
1856 LWIP_SOCKET_SELECT_UNPROTECT(lev);
1857 }
1858
1859 /* Remove select_cb from select_cb_list. */
1860 static void
1861 lwip_unlink_select_cb(struct lwip_select_cb *select_cb)
1862 {
1863 LWIP_SOCKET_SELECT_DECL_PROTECT(lev);
1864
1865 /* Take us off the list */
1866 LWIP_SOCKET_SELECT_PROTECT(lev);
1867 if (select_cb->next != NULL) {
1868 select_cb->next->prev = select_cb->prev;
1869 }
1870 if (select_cb_list == select_cb) {
1871 LWIP_ASSERT("select_cb->prev == NULL", select_cb->prev == NULL);
1872 select_cb_list = select_cb->next;
1873 } else {
1874 LWIP_ASSERT("select_cb->prev != NULL", select_cb->prev != NULL);
1875 select_cb->prev->next = select_cb->next;
1876 }
1877 #if !LWIP_TCPIP_CORE_LOCKING
1878 /* Increasing this counter tells select_check_waiters that the list has changed. */
1879 select_cb_ctr++;
1880 #endif
1881 LWIP_SOCKET_SELECT_UNPROTECT(lev);
1882 }
1883 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
1884
1885 #if LWIP_SOCKET_SELECT
1886 /**
1887  * Go through the readset and writeset lists and see which of the sockets
1888  * set in the sets have events. On return, readset, writeset and exceptset have
1889 * the sockets enabled that had events.
1890 *
1891 * @param maxfdp1 the highest socket index in the sets
1892 * @param readset_in set of sockets to check for read events
1893 * @param writeset_in set of sockets to check for write events
1894 * @param exceptset_in set of sockets to check for error events
1895 * @param readset_out set of sockets that had read events
1896 * @param writeset_out set of sockets that had write events
1897  * @param exceptset_out set of sockets that had error events
1898 * @return number of sockets that had events (read/write/exception) (>= 0)
1899 */
1900 static int
1901 lwip_selscan(int maxfdp1, fd_set *readset_in, fd_set *writeset_in, fd_set *exceptset_in,
1902 fd_set *readset_out, fd_set *writeset_out, fd_set *exceptset_out)
1903 {
1904 int i, nready = 0;
1905 fd_set lreadset, lwriteset, lexceptset;
1906 struct lwip_sock *sock;
1907 SYS_ARCH_DECL_PROTECT(lev);
1908
1909 FD_ZERO(&lreadset);
1910 FD_ZERO(&lwriteset);
1911 FD_ZERO(&lexceptset);
1912
1913 /* Go through each socket in each list to count number of sockets which
1914 currently match */
1915 for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
1916 /* if this FD is not in the set, continue */
1917 if (!(readset_in && FD_ISSET(i, readset_in)) &&
1918 !(writeset_in && FD_ISSET(i, writeset_in)) &&
1919 !(exceptset_in && FD_ISSET(i, exceptset_in))) {
1920 continue;
1921 }
1922 /* First get the socket's status (protected)... */
1923 SYS_ARCH_PROTECT(lev);
1924 sock = tryget_socket_unconn_locked(i);
1925 if (sock != NULL) {
1926 void *lastdata = sock->lastdata.pbuf;
1927 s16_t rcvevent = sock->rcvevent;
1928 u16_t sendevent = sock->sendevent;
1929 u16_t errevent = sock->errevent;
1930 SYS_ARCH_UNPROTECT(lev);
1931
1932 /* ... then examine it: */
1933 /* See if netconn of this socket is ready for read */
1934 if (readset_in && FD_ISSET(i, readset_in) && ((lastdata != NULL) || (rcvevent > 0))) {
1935 FD_SET(i, &lreadset);
1936 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for reading\n", i));
1937 nready++;
1938 }
1939 /* See if netconn of this socket is ready for write */
1940 if (writeset_in && FD_ISSET(i, writeset_in) && (sendevent != 0)) {
1941 FD_SET(i, &lwriteset);
1942 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for writing\n", i));
1943 nready++;
1944 }
1945 /* See if netconn of this socket had an error */
1946 if (exceptset_in && FD_ISSET(i, exceptset_in) && (errevent != 0)) {
1947 FD_SET(i, &lexceptset);
1948 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for exception\n", i));
1949 nready++;
1950 }
1951 done_socket(sock);
1952 } else {
1953 SYS_ARCH_UNPROTECT(lev);
1954       /* not a valid open socket */
1955 return -1;
1956 }
1957 }
1958 /* copy local sets to the ones provided as arguments */
1959 *readset_out = lreadset;
1960 *writeset_out = lwriteset;
1961 *exceptset_out = lexceptset;
1962
1963 LWIP_ASSERT("nready >= 0", nready >= 0);
1964 return nready;
1965 }
1966
1967 #if LWIP_NETCONN_FULLDUPLEX
1968 /* Mark all of the set sockets in one of the three fdsets passed to select as used.
1969 * All sockets are marked (and later unmarked), whether they are open or not.
1970 * This is OK as lwip_selscan aborts select when non-open sockets are found.
1971 */
1972 static void
1973 lwip_select_inc_sockets_used_set(int maxfdp, fd_set *fdset, fd_set *used_sockets)
1974 {
1975 SYS_ARCH_DECL_PROTECT(lev);
1976 if (fdset) {
1977 int i;
1978 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
1979 /* if this FD is in the set, lock it (unless already done) */
1980 if (FD_ISSET(i, fdset) && !FD_ISSET(i, used_sockets)) {
1981 struct lwip_sock *sock;
1982 SYS_ARCH_PROTECT(lev);
1983 sock = tryget_socket_unconn_locked(i);
1984 if (sock != NULL) {
1985 /* leave the socket used until released by lwip_select_dec_sockets_used */
1986 FD_SET(i, used_sockets);
1987 }
1988 SYS_ARCH_UNPROTECT(lev);
1989 }
1990 }
1991 }
1992 }
1993
1994 /* Mark all sockets passed to select as used to prevent them from being freed
1995 * from other threads while select is running.
1996  * Marked sockets are added to 'used_sockets' to mark them only once and be able
1997 * to unmark them correctly.
1998 */
1999 static void
2000 lwip_select_inc_sockets_used(int maxfdp, fd_set *fdset1, fd_set *fdset2, fd_set *fdset3, fd_set *used_sockets)
2001 {
2002 FD_ZERO(used_sockets);
2003 lwip_select_inc_sockets_used_set(maxfdp, fdset1, used_sockets);
2004 lwip_select_inc_sockets_used_set(maxfdp, fdset2, used_sockets);
2005 lwip_select_inc_sockets_used_set(maxfdp, fdset3, used_sockets);
2006 }
2007
2008 /* Let go all sockets that were marked as used when starting select */
2009 static void
2010 lwip_select_dec_sockets_used(int maxfdp, fd_set *used_sockets)
2011 {
2012 int i;
2013 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
2014 /* if this FD is not in the set, continue */
2015 if (FD_ISSET(i, used_sockets)) {
2016 struct lwip_sock *sock = tryget_socket_unconn_nouse(i);
2017 LWIP_ASSERT("socket gone at the end of select", sock != NULL);
2018 if (sock != NULL) {
2019 done_socket(sock);
2020 }
2021 }
2022 }
2023 }
2024 #else /* LWIP_NETCONN_FULLDUPLEX */
2025 #define lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, used_sockets)
2026 #define lwip_select_dec_sockets_used(maxfdp1, used_sockets)
2027 #endif /* LWIP_NETCONN_FULLDUPLEX */
2028
2029 int
2030 lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset,
2031 struct timeval *timeout)
2032 {
2033 u32_t waitres = 0;
2034 int nready;
2035 fd_set lreadset, lwriteset, lexceptset;
2036 u32_t msectimeout;
2037 int i;
2038 int maxfdp2;
2039 #if LWIP_NETCONN_SEM_PER_THREAD
2040 int waited = 0;
2041 #endif
2042 #if LWIP_NETCONN_FULLDUPLEX
2043 fd_set used_sockets;
2044 #endif
2045 SYS_ARCH_DECL_PROTECT(lev);
2046
2047 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%"S32_F" tvusec=%"S32_F")\n",
2048 maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset,
2049 timeout ? (s32_t)timeout->tv_sec : (s32_t) - 1,
2050 timeout ? (s32_t)timeout->tv_usec : (s32_t) - 1));
2051
2052 if ((maxfdp1 < 0) || (maxfdp1 > LWIP_SELECT_MAXNFDS)) {
2053 set_errno(EINVAL);
2054 return -1;
2055 }
2056
2057 lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, &used_sockets);
2058
2059 /* Go through each socket in each list to count number of sockets which
2060 currently match */
2061 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2062
2063 if (nready < 0) {
2064 /* one of the sockets in one of the fd_sets was invalid */
2065 set_errno(EBADF);
2066 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2067 return -1;
2068 } else if (nready > 0) {
2069 /* one or more sockets are set, no need to wait */
2070 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
2071 } else {
2072 /* If we don't have any current events, then suspend if we are supposed to */
2073 if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) {
2074 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n"));
2075 /* This is OK as the local fdsets are empty and nready is zero,
2076 or we would have returned earlier. */
2077 } else {
2078 /* None ready: add our semaphore to list:
2079 We don't actually need any dynamic memory. Our entry on the
2080 list is only valid while we are in this function, so it's ok
2081 to use local variables (unless we're running in MPU compatible
2082 mode). */
2083 API_SELECT_CB_VAR_DECLARE(select_cb);
2084 API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(ENOMEM); lwip_select_dec_sockets_used(maxfdp1, &used_sockets); return -1);
2085 memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));
2086
2087 API_SELECT_CB_VAR_REF(select_cb).readset = readset;
2088 API_SELECT_CB_VAR_REF(select_cb).writeset = writeset;
2089 API_SELECT_CB_VAR_REF(select_cb).exceptset = exceptset;
2090 #if LWIP_NETCONN_SEM_PER_THREAD
2091 API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
2092 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2093 if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
2094 /* failed to create semaphore */
2095 set_errno(ENOMEM);
2096 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2097 API_SELECT_CB_VAR_FREE(select_cb);
2098 return -1;
2099 }
2100 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2101
2102 lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2103
2104 /* Increase select_waiting for each socket we are interested in */
2105 maxfdp2 = maxfdp1;
2106 for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
2107 if ((readset && FD_ISSET(i, readset)) ||
2108 (writeset && FD_ISSET(i, writeset)) ||
2109 (exceptset && FD_ISSET(i, exceptset))) {
2110 struct lwip_sock *sock;
2111 SYS_ARCH_PROTECT(lev);
2112 sock = tryget_socket_unconn_locked(i);
2113 if (sock != NULL) {
2114 sock->select_waiting++;
2115 if (sock->select_waiting == 0) {
2116 /* overflow - too many threads waiting */
2117 sock->select_waiting--;
2118 nready = -1;
2119 maxfdp2 = i;
2120 SYS_ARCH_UNPROTECT(lev);
2121 done_socket(sock);
2122 set_errno(EBUSY);
2123 break;
2124 }
2125 SYS_ARCH_UNPROTECT(lev);
2126 done_socket(sock);
2127 } else {
2128 /* Not a valid socket */
2129 nready = -1;
2130 maxfdp2 = i;
2131 SYS_ARCH_UNPROTECT(lev);
2132 set_errno(EBADF);
2133 break;
2134 }
2135 }
2136 }
2137
2138 if (nready >= 0) {
2139 /* Call lwip_selscan again: there could have been events between
2140 the last scan (without us on the list) and putting us on the list! */
2141 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2142 if (nready < 0) {
2143 set_errno(EBADF);
2144 } else if (!nready) {
2145 /* Still none ready, just wait to be woken */
2146 if (timeout == 0) {
2147 /* Wait forever */
2148 msectimeout = 0;
2149 } else {
2150 long msecs_long = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500) / 1000));
2151 if (msecs_long <= 0) {
2152 /* Wait 1ms at least (0 means wait forever) */
2153 msectimeout = 1;
2154 } else {
2155 msectimeout = (u32_t)msecs_long;
2156 }
2157 }
2158
2159 waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
2160 #if LWIP_NETCONN_SEM_PER_THREAD
2161 waited = 1;
2162 #endif
2163 }
2164 }
2165
2166 /* Decrease select_waiting for each socket we are interested in */
2167 for (i = LWIP_SOCKET_OFFSET; i < maxfdp2; i++) {
2168 if ((readset && FD_ISSET(i, readset)) ||
2169 (writeset && FD_ISSET(i, writeset)) ||
2170 (exceptset && FD_ISSET(i, exceptset))) {
2171 struct lwip_sock *sock;
2172 SYS_ARCH_PROTECT(lev);
2173 sock = tryget_socket_unconn_nouse(i);
2174 LWIP_ASSERT("socket gone at the end of select", sock != NULL);
2175 if (sock != NULL) {
2176 /* for now, handle select_waiting==0... */
2177 LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
2178 if (sock->select_waiting > 0) {
2179 sock->select_waiting--;
2180 }
2181 SYS_ARCH_UNPROTECT(lev);
2182 } else {
2183 SYS_ARCH_UNPROTECT(lev);
2184 /* Not a valid socket */
2185 nready = -1;
2186 set_errno(EBADF);
2187 }
2188 }
2189 }
2190
2191 lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2192
2193 #if LWIP_NETCONN_SEM_PER_THREAD
2194 if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
2195 /* don't leave the thread-local semaphore signalled */
2196 sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
2197 }
2198 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2199 sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
2200 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2201 API_SELECT_CB_VAR_FREE(select_cb);
2202
2203 if (nready < 0) {
2204 /* This happens when a socket got closed while waiting */
2205 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2206 return -1;
2207 }
2208
2209 if (waitres == SYS_ARCH_TIMEOUT) {
2210 /* Timeout */
2211 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n"));
2212 /* This is OK as the local fdsets are empty and nready is zero,
2213 or we would have returned earlier. */
2214 } else {
2215 /* See what's set now after waiting */
2216 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2217 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
2218 if (nready < 0) {
2219 set_errno(EBADF);
2220 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2221 return -1;
2222 }
2223 }
2224 }
2225 }
2226
2227 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2228 set_errno(0);
2229 if (readset) {
2230 *readset = lreadset;
2231 }
2232 if (writeset) {
2233 *writeset = lwriteset;
2234 }
2235 if (exceptset) {
2236 *exceptset = lexceptset;
2237 }
2238 return nready;
2239 }
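
/*
 * Illustrative sketch (not part of lwIP): waiting up to five seconds for a
 * socket 's' (placeholder descriptor) to become readable. The fd_sets are
 * modified in place, so they must be re-initialized before every call.
 *
 *   fd_set readfds;
 *   struct timeval tv;
 *   int n;
 *   FD_ZERO(&readfds);
 *   FD_SET(s, &readfds);
 *   tv.tv_sec = 5;
 *   tv.tv_usec = 0;
 *   n = lwip_select(s + 1, &readfds, NULL, NULL, &tv);
 *   if (n > 0 && FD_ISSET(s, &readfds)) {
 *     ... the socket is readable, a following lwip_recv() will not block ...
 *   }
 */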
2240 #endif /* LWIP_SOCKET_SELECT */
2241
2242 #if LWIP_SOCKET_POLL
2243 /** Options for the lwip_pollscan function. */
2244 enum lwip_pollscan_opts
2245 {
2246 /** Clear revents in each struct pollfd. */
2247 LWIP_POLLSCAN_CLEAR = 1,
2248
2249 /** Increment select_waiting in each struct lwip_sock. */
2250 LWIP_POLLSCAN_INC_WAIT = 2,
2251
2252 /** Decrement select_waiting in each struct lwip_sock. */
2253 LWIP_POLLSCAN_DEC_WAIT = 4
2254 };
2255
2256 /**
2257 * Update revents in each struct pollfd.
2258 * Optionally update select_waiting in struct lwip_sock.
2259 *
2260 * @param fds array of structures to update
2261 * @param nfds number of structures in fds
2262 * @param opts what to update and how
2263 * @return number of structures that have revents != 0
2264 */
2265 static int
2266 lwip_pollscan(struct pollfd *fds, nfds_t nfds, enum lwip_pollscan_opts opts)
2267 {
2268 int nready = 0;
2269 nfds_t fdi;
2270 struct lwip_sock *sock;
2271 SYS_ARCH_DECL_PROTECT(lev);
2272
2273 /* Go through each struct pollfd in the array. */
2274 for (fdi = 0; fdi < nfds; fdi++) {
2275 if ((opts & LWIP_POLLSCAN_CLEAR) != 0) {
2276 fds[fdi].revents = 0;
2277 }
2278
2279 /* Negative fd means the caller wants us to ignore this struct.
2280 POLLNVAL means we already detected that the fd is invalid;
2281 if another thread has since opened a new socket with that fd,
2282 we must not use that socket. */
2283 if (fds[fdi].fd >= 0 && (fds[fdi].revents & POLLNVAL) == 0) {
2284 /* First get the socket's status (protected)... */
2285 SYS_ARCH_PROTECT(lev);
2286 sock = tryget_socket_unconn_locked(fds[fdi].fd);
2287 if (sock != NULL) {
2288 void* lastdata = sock->lastdata.pbuf;
2289 s16_t rcvevent = sock->rcvevent;
2290 u16_t sendevent = sock->sendevent;
2291 u16_t errevent = sock->errevent;
2292
2293 if ((opts & LWIP_POLLSCAN_INC_WAIT) != 0) {
2294 sock->select_waiting++;
2295 if (sock->select_waiting == 0) {
2296 /* overflow - too many threads waiting */
2297 sock->select_waiting--;
2298 nready = -1;
2299 SYS_ARCH_UNPROTECT(lev);
2300 done_socket(sock);
2301 break;
2302 }
2303 } else if ((opts & LWIP_POLLSCAN_DEC_WAIT) != 0) {
2304 /* for now, handle select_waiting==0... */
2305 LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
2306 if (sock->select_waiting > 0) {
2307 sock->select_waiting--;
2308 }
2309 }
2310 SYS_ARCH_UNPROTECT(lev);
2311 done_socket(sock);
2312
2313 /* ... then examine it: */
2314 /* See if netconn of this socket is ready for read */
2315 if ((fds[fdi].events & POLLIN) != 0 && ((lastdata != NULL) || (rcvevent > 0))) {
2316 fds[fdi].revents |= POLLIN;
2317 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for reading\n", fds[fdi].fd));
2318 }
2319 /* See if netconn of this socket is ready for write */
2320 if ((fds[fdi].events & POLLOUT) != 0 && (sendevent != 0)) {
2321 fds[fdi].revents |= POLLOUT;
2322 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for writing\n", fds[fdi].fd));
2323 }
2324 /* See if netconn of this socket had an error */
2325 if (errevent != 0) {
2326 /* POLLERR is output only. */
2327 fds[fdi].revents |= POLLERR;
2328 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for exception\n", fds[fdi].fd));
2329 }
2330 } else {
2331 /* Not a valid socket */
2332 SYS_ARCH_UNPROTECT(lev);
2333 /* POLLNVAL is output only. */
2334 fds[fdi].revents |= POLLNVAL;
2335 return -1;
2336 }
2337 }
2338
2339 /* Will return the number of structures that have events,
2340 not the number of events. */
2341 if (fds[fdi].revents != 0) {
2342 nready++;
2343 }
2344 }
2345
2346 LWIP_ASSERT("nready >= 0", nready >= 0);
2347 return nready;
2348 }
2349
2350 #if LWIP_NETCONN_FULLDUPLEX
2351 /* Mark all sockets as used.
2352 *
2353 * All sockets are marked (and later unmarked), whether they are open or not.
2354  * This is OK as lwip_pollscan aborts poll when non-open sockets are found.
2355 */
2356 static void
2357 lwip_poll_inc_sockets_used(struct pollfd *fds, nfds_t nfds)
2358 {
2359 nfds_t fdi;
2360
2361 if(fds) {
2362 /* Go through each struct pollfd in the array. */
2363 for (fdi = 0; fdi < nfds; fdi++) {
2364 /* Increase the reference counter */
2365 tryget_socket_unconn(fds[fdi].fd);
2366 }
2367 }
2368 }
2369
2370 /* Let go all sockets that were marked as used when starting poll */
2371 static void
2372 lwip_poll_dec_sockets_used(struct pollfd *fds, nfds_t nfds)
2373 {
2374 nfds_t fdi;
2375
2376 if(fds) {
2377 /* Go through each struct pollfd in the array. */
2378 for (fdi = 0; fdi < nfds; fdi++) {
2379 struct lwip_sock *sock = tryget_socket_unconn_nouse(fds[fdi].fd);
2380 if (sock != NULL) {
2381 done_socket(sock);
2382 }
2383 }
2384 }
2385 }
2386 #else /* LWIP_NETCONN_FULLDUPLEX */
2387 #define lwip_poll_inc_sockets_used(fds, nfds)
2388 #define lwip_poll_dec_sockets_used(fds, nfds)
2389 #endif /* LWIP_NETCONN_FULLDUPLEX */
2390
2391 int
2392 lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout)
2393 {
2394 u32_t waitres = 0;
2395 int nready;
2396 u32_t msectimeout;
2397 #if LWIP_NETCONN_SEM_PER_THREAD
2398 int waited = 0;
2399 #endif
2400
2401 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll(%p, %d, %d)\n",
2402 (void*)fds, (int)nfds, timeout));
2403 LWIP_ERROR("lwip_poll: invalid fds", ((fds != NULL && nfds > 0) || (fds == NULL && nfds == 0)),
2404 set_errno(EINVAL); return -1;);
2405
2406 lwip_poll_inc_sockets_used(fds, nfds);
2407
2408 /* Go through each struct pollfd to count number of structures
2409 which currently match */
2410 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_CLEAR);
2411
2412 if (nready < 0) {
2413 lwip_poll_dec_sockets_used(fds, nfds);
2414 return -1;
2415 }
2416
2417 /* If we don't have any current events, then suspend if we are supposed to */
2418 if (!nready) {
2419 API_SELECT_CB_VAR_DECLARE(select_cb);
2420
2421 if (timeout == 0) {
2422 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: no timeout, returning 0\n"));
2423 goto return_success;
2424 }
2425 API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(EAGAIN); lwip_poll_dec_sockets_used(fds, nfds); return -1);
2426 memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));
2427
2428 /* None ready: add our semaphore to list:
2429 We don't actually need any dynamic memory. Our entry on the
2430 list is only valid while we are in this function, so it's ok
2431 to use local variables. */
2432
2433 API_SELECT_CB_VAR_REF(select_cb).poll_fds = fds;
2434 API_SELECT_CB_VAR_REF(select_cb).poll_nfds = nfds;
2435 #if LWIP_NETCONN_SEM_PER_THREAD
2436 API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
2437 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2438 if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
2439 /* failed to create semaphore */
2440 set_errno(EAGAIN);
2441 lwip_poll_dec_sockets_used(fds, nfds);
2442 API_SELECT_CB_VAR_FREE(select_cb);
2443 return -1;
2444 }
2445 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2446
2447 lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2448
2449 /* Increase select_waiting for each socket we are interested in.
2450 Also, check for events again: there could have been events between
2451 the last scan (without us on the list) and putting us on the list! */
2452 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_INC_WAIT);
2453
2454 if (!nready) {
2455 /* Still none ready, just wait to be woken */
2456 if (timeout < 0) {
2457 /* Wait forever */
2458 msectimeout = 0;
2459 } else {
2460 /* timeout == 0 would have been handled earlier. */
2461 LWIP_ASSERT("timeout > 0", timeout > 0);
2462 msectimeout = timeout;
2463 }
2464 waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
2465 #if LWIP_NETCONN_SEM_PER_THREAD
2466 waited = 1;
2467 #endif
2468 }
2469
2470 /* Decrease select_waiting for each socket we are interested in,
2471 and check which events occurred while we waited. */
2472 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_DEC_WAIT);
2473
2474 lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2475
2476 #if LWIP_NETCONN_SEM_PER_THREAD
2477     if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
2478 /* don't leave the thread-local semaphore signalled */
2479 sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
2480 }
2481 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2482 sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
2483 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2484 API_SELECT_CB_VAR_FREE(select_cb);
2485
2486 if (nready < 0) {
2487 /* This happens when a socket got closed while waiting */
2488 lwip_poll_dec_sockets_used(fds, nfds);
2489 return -1;
2490 }
2491
2492 if (waitres == SYS_ARCH_TIMEOUT) {
2493 /* Timeout */
2494 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: timeout expired\n"));
2495 goto return_success;
2496 }
2497 }
2498
2499 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: nready=%d\n", nready));
2500 return_success:
2501 lwip_poll_dec_sockets_used(fds, nfds);
2502 set_errno(0);
2503 return nready;
2504 }
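
/*
 * Illustrative sketch (not part of lwIP): the poll() equivalent of the
 * select example above, waiting up to 5000 ms for socket 's' (placeholder
 * descriptor) to become readable or to report an error.
 *
 *   struct pollfd pfd;
 *   int n;
 *   pfd.fd = s;
 *   pfd.events = POLLIN;
 *   pfd.revents = 0;
 *   n = lwip_poll(&pfd, 1, 5000);
 *   if (n > 0 && (pfd.revents & (POLLIN | POLLERR))) {
 *     ... handle readable data or fetch the pending error via SO_ERROR ...
 *   }
 */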
2505
2506 /**
2507 * Check whether event_callback should wake up a thread waiting in
2508 * lwip_poll.
2509 */
2510 static int
2511 lwip_poll_should_wake(const struct lwip_select_cb *scb, int fd, int has_recvevent, int has_sendevent, int has_errevent)
2512 {
2513 nfds_t fdi;
2514 for (fdi = 0; fdi < scb->poll_nfds; fdi++) {
2515 const struct pollfd *pollfd = &scb->poll_fds[fdi];
2516 if (pollfd->fd == fd) {
2517 /* Do not update pollfd->revents right here;
2518 that would be a data race because lwip_pollscan
2519 accesses revents without protecting. */
2520 if (has_recvevent && (pollfd->events & POLLIN) != 0) {
2521 return 1;
2522 }
2523 if (has_sendevent && (pollfd->events & POLLOUT) != 0) {
2524 return 1;
2525 }
2526 if (has_errevent) {
2527 /* POLLERR is output only. */
2528 return 1;
2529 }
2530 }
2531 }
2532 return 0;
2533 }
2534 #endif /* LWIP_SOCKET_POLL */
2535
2536 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
2537 /**
2538 * Callback registered in the netconn layer for each socket-netconn.
2539 * Processes recvevent (data available) and wakes up tasks waiting for select.
2540 *
2541 * @note for LWIP_TCPIP_CORE_LOCKING any caller of this function
2542 * must have the core lock held when signaling the following events
2543 * as they might cause select_list_cb to be checked:
2544 * NETCONN_EVT_RCVPLUS
2545 * NETCONN_EVT_SENDPLUS
2546 * NETCONN_EVT_ERROR
2547 * This requirement will be asserted in select_check_waiters()
2548 */
2549 static void
2550 event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len)
2551 {
2552 int s, check_waiters;
2553 struct lwip_sock *sock;
2554 SYS_ARCH_DECL_PROTECT(lev);
2555
2556 LWIP_UNUSED_ARG(len);
2557
2558 /* Get socket */
2559 if (conn) {
2560 s = conn->socket;
2561 if (s < 0) {
2562 /* Data comes in right away after an accept, even though
2563 * the server task might not have created a new socket yet.
2564 * Just count down (or up) if that's the case and we
2565 * will use the data later. Note that only receive events
2566 * can happen before the new socket is set up. */
2567 SYS_ARCH_PROTECT(lev);
2568 if (conn->socket < 0) {
2569 if (evt == NETCONN_EVT_RCVPLUS) {
2570 /* conn->socket is -1 on initialization
2571 lwip_accept adjusts sock->recvevent if conn->socket < -1 */
2572 conn->socket--;
2573 }
2574 SYS_ARCH_UNPROTECT(lev);
2575 return;
2576 }
2577 s = conn->socket;
2578 SYS_ARCH_UNPROTECT(lev);
2579 }
2580
2581 sock = get_socket(s);
2582 if (!sock) {
2583 return;
2584 }
2585 } else {
2586 return;
2587 }
2588
2589 check_waiters = 1;
2590 SYS_ARCH_PROTECT(lev);
2591 /* Set event as required */
2592 switch (evt) {
2593 case NETCONN_EVT_RCVPLUS:
2594 sock->rcvevent++;
2595 if (sock->rcvevent > 1) {
2596 check_waiters = 0;
2597 }
2598 break;
2599 case NETCONN_EVT_RCVMINUS:
2600 sock->rcvevent--;
2601 check_waiters = 0;
2602 break;
2603 case NETCONN_EVT_SENDPLUS:
2604 if (sock->sendevent) {
2605 check_waiters = 0;
2606 }
2607 sock->sendevent = 1;
2608 break;
2609 case NETCONN_EVT_SENDMINUS:
2610 sock->sendevent = 0;
2611 check_waiters = 0;
2612 break;
2613 case NETCONN_EVT_ERROR:
2614 sock->errevent = 1;
2615 break;
2616 default:
2617 LWIP_ASSERT("unknown event", 0);
2618 break;
2619 }
2620
2621 if (sock->select_waiting && check_waiters) {
2622 /* Save which events are active */
2623 int has_recvevent, has_sendevent, has_errevent;
2624 has_recvevent = sock->rcvevent > 0;
2625 has_sendevent = sock->sendevent != 0;
2626 has_errevent = sock->errevent != 0;
2627 SYS_ARCH_UNPROTECT(lev);
2628 /* Check any select calls waiting on this socket */
2629 select_check_waiters(s, has_recvevent, has_sendevent, has_errevent);
2630 } else {
2631 SYS_ARCH_UNPROTECT(lev);
2632 }
2633 poll_check_waiters(s, check_waiters);
2634 done_socket(sock);
2635 }
2636
2637 /**
2638 * Check if any select waiters are waiting on this socket and its events
2639 *
2640 * @note on synchronization of select_cb_list:
2641 * LWIP_TCPIP_CORE_LOCKING: the select_cb_list must only be accessed while holding
2642 * the core lock. We do a single pass through the list and signal any waiters.
2643 * Core lock should already be held when calling here!!!!
2644  *
2645 * !LWIP_TCPIP_CORE_LOCKING: we use SYS_ARCH_PROTECT but unlock on each iteration
2646 * of the loop, thus creating a possibility where a thread could modify the
2647 * select_cb_list during our UNPROTECT/PROTECT. We use a generational counter to
2648 * detect this change and restart the list walk. The list is expected to be small
2649 */
2650 static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent)
2651 {
2652 struct lwip_select_cb *scb;
2653 #if !LWIP_TCPIP_CORE_LOCKING
2654 int last_select_cb_ctr;
2655 SYS_ARCH_DECL_PROTECT(lev);
2656 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2657
2658 LWIP_ASSERT_CORE_LOCKED();
2659
2660 #if !LWIP_TCPIP_CORE_LOCKING
2661 SYS_ARCH_PROTECT(lev);
2662 again:
2663 /* remember the state of select_cb_list to detect changes */
2664 last_select_cb_ctr = select_cb_ctr;
2665 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2666 for (scb = select_cb_list; scb != NULL; scb = scb->next) {
2667 if (scb->sem_signalled == 0) {
2668 /* semaphore not signalled yet */
2669 int do_signal = 0;
2670 #if LWIP_SOCKET_POLL
2671 if (scb->poll_fds != NULL) {
2672 do_signal = lwip_poll_should_wake(scb, s, has_recvevent, has_sendevent, has_errevent);
2673 }
2674 #endif /* LWIP_SOCKET_POLL */
2675 #if LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL
2676 else
2677 #endif /* LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL */
2678 #if LWIP_SOCKET_SELECT
2679 {
2680 /* Test this select call for our socket */
2681 if (has_recvevent) {
2682 if (scb->readset && FD_ISSET(s, scb->readset)) {
2683 do_signal = 1;
2684 }
2685 }
2686 if (has_sendevent) {
2687 if (!do_signal && scb->writeset && FD_ISSET(s, scb->writeset)) {
2688 do_signal = 1;
2689 }
2690 }
2691 if (has_errevent) {
2692 if (!do_signal && scb->exceptset && FD_ISSET(s, scb->exceptset)) {
2693 do_signal = 1;
2694 }
2695 }
2696 }
2697 #endif /* LWIP_SOCKET_SELECT */
2698 if (do_signal) {
2699 scb->sem_signalled = 1;
2700 /* For !LWIP_TCPIP_CORE_LOCKING, we don't call SYS_ARCH_UNPROTECT() before signaling
2701 the semaphore, as this might lead to the select thread taking itself off the list,
2702 invalidating the semaphore. */
2703 sys_sem_signal(SELECT_SEM_PTR(scb->sem));
2704 }
2705 }
2706 #if LWIP_TCPIP_CORE_LOCKING
2707 }
2708 #else
2709 /* unlock interrupts with each step */
2710 SYS_ARCH_UNPROTECT(lev);
2711 /* this makes sure interrupt protection time is short */
2712 SYS_ARCH_PROTECT(lev);
2713 if (last_select_cb_ctr != select_cb_ctr) {
2714 /* someone has changed select_cb_list, restart at the beginning */
2715 goto again;
2716 }
2717 /* remember the state of select_cb_list to detect changes */
2718 last_select_cb_ctr = select_cb_ctr;
2719 }
2720 SYS_ARCH_UNPROTECT(lev);
2721 #endif
2722 }
2723 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
2724
2725 /**
2726 * Close one end of a full-duplex connection.
2727 */
2728 int
2729 lwip_shutdown(int s, int how)
2730 {
2731 struct lwip_sock *sock;
2732 err_t err;
2733 u8_t shut_rx = 0, shut_tx = 0;
2734
2735 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_shutdown(%d, how=%d)\n", s, how));
2736
2737 sock = get_socket(s);
2738 if (!sock) {
2739 return -1;
2740 }
2741
2742 if (sock->conn != NULL) {
2743 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
2744 sock_set_errno(sock, EOPNOTSUPP);
2745 done_socket(sock);
2746 return -1;
2747 }
2748 } else {
2749 sock_set_errno(sock, ENOTCONN);
2750 done_socket(sock);
2751 return -1;
2752 }
2753
2754 if (how == SHUT_RD) {
2755 shut_rx = 1;
2756 } else if (how == SHUT_WR) {
2757 shut_tx = 1;
2758 } else if (how == SHUT_RDWR) {
2759 shut_rx = 1;
2760 shut_tx = 1;
2761 } else {
2762 sock_set_errno(sock, EINVAL);
2763 done_socket(sock);
2764 return -1;
2765 }
2766 err = netconn_shutdown(sock->conn, shut_rx, shut_tx);
2767
2768 sock_set_errno(sock, err_to_errno(err));
2769 done_socket(sock);
2770 return (err == ERR_OK ? 0 : -1);
2771 }
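
/*
 * Illustrative sketch (not part of lwIP): half-closing a connected TCP
 * socket 's' after the request has been sent, then draining the peer's
 * response until it closes ('buf' is a placeholder application buffer).
 *
 *   lwip_shutdown(s, SHUT_WR);
 *   while (lwip_recv(s, buf, sizeof(buf), 0) > 0) {
 *     ... consume the remaining response data ...
 *   }
 *   lwip_close(s);
 */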
2772
2773 static int
2774 lwip_getaddrname(int s, struct sockaddr *name, socklen_t *namelen, u8_t local)
2775 {
2776 struct lwip_sock *sock;
2777 union sockaddr_aligned saddr;
2778 ip_addr_t naddr;
2779 u16_t port;
2780 err_t err;
2781
2782 sock = get_socket(s);
2783 if (!sock) {
2784 return -1;
2785 }
2786
2787 /* get the IP address and port */
2788 err = netconn_getaddr(sock->conn, &naddr, &port, local);
2789 if (err != ERR_OK) {
2790 sock_set_errno(sock, err_to_errno(err));
2791 done_socket(sock);
2792 return -1;
2793 }
2794
2795 #if LWIP_IPV4 && LWIP_IPV6
2796 /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
2797 if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) &&
2798 IP_IS_V4_VAL(naddr)) {
2799 ip4_2_ipv4_mapped_ipv6(ip_2_ip6(&naddr), ip_2_ip4(&naddr));
2800 IP_SET_TYPE_VAL(naddr, IPADDR_TYPE_V6);
2801 }
2802 #endif /* LWIP_IPV4 && LWIP_IPV6 */
2803
2804 IPADDR_PORT_TO_SOCKADDR(&saddr, &naddr, port);
2805
2806 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getaddrname(%d, addr=", s));
2807 ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
2808 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", port));
2809
2810 if (*namelen > saddr.sa.sa_len) {
2811 *namelen = saddr.sa.sa_len;
2812 }
2813 MEMCPY(name, &saddr, *namelen);
2814
2815 sock_set_errno(sock, 0);
2816 done_socket(sock);
2817 return 0;
2818 }
2819
2820 int
2821 lwip_getpeername(int s, struct sockaddr *name, socklen_t *namelen)
2822 {
2823 return lwip_getaddrname(s, name, namelen, 0);
2824 }
2825
2826 int
2827 lwip_getsockname(int s, struct sockaddr *name, socklen_t *namelen)
2828 {
2829 return lwip_getaddrname(s, name, namelen, 1);
2830 }
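
/*
 * Illustrative sketch (not part of lwIP): reading back the local port chosen
 * by an implicit bind, e.g. after connecting with an unbound socket
 * ('s' is a placeholder descriptor; IPv4 case shown).
 *
 *   struct sockaddr_storage local;
 *   socklen_t len = sizeof(local);
 *   if (lwip_getsockname(s, (struct sockaddr *)&local, &len) == 0) {
 *     u16_t port = lwip_ntohs(((struct sockaddr_in *)&local)->sin_port);
 *     ... 'port' now holds the locally bound port ...
 *   }
 */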
2831
2832 int
2833 lwip_getsockopt(int s, int level, int optname, void *optval, socklen_t *optlen)
2834 {
2835 int err;
2836 struct lwip_sock *sock = get_socket(s);
2837 #if !LWIP_TCPIP_CORE_LOCKING
2838 err_t cberr;
2839 LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
2840 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2841
2842 if (!sock) {
2843 return -1;
2844 }
2845
2846 if ((NULL == optval) || (NULL == optlen)) {
2847 sock_set_errno(sock, EFAULT);
2848 done_socket(sock);
2849 return -1;
2850 }
2851
2852 #if LWIP_TCPIP_CORE_LOCKING
2853 /* core-locking can just call the -impl function */
2854 LOCK_TCPIP_CORE();
2855 err = lwip_getsockopt_impl(s, level, optname, optval, optlen);
2856 UNLOCK_TCPIP_CORE();
2857
2858 #else /* LWIP_TCPIP_CORE_LOCKING */
2859
2860 #if LWIP_MPU_COMPATIBLE
2861 /* MPU_COMPATIBLE copies the optval data, so check for max size here */
2862 if (*optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
2863 sock_set_errno(sock, ENOBUFS);
2864 done_socket(sock);
2865 return -1;
2866 }
2867 #endif /* LWIP_MPU_COMPATIBLE */
2868
2869 LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
2870 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
2871 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
2872 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
2873 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = *optlen;
2874 #if !LWIP_MPU_COMPATIBLE
2875 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.p = optval;
2876 #endif /* !LWIP_MPU_COMPATIBLE */
2877 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
2878 #if LWIP_NETCONN_SEM_PER_THREAD
2879 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
2880 #else
2881 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
2882 #endif
2883 cberr = tcpip_callback(lwip_getsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
2884 if (cberr != ERR_OK) {
2885 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
2886 sock_set_errno(sock, err_to_errno(cberr));
2887 done_socket(sock);
2888 return -1;
2889 }
2890 sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);
2891
2892 /* write back optlen and optval */
2893 *optlen = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen;
2894 #if LWIP_MPU_COMPATIBLE
2895 MEMCPY(optval, LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval,
2896 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen);
2897 #endif /* LWIP_MPU_COMPATIBLE */
2898
2899   /* maybe lwip_getsockopt_impl has changed err */
2900 err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
2901 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
2902 #endif /* LWIP_TCPIP_CORE_LOCKING */
2903
2904 sock_set_errno(sock, err);
2905 done_socket(sock);
2906 return err ? -1 : 0;
2907 }
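
/*
 * Illustrative sketch (not part of lwIP): fetching the pending socket error
 * with SO_ERROR, e.g. after lwip_select()/lwip_poll() reported an exception
 * on a non-blocking connect ('s' is a placeholder descriptor).
 *
 *   int so_err = 0;
 *   socklen_t len = sizeof(so_err);
 *   if (lwip_getsockopt(s, SOL_SOCKET, SO_ERROR, &so_err, &len) == 0) {
 *     ... so_err is 0 on success or an errno-style code such as ECONNREFUSED ...
 *   }
 */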
2908
2909 #if !LWIP_TCPIP_CORE_LOCKING
2910 /** lwip_getsockopt_callback: only used without CORE_LOCKING
2911 * to get into the tcpip_thread
2912 */
2913 static void
2914 lwip_getsockopt_callback(void *arg)
2915 {
2916 struct lwip_setgetsockopt_data *data;
2917 LWIP_ASSERT("arg != NULL", arg != NULL);
2918 data = (struct lwip_setgetsockopt_data *)arg;
2919
2920 data->err = lwip_getsockopt_impl(data->s, data->level, data->optname,
2921 #if LWIP_MPU_COMPATIBLE
2922 data->optval,
2923 #else /* LWIP_MPU_COMPATIBLE */
2924 data->optval.p,
2925 #endif /* LWIP_MPU_COMPATIBLE */
2926 &data->optlen);
2927
2928 sys_sem_signal((sys_sem_t *)(data->completed_sem));
2929 }
2930 #endif /* LWIP_TCPIP_CORE_LOCKING */
2931
2932 static int
2933 lwip_sockopt_to_ipopt(int optname)
2934 {
2935 /* Map SO_* values to our internal SOF_* values
2936 * We should not rely on #defines in socket.h
2937 * being in sync with ip.h.
2938 */
2939 switch (optname) {
2940 case SO_BROADCAST:
2941 return SOF_BROADCAST;
2942 case SO_KEEPALIVE:
2943 return SOF_KEEPALIVE;
2944 case SO_REUSEADDR:
2945 return SOF_REUSEADDR;
2946 default:
2947 LWIP_ASSERT("Unknown socket option", 0);
2948 return 0;
2949 }
2950 }
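
/*
 * Illustrative sketch (not part of lwIP): the SOF_* flags mapped above are
 * what a SOL_SOCKET option ultimately toggles on the pcb, e.g. enabling
 * address reuse before bind (requires SO_REUSE in lwipopts.h):
 *
 *   int one = 1;
 *   lwip_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
 */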
2951
2952 /** lwip_getsockopt_impl: the actual implementation of getsockopt:
2953  * same arguments as lwip_getsockopt, either called directly or through callback
2954 */
2955 static int
2956 lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen)
2957 {
2958 int err = 0;
2959 struct lwip_sock *sock = tryget_socket(s);
2960 if (!sock) {
2961 return EBADF;
2962 }
2963
2964 #ifdef LWIP_HOOK_SOCKETS_GETSOCKOPT
2965 if (LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) {
2966 return err;
2967 }
2968 #endif
2969
2970 switch (level) {
2971
2972 /* Level: SOL_SOCKET */
2973 case SOL_SOCKET:
2974 switch (optname) {
2975
2976 #if LWIP_TCP
2977 case SO_ACCEPTCONN:
2978 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
2979 if (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_TCP) {
2980 done_socket(sock);
2981 return ENOPROTOOPT;
2982 }
2983 if ((sock->conn->pcb.tcp != NULL) && (sock->conn->pcb.tcp->state == LISTEN)) {
2984 *(int *)optval = 1;
2985 } else {
2986 *(int *)optval = 0;
2987 }
2988 break;
2989 #endif /* LWIP_TCP */
2990
2991 /* The option flags */
2992 case SO_BROADCAST:
2993 case SO_KEEPALIVE:
2994 #if SO_REUSE
2995 case SO_REUSEADDR:
2996 #endif /* SO_REUSE */
2997 if ((optname == SO_BROADCAST) &&
2998 (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
2999 done_socket(sock);
3000 return ENOPROTOOPT;
3001 }
3002
3003 optname = lwip_sockopt_to_ipopt(optname);
3004
3005 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3006 *(int *)optval = ip_get_option(sock->conn->pcb.ip, optname);
3007 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, optname=0x%x, ..) = %s\n",
3008 s, optname, (*(int *)optval ? "on" : "off")));
3009 break;
3010
3011 case SO_TYPE:
3012 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3013 switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
3014 case NETCONN_RAW:
3015 *(int *)optval = SOCK_RAW;
3016 break;
3017 case NETCONN_TCP:
3018 *(int *)optval = SOCK_STREAM;
3019 break;
3020 case NETCONN_UDP:
3021 *(int *)optval = SOCK_DGRAM;
3022 break;
3023 default: /* unrecognized socket type */
3024 *(int *)optval = netconn_type(sock->conn);
3025 LWIP_DEBUGF(SOCKETS_DEBUG,
3026 ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE): unrecognized socket type %d\n",
3027 s, *(int *)optval));
3028 } /* switch (netconn_type(sock->conn)) */
3029 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE) = %d\n",
3030 s, *(int *)optval));
3031 break;
3032
3033 case SO_ERROR:
3034 LWIP_SOCKOPT_CHECK_OPTLEN(sock, *optlen, int);
3035 *(int *)optval = err_to_errno(netconn_err(sock->conn));
3036 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_ERROR) = %d\n",
3037 s, *(int *)optval));
3038 break;
3039
3040 #if LWIP_SO_SNDTIMEO
3041 case SO_SNDTIMEO:
3042 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3043 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_sendtimeout(sock->conn));
3044 break;
3045 #endif /* LWIP_SO_SNDTIMEO */
3046 #if LWIP_SO_RCVTIMEO
3047 case SO_RCVTIMEO:
3048 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3049 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_recvtimeout(sock->conn));
3050 break;
3051 #endif /* LWIP_SO_RCVTIMEO */
3052 #if LWIP_SO_RCVBUF
3053 case SO_RCVBUF:
3054 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3055 *(int *)optval = netconn_get_recvbufsize(sock->conn);
3056 break;
3057 #endif /* LWIP_SO_RCVBUF */
3058 #if LWIP_SO_LINGER
3059 case SO_LINGER: {
3060 s16_t conn_linger;
3061 struct linger *linger = (struct linger *)optval;
3062 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, struct linger);
3063 conn_linger = sock->conn->linger;
3064 if (conn_linger >= 0) {
3065 linger->l_onoff = 1;
3066 linger->l_linger = (int)conn_linger;
3067 } else {
3068 linger->l_onoff = 0;
3069 linger->l_linger = 0;
3070 }
3071 }
3072 break;
3073 #endif /* LWIP_SO_LINGER */
3074 #if LWIP_UDP
3075 case SO_NO_CHECK:
3076 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_UDP);
3077 #if LWIP_UDPLITE
3078 if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) {
3079 /* this flag is only available for UDP, not for UDP lite */
3080 done_socket(sock);
3081 return EAFNOSUPPORT;
3082 }
3083 #endif /* LWIP_UDPLITE */
3084 *(int *)optval = udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM) ? 1 : 0;
3085 break;
3086 #endif /* LWIP_UDP*/
3087 default:
3088 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
3089 s, optname));
3090 err = ENOPROTOOPT;
3091 break;
3092 } /* switch (optname) */
3093 break;
3094
3095 /* Level: IPPROTO_IP */
3096 case IPPROTO_IP:
3097 switch (optname) {
3098 case IP_TTL:
3099 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3100 *(int *)optval = sock->conn->pcb.ip->ttl;
3101 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TTL) = %d\n",
3102 s, *(int *)optval));
3103 break;
3104 case IP_TOS:
3105 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3106 *(int *)optval = sock->conn->pcb.ip->tos;
3107 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TOS) = %d\n",
3108 s, *(int *)optval));
3109 break;
3110 #if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
3111 case IP_MULTICAST_TTL:
3112 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3113 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3114 done_socket(sock);
3115 return ENOPROTOOPT;
3116 }
3117 *(u8_t *)optval = udp_get_multicast_ttl(sock->conn->pcb.udp);
3118 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_TTL) = %d\n",
3119 s, *(int *)optval));
3120 break;
3121 case IP_MULTICAST_IF:
3122 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, struct in_addr);
3123 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3124 done_socket(sock);
3125 return ENOPROTOOPT;
3126 }
3127 inet_addr_from_ip4addr((struct in_addr *)optval, udp_get_multicast_netif_addr(sock->conn->pcb.udp));
3128 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_IF) = 0x%"X32_F"\n",
3129 s, *(u32_t *)optval));
3130 break;
3131 case IP_MULTICAST_LOOP:
3132 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3133 if ((sock->conn->pcb.udp->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) {
3134 *(u8_t *)optval = 1;
3135 } else {
3136 *(u8_t *)optval = 0;
3137 }
3138 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_LOOP) = %d\n",
3139 s, *(int *)optval));
3140 break;
3141 #endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
3142 default:
3143 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
3144 s, optname));
3145 err = ENOPROTOOPT;
3146 break;
3147 } /* switch (optname) */
3148 break;
3149
3150 #if LWIP_TCP
3151 /* Level: IPPROTO_TCP */
3152 case IPPROTO_TCP:
3153       /* Special case: all IPPROTO_TCP options take an int */
3154 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_TCP);
3155 if (sock->conn->pcb.tcp->state == LISTEN) {
3156 done_socket(sock);
3157 return EINVAL;
3158 }
3159 switch (optname) {
3160 case TCP_NODELAY:
3161 *(int *)optval = tcp_nagle_disabled(sock->conn->pcb.tcp);
3162 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_NODELAY) = %s\n",
3163 s, (*(int *)optval) ? "on" : "off") );
3164 break;
3165 case TCP_KEEPALIVE:
3166 *(int *)optval = (int)sock->conn->pcb.tcp->keep_idle;
3167 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) = %d\n",
3168 s, *(int *)optval));
3169 break;
3170
3171 #if LWIP_TCP_KEEPALIVE
3172 case TCP_KEEPIDLE:
3173 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_idle / 1000);
3174 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) = %d\n",
3175 s, *(int *)optval));
3176 break;
3177 case TCP_KEEPINTVL:
3178 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_intvl / 1000);
3179 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) = %d\n",
3180 s, *(int *)optval));
3181 break;
3182 case TCP_KEEPCNT:
3183 *(int *)optval = (int)sock->conn->pcb.tcp->keep_cnt;
3184 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) = %d\n",
3185 s, *(int *)optval));
3186 break;
3187 #endif /* LWIP_TCP_KEEPALIVE */
3188 default:
3189 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
3190 s, optname));
3191 err = ENOPROTOOPT;
3192 break;
3193 } /* switch (optname) */
3194 break;
3195 #endif /* LWIP_TCP */
3196
3197 #if LWIP_IPV6
3198 /* Level: IPPROTO_IPV6 */
3199 case IPPROTO_IPV6:
3200 switch (optname) {
3201 case IPV6_V6ONLY:
3202 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3203 *(int *)optval = (netconn_get_ipv6only(sock->conn) ? 1 : 0);
3204 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY) = %d\n",
3205 s, *(int *)optval));
3206 break;
3207 default:
3208 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
3209 s, optname));
3210 err = ENOPROTOOPT;
3211 break;
3212 } /* switch (optname) */
3213 break;
3214 #endif /* LWIP_IPV6 */
3215
3216 #if LWIP_UDP && LWIP_UDPLITE
3217 /* Level: IPPROTO_UDPLITE */
3218 case IPPROTO_UDPLITE:
3219       /* Special case: all IPPROTO_UDPLITE options take an int */
3220 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3221       /* If this is not a UDP lite socket, ignore any options. */
3222 if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
3223 done_socket(sock);
3224 return ENOPROTOOPT;
3225 }
3226 switch (optname) {
3227 case UDPLITE_SEND_CSCOV:
3228 *(int *)optval = sock->conn->pcb.udp->chksum_len_tx;
3229 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) = %d\n",
3230 s, (*(int *)optval)) );
3231 break;
3232 case UDPLITE_RECV_CSCOV:
3233 *(int *)optval = sock->conn->pcb.udp->chksum_len_rx;
3234 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) = %d\n",
3235 s, (*(int *)optval)) );
3236 break;
3237 default:
3238 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
3239 s, optname));
3240 err = ENOPROTOOPT;
3241 break;
3242 } /* switch (optname) */
3243 break;
3244 #endif /* LWIP_UDP */
3245 /* Level: IPPROTO_RAW */
3246 case IPPROTO_RAW:
3247 switch (optname) {
3248 #if LWIP_IPV6 && LWIP_RAW
3249 case IPV6_CHECKSUM:
3250 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_RAW);
3251 if (sock->conn->pcb.raw->chksum_reqd == 0) {
3252 *(int *)optval = -1;
3253 } else {
3254 *(int *)optval = sock->conn->pcb.raw->chksum_offset;
3255 }
3256 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM) = %d\n",
3257 s, (*(int *)optval)) );
3258 break;
3259 #endif /* LWIP_IPV6 && LWIP_RAW */
3260 default:
3261 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
3262 s, optname));
3263 err = ENOPROTOOPT;
3264 break;
3265 } /* switch (optname) */
3266 break;
3267 default:
3268 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
3269 s, level, optname));
3270 err = ENOPROTOOPT;
3271 break;
3272 } /* switch (level) */
3273
3274 done_socket(sock);
3275 return err;
3276 }
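
/* Hedged usage sketch (illustrative only; 'fd' is an assumed, already
 * connected TCP socket descriptor): reading back the keep-alive idle time
 * through the option handling above, assuming LWIP_TCP_KEEPALIVE is enabled.
 * Note that listening sockets are rejected with EINVAL.
 *
 *   int idle_s = 0;
 *   socklen_t len = sizeof(idle_s);
 *   if (lwip_getsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle_s, &len) == 0) {
 *     // idle_s holds keep_idle, converted from milliseconds to seconds
 *   }
 */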
3277
3278 int
3279 lwip_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen)
3280 {
3281 int err = 0;
3282 struct lwip_sock *sock = get_socket(s);
3283 #if !LWIP_TCPIP_CORE_LOCKING
3284 err_t cberr;
3285 LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
3286 #endif /* !LWIP_TCPIP_CORE_LOCKING */
3287
3288 if (!sock) {
3289 return -1;
3290 }
3291
3292 if (NULL == optval) {
3293 sock_set_errno(sock, EFAULT);
3294 done_socket(sock);
3295 return -1;
3296 }
3297
3298 #if LWIP_TCPIP_CORE_LOCKING
3299 /* core-locking can just call the -impl function */
3300 LOCK_TCPIP_CORE();
3301 err = lwip_setsockopt_impl(s, level, optname, optval, optlen);
3302 UNLOCK_TCPIP_CORE();
3303
3304 #else /* LWIP_TCPIP_CORE_LOCKING */
3305
3306 #if LWIP_MPU_COMPATIBLE
3307 /* MPU_COMPATIBLE copies the optval data, so check for max size here */
3308 if (optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
3309 sock_set_errno(sock, ENOBUFS);
3310 done_socket(sock);
3311 return -1;
3312 }
3313 #endif /* LWIP_MPU_COMPATIBLE */
3314
3315 LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
3316 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
3317 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
3318 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
3319 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = optlen;
3320 #if LWIP_MPU_COMPATIBLE
3321 MEMCPY(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, optval, optlen);
3322 #else /* LWIP_MPU_COMPATIBLE */
3323 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.pc = (const void *)optval;
3324 #endif /* LWIP_MPU_COMPATIBLE */
3325 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
3326 #if LWIP_NETCONN_SEM_PER_THREAD
3327 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
3328 #else
3329 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
3330 #endif
3331 cberr = tcpip_callback(lwip_setsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
3332 if (cberr != ERR_OK) {
3333 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
3334 sock_set_errno(sock, err_to_errno(cberr));
3335 done_socket(sock);
3336 return -1;
3337 }
3338 sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);
3339
3340 /* maybe lwip_setsockopt_impl has changed err */
3341 err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
3342 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
3343 #endif /* LWIP_TCPIP_CORE_LOCKING */
3344
3345 sock_set_errno(sock, err);
3346 done_socket(sock);
3347 return err ? -1 : 0;
3348 }
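
/* Hedged usage sketch: applying a receive timeout through the public entry
 * point above. With the default LWIP_SO_SNDRCVTIMEO_OPTTYPE this is a
 * struct timeval (an int holding milliseconds when
 * LWIP_SO_SNDRCVTIMEO_NONSTANDARD is enabled); 'fd' is an assumed socket
 * descriptor. On failure -1 is returned and errno is set via sock_set_errno().
 *
 *   struct timeval tv;
 *   tv.tv_sec = 2;
 *   tv.tv_usec = 0;
 *   if (lwip_setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0) {
 *     // inspect errno
 *   }
 */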
3349
3350 #if !LWIP_TCPIP_CORE_LOCKING
3351 /** lwip_setsockopt_callback: only used without CORE_LOCKING
3352 * to get into the tcpip_thread
3353 */
3354 static void
3355 lwip_setsockopt_callback(void *arg)
3356 {
3357 struct lwip_setgetsockopt_data *data;
3358 LWIP_ASSERT("arg != NULL", arg != NULL);
3359 data = (struct lwip_setgetsockopt_data *)arg;
3360
3361 data->err = lwip_setsockopt_impl(data->s, data->level, data->optname,
3362 #if LWIP_MPU_COMPATIBLE
3363 data->optval,
3364 #else /* LWIP_MPU_COMPATIBLE */
3365 data->optval.pc,
3366 #endif /* LWIP_MPU_COMPATIBLE */
3367 data->optlen);
3368
3369 sys_sem_signal((sys_sem_t *)(data->completed_sem));
3370 }
3371 #endif /* LWIP_TCPIP_CORE_LOCKING */
3372
3373 /** lwip_setsockopt_impl: the actual implementation of setsockopt:
3374 * same arguments as lwip_setsockopt, either called directly or through the callback
3375 */
3376 static int
3377 lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen)
3378 {
3379 int err = 0;
3380 struct lwip_sock *sock = tryget_socket(s);
3381 if (!sock) {
3382 return EBADF;
3383 }
3384
3385 #ifdef LWIP_HOOK_SOCKETS_SETSOCKOPT
3386 if (LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) {
3387 return err;
3388 }
3389 #endif
3390
3391 switch (level) {
3392
3393 /* Level: SOL_SOCKET */
3394 case SOL_SOCKET:
3395 switch (optname) {
3396
3397 /* SO_ACCEPTCONN is get-only */
3398
3399 /* The option flags */
3400 case SO_BROADCAST:
3401 case SO_KEEPALIVE:
3402 #if SO_REUSE
3403 case SO_REUSEADDR:
3404 #endif /* SO_REUSE */
3405 if ((optname == SO_BROADCAST) &&
3406 (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
3407 done_socket(sock);
3408 return ENOPROTOOPT;
3409 }
3410
3411 optname = lwip_sockopt_to_ipopt(optname);
3412
3413 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3414 if (*(const int *)optval) {
3415 ip_set_option(sock->conn->pcb.ip, optname);
3416 } else {
3417 ip_reset_option(sock->conn->pcb.ip, optname);
3418 }
3419 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, optname=0x%x, ..) -> %s\n",
3420 s, optname, (*(const int *)optval ? "on" : "off")));
3421 break;
3422
3423 /* SO_TYPE is get-only */
3424 /* SO_ERROR is get-only */
3425
3426 #if LWIP_SO_SNDTIMEO
3427 case SO_SNDTIMEO: {
3428 long ms_long;
3429 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3430 ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
3431 if (ms_long < 0) {
3432 done_socket(sock);
3433 return EINVAL;
3434 }
3435 netconn_set_sendtimeout(sock->conn, ms_long);
3436 break;
3437 }
3438 #endif /* LWIP_SO_SNDTIMEO */
3439 #if LWIP_SO_RCVTIMEO
3440 case SO_RCVTIMEO: {
3441 long ms_long;
3442 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3443 ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
3444 if (ms_long < 0) {
3445 done_socket(sock);
3446 return EINVAL;
3447 }
3448 netconn_set_recvtimeout(sock->conn, (u32_t)ms_long);
3449 break;
3450 }
3451 #endif /* LWIP_SO_RCVTIMEO */
3452 #if LWIP_SO_RCVBUF
3453 case SO_RCVBUF:
3454 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, int);
3455 netconn_set_recvbufsize(sock->conn, *(const int *)optval);
3456 break;
3457 #endif /* LWIP_SO_RCVBUF */
3458 #if LWIP_SO_LINGER
3459 case SO_LINGER: {
3460 const struct linger *linger = (const struct linger *)optval;
3461 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct linger);
3462 if (linger->l_onoff) {
3463 int lingersec = linger->l_linger;
3464 if (lingersec < 0) {
3465 done_socket(sock);
3466 return EINVAL;
3467 }
3468 if (lingersec > 0xFFFF) {
3469 lingersec = 0xFFFF;
3470 }
3471 sock->conn->linger = (s16_t)lingersec;
3472 } else {
3473 sock->conn->linger = -1;
3474 }
3475 }
3476 break;
3477 #endif /* LWIP_SO_LINGER */
3478 #if LWIP_UDP
3479 case SO_NO_CHECK:
3480 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
3481 #if LWIP_UDPLITE
3482 if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) {
3483 /* this flag is only available for UDP, not for UDP lite */
3484 done_socket(sock);
3485 return EAFNOSUPPORT;
3486 }
3487 #endif /* LWIP_UDPLITE */
3488 if (*(const int *)optval) {
3489 udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
3490 } else {
3491 udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
3492 }
3493 break;
3494 #endif /* LWIP_UDP */
3495 case SO_BINDTODEVICE: {
3496 const struct ifreq *iface;
3497 struct netif *n = NULL;
3498
3499 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct ifreq);
3500
3501 iface = (const struct ifreq *)optval;
3502 if (iface->ifr_name[0] != 0) {
3503 n = netif_find(iface->ifr_name);
3504 if (n == NULL) {
3505 done_socket(sock);
3506 return ENODEV;
3507 }
3508 }
3509
3510 switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
3511 #if LWIP_TCP
3512 case NETCONN_TCP:
3513 tcp_bind_netif(sock->conn->pcb.tcp, n);
3514 break;
3515 #endif
3516 #if LWIP_UDP
3517 case NETCONN_UDP:
3518 udp_bind_netif(sock->conn->pcb.udp, n);
3519 break;
3520 #endif
3521 #if LWIP_RAW
3522 case NETCONN_RAW:
3523 raw_bind_netif(sock->conn->pcb.raw, n);
3524 break;
3525 #endif
3526 default:
3527 LWIP_ASSERT("Unhandled netconn type in SO_BINDTODEVICE", 0);
3528 break;
3529 }
3530 }
3531 break;
3532 default:
3533 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
3534 s, optname));
3535 err = ENOPROTOOPT;
3536 break;
3537 } /* switch (optname) */
3538 break;
3539
3540 /* Level: IPPROTO_IP */
3541 case IPPROTO_IP:
3542 switch (optname) {
3543 case IP_TTL:
3544 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3545 sock->conn->pcb.ip->ttl = (u8_t)(*(const int *)optval);
3546 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TTL, ..) -> %d\n",
3547 s, sock->conn->pcb.ip->ttl));
3548 break;
3549 case IP_TOS:
3550 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3551 sock->conn->pcb.ip->tos = (u8_t)(*(const int *)optval);
3552 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TOS, ..)-> %d\n",
3553 s, sock->conn->pcb.ip->tos));
3554 break;
3555 #if LWIP_NETBUF_RECVINFO
3556 case IP_PKTINFO:
3557 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
3558 if (*(const int *)optval) {
3559 sock->conn->flags |= NETCONN_FLAG_PKTINFO;
3560 } else {
3561 sock->conn->flags &= ~NETCONN_FLAG_PKTINFO;
3562 }
3563 break;
3564 #endif /* LWIP_NETBUF_RECVINFO */
3565 #if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
3566 case IP_MULTICAST_TTL:
3567 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3568 udp_set_multicast_ttl(sock->conn->pcb.udp, (u8_t)(*(const u8_t *)optval));
3569 break;
3570 case IP_MULTICAST_IF: {
3571 ip4_addr_t if_addr;
3572 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct in_addr, NETCONN_UDP);
3573 inet_addr_to_ip4addr(&if_addr, (const struct in_addr *)optval);
3574 udp_set_multicast_netif_addr(sock->conn->pcb.udp, &if_addr);
3575 }
3576 break;
3577 case IP_MULTICAST_LOOP:
3578 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3579 if (*(const u8_t *)optval) {
3580 udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
3581 } else {
3582 udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
3583 }
3584 break;
3585 #endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
3586 #if LWIP_IGMP
3587 case IP_ADD_MEMBERSHIP:
3588 case IP_DROP_MEMBERSHIP: {
3589 /* If this is a TCP or a RAW socket, ignore these options. */
3590 err_t igmp_err;
3591 const struct ip_mreq *imr = (const struct ip_mreq *)optval;
3592 ip4_addr_t if_addr;
3593 ip4_addr_t multi_addr;
3594 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ip_mreq, NETCONN_UDP);
3595 inet_addr_to_ip4addr(&if_addr, &imr->imr_interface);
3596 inet_addr_to_ip4addr(&multi_addr, &imr->imr_multiaddr);
3597 if (optname == IP_ADD_MEMBERSHIP) {
3598 if (!lwip_socket_register_membership(s, &if_addr, &multi_addr)) {
3599 /* cannot track membership (out of memory) */
3600 err = ENOMEM;
3601 igmp_err = ERR_OK;
3602 } else {
3603 igmp_err = igmp_joingroup(&if_addr, &multi_addr);
3604 }
3605 } else {
3606 igmp_err = igmp_leavegroup(&if_addr, &multi_addr);
3607 lwip_socket_unregister_membership(s, &if_addr, &multi_addr);
3608 }
3609 if (igmp_err != ERR_OK) {
3610 err = EADDRNOTAVAIL;
3611 }
3612 }
3613 break;
3614 #endif /* LWIP_IGMP */
3615 default:
3616 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
3617 s, optname));
3618 err = ENOPROTOOPT;
3619 break;
3620 } /* switch (optname) */
3621 break;
3622
3623 #if LWIP_TCP
3624 /* Level: IPPROTO_TCP */
3625 case IPPROTO_TCP:
3626 /* Special case: all IPPROTO_TCP options take an int */
3627 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP);
3628 if (sock->conn->pcb.tcp->state == LISTEN) {
3629 done_socket(sock);
3630 return EINVAL;
3631 }
3632 switch (optname) {
3633 case TCP_NODELAY:
3634 if (*(const int *)optval) {
3635 tcp_nagle_disable(sock->conn->pcb.tcp);
3636 } else {
3637 tcp_nagle_enable(sock->conn->pcb.tcp);
3638 }
3639 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_NODELAY) -> %s\n",
3640 s, (*(const int *)optval) ? "on" : "off") );
3641 break;
3642 case TCP_KEEPALIVE:
3643 sock->conn->pcb.tcp->keep_idle = (u32_t)(*(const int *)optval);
3644 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) -> %"U32_F"\n",
3645 s, sock->conn->pcb.tcp->keep_idle));
3646 break;
3647
3648 #if LWIP_TCP_KEEPALIVE
3649 case TCP_KEEPIDLE:
3650 sock->conn->pcb.tcp->keep_idle = 1000 * (u32_t)(*(const int *)optval);
3651 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) -> %"U32_F"\n",
3652 s, sock->conn->pcb.tcp->keep_idle));
3653 break;
3654 case TCP_KEEPINTVL:
3655 sock->conn->pcb.tcp->keep_intvl = 1000 * (u32_t)(*(const int *)optval);
3656 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) -> %"U32_F"\n",
3657 s, sock->conn->pcb.tcp->keep_intvl));
3658 break;
3659 case TCP_KEEPCNT:
3660 sock->conn->pcb.tcp->keep_cnt = (u32_t)(*(const int *)optval);
3661 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) -> %"U32_F"\n",
3662 s, sock->conn->pcb.tcp->keep_cnt));
3663 break;
3664 #endif /* LWIP_TCP_KEEPALIVE */
3665 default:
3666 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
3667 s, optname));
3668 err = ENOPROTOOPT;
3669 break;
3670 } /* switch (optname) */
3671 break;
3672 #endif /* LWIP_TCP */
3673
3674 #if LWIP_IPV6
3675 /* Level: IPPROTO_IPV6 */
3676 case IPPROTO_IPV6:
3677 switch (optname) {
3678 case IPV6_V6ONLY:
3679 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3680 if (*(const int *)optval) {
3681 netconn_set_ipv6only(sock->conn, 1);
3682 } else {
3683 netconn_set_ipv6only(sock->conn, 0);
3684 }
3685 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY, ..) -> %d\n",
3686 s, (netconn_get_ipv6only(sock->conn) ? 1 : 0)));
3687 break;
3688 #if LWIP_IPV6_MLD
3689 case IPV6_JOIN_GROUP:
3690 case IPV6_LEAVE_GROUP: {
3691 /* If this is a TCP or a RAW socket, ignore these options. */
3692 err_t mld6_err;
3693 struct netif *netif;
3694 ip6_addr_t multi_addr;
3695 const struct ipv6_mreq *imr = (const struct ipv6_mreq *)optval;
3696 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ipv6_mreq, NETCONN_UDP);
3697 inet6_addr_to_ip6addr(&multi_addr, &imr->ipv6mr_multiaddr);
3698 LWIP_ASSERT("Invalid netif index", imr->ipv6mr_interface <= 0xFFu);
3699 netif = netif_get_by_index((u8_t)imr->ipv6mr_interface);
3700 if (netif == NULL) {
3701 err = EADDRNOTAVAIL;
3702 break;
3703 }
3704
3705 if (optname == IPV6_JOIN_GROUP) {
3706 if (!lwip_socket_register_mld6_membership(s, imr->ipv6mr_interface, &multi_addr)) {
3707 /* cannot track membership (out of memory) */
3708 err = ENOMEM;
3709 mld6_err = ERR_OK;
3710 } else {
3711 mld6_err = mld6_joingroup_netif(netif, &multi_addr);
3712 }
3713 } else {
3714 mld6_err = mld6_leavegroup_netif(netif, &multi_addr);
3715 lwip_socket_unregister_mld6_membership(s, imr->ipv6mr_interface, &multi_addr);
3716 }
3717 if (mld6_err != ERR_OK) {
3718 err = EADDRNOTAVAIL;
3719 }
3720 }
3721 break;
3722 #endif /* LWIP_IPV6_MLD */
3723 default:
3724 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
3725 s, optname));
3726 err = ENOPROTOOPT;
3727 break;
3728 } /* switch (optname) */
3729 break;
3730 #endif /* LWIP_IPV6 */
3731
3732 #if LWIP_UDP && LWIP_UDPLITE
3733 /* Level: IPPROTO_UDPLITE */
3734 case IPPROTO_UDPLITE:
3735 /* Special case: all IPPROTO_UDPLITE options take an int */
3736 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3737 /* If this is not a UDP lite socket, ignore any options. */
3738 if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
3739 done_socket(sock);
3740 return ENOPROTOOPT;
3741 }
3742 switch (optname) {
3743 case UDPLITE_SEND_CSCOV:
3744 if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
3745 /* don't allow illegal values! */
3746 sock->conn->pcb.udp->chksum_len_tx = 8;
3747 } else {
3748 sock->conn->pcb.udp->chksum_len_tx = (u16_t)*(const int *)optval;
3749 }
3750 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) -> %d\n",
3751 s, (*(const int *)optval)) );
3752 break;
3753 case UDPLITE_RECV_CSCOV:
3754 if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
3755 /* don't allow illegal values! */
3756 sock->conn->pcb.udp->chksum_len_rx = 8;
3757 } else {
3758 sock->conn->pcb.udp->chksum_len_rx = (u16_t)*(const int *)optval;
3759 }
3760 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) -> %d\n",
3761 s, (*(const int *)optval)) );
3762 break;
3763 default:
3764 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
3765 s, optname));
3766 err = ENOPROTOOPT;
3767 break;
3768 } /* switch (optname) */
3769 break;
3770 #endif /* LWIP_UDP && LWIP_UDPLITE */
3771 /* Level: IPPROTO_RAW */
3772 case IPPROTO_RAW:
3773 switch (optname) {
3774 #if LWIP_IPV6 && LWIP_RAW
3775 case IPV6_CHECKSUM:
3776 /* It should not be possible to disable the checksum generation with ICMPv6
3777 * as per RFC 3542 chapter 3.1 */
3778 if (sock->conn->pcb.raw->protocol == IPPROTO_ICMPV6) {
3779 done_socket(sock);
3780 return EINVAL;
3781 }
3782
3783 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_RAW);
3784 if (*(const int *)optval < 0) {
3785 sock->conn->pcb.raw->chksum_reqd = 0;
3786 } else if (*(const int *)optval & 1) {
3787 /* Per RFC3542, odd offsets are not allowed */
3788 done_socket(sock);
3789 return EINVAL;
3790 } else {
3791 sock->conn->pcb.raw->chksum_reqd = 1;
3792 sock->conn->pcb.raw->chksum_offset = (u16_t)*(const int *)optval;
3793 }
3794 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM, ..) -> %d\n",
3795 s, sock->conn->pcb.raw->chksum_reqd));
3796 break;
3797 #endif /* LWIP_IPV6 && LWIP_RAW */
3798 default:
3799 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
3800 s, optname));
3801 err = ENOPROTOOPT;
3802 break;
3803 } /* switch (optname) */
3804 break;
3805 default:
3806 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
3807 s, level, optname));
3808 err = ENOPROTOOPT;
3809 break;
3810 } /* switch (level) */
3811
3812 done_socket(sock);
3813 return err;
3814 }
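
/* Hedged usage sketch: configuring TCP keep-alive through the handlers above,
 * assuming LWIP_TCP_KEEPALIVE is enabled and 'fd' is an assumed, connected
 * (non-listening) TCP socket. TCP_KEEPIDLE/TCP_KEEPINTVL take seconds and are
 * stored internally in milliseconds; TCP_KEEPCNT is a probe count.
 *
 *   int on = 1, idle = 30, intvl = 5, cnt = 4;
 *   lwip_setsockopt(fd, SOL_SOCKET,  SO_KEEPALIVE,  &on,    sizeof(on));
 *   lwip_setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE,  &idle,  sizeof(idle));
 *   lwip_setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *   lwip_setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT,   &cnt,   sizeof(cnt));
 */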
3815
3816 int
3817 lwip_ioctl(int s, long cmd, void *argp)
3818 {
3819 struct lwip_sock *sock = get_socket(s);
3820 u8_t val;
3821 #if LWIP_SO_RCVBUF
3822 int recv_avail;
3823 #endif /* LWIP_SO_RCVBUF */
3824
3825 if (!sock) {
3826 return -1;
3827 }
3828
3829 switch (cmd) {
3830 #if LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE
3831 case FIONREAD:
3832 if (!argp) {
3833 sock_set_errno(sock, EINVAL);
3834 done_socket(sock);
3835 return -1;
3836 }
3837 #if LWIP_FIONREAD_LINUXMODE
3838 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
3839 struct netbuf *nb;
3840 if (sock->lastdata.netbuf) {
3841 nb = sock->lastdata.netbuf;
3842 *((int *)argp) = nb->p->tot_len;
3843 } else {
3844 struct netbuf *rxbuf;
3845 err_t err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &rxbuf, NETCONN_DONTBLOCK);
3846 if (err != ERR_OK) {
3847 *((int *)argp) = 0;
3848 } else {
3849 sock->lastdata.netbuf = rxbuf;
3850 *((int *)argp) = rxbuf->p->tot_len;
3851 }
3852 }
3853 done_socket(sock);
3854 return 0;
3855 }
3856 #endif /* LWIP_FIONREAD_LINUXMODE */
3857
3858 #if LWIP_SO_RCVBUF
3859 /* we come here if either LWIP_FIONREAD_LINUXMODE==0 or this is a TCP socket */
3860 SYS_ARCH_GET(sock->conn->recv_avail, recv_avail);
3861 if (recv_avail < 0) {
3862 recv_avail = 0;
3863 }
3864
3865 /* Check if there is data left from the last recv operation. /maq 041215 */
3866 if (sock->lastdata.netbuf) {
3867 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
3868 recv_avail += sock->lastdata.pbuf->tot_len;
3869 } else {
3870 recv_avail += sock->lastdata.netbuf->p->tot_len;
3871 }
3872 }
3873 *((int *)argp) = recv_avail;
3874
3875 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONREAD, %p) = %"U16_F"\n", s, argp, *((u16_t *)argp)));
3876 sock_set_errno(sock, 0);
3877 done_socket(sock);
3878 return 0;
3879 #else /* LWIP_SO_RCVBUF */
3880 break;
3881 #endif /* LWIP_SO_RCVBUF */
3882 #endif /* LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE */
3883
3884 case (long)FIONBIO:
3885 val = 0;
3886 if (argp && *(int *)argp) {
3887 val = 1;
3888 }
3889 netconn_set_nonblocking(sock->conn, val);
3890 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONBIO, %d)\n", s, val));
3891 sock_set_errno(sock, 0);
3892 done_socket(sock);
3893 return 0;
3894
3895 default:
3896 IOCTL_CMD_CASE_HANDLER();
3897 break;
3898 } /* switch (cmd) */
3899 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, UNIMPL: 0x%lx, %p)\n", s, cmd, argp));
3900 sock_set_errno(sock, ENOSYS); /* not yet implemented */
3901 done_socket(sock);
3902 return -1;
3903 }
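
/* Hedged usage sketch for the two ioctl commands handled above ('fd' is an
 * assumed socket descriptor): FIONBIO toggles non-blocking mode, FIONREAD
 * reports readable bytes when LWIP_SO_RCVBUF or LWIP_FIONREAD_LINUXMODE is
 * enabled.
 *
 *   int on = 1;
 *   int avail = 0;
 *   lwip_ioctl(fd, FIONBIO, &on);
 *   lwip_ioctl(fd, FIONREAD, &avail);
 */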
3904
3905 /** A minimal implementation of fcntl.
3906 * Currently only the commands F_GETFL and F_SETFL are implemented.
3907 * The flag O_NONBLOCK and access modes are supported for F_GETFL; only
3908 * the flag O_NONBLOCK is implemented for F_SETFL.
3909 */
3910 int
3911 lwip_fcntl(int s, int cmd, int val)
3912 {
3913 struct lwip_sock *sock = get_socket(s);
3914 int ret = -1;
3915 int op_mode = 0;
3916
3917 if (!sock) {
3918 return -1;
3919 }
3920
3921 switch (cmd) {
3922 case F_GETFL:
3923 ret = netconn_is_nonblocking(sock->conn) ? O_NONBLOCK : 0;
3924 sock_set_errno(sock, 0);
3925
3926 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
3927 #if LWIP_TCPIP_CORE_LOCKING
3928 LOCK_TCPIP_CORE();
3929 #else
3930 SYS_ARCH_DECL_PROTECT(lev);
3931 /* the proper thing to do here would be to get into the tcpip_thread,
3932 but locking should be OK as well since we only *read* some flags */
3933 SYS_ARCH_PROTECT(lev);
3934 #endif
3935 #if LWIP_TCP
3936 if (sock->conn->pcb.tcp) {
3937 if (!(sock->conn->pcb.tcp->flags & TF_RXCLOSED)) {
3938 op_mode |= O_RDONLY;
3939 }
3940 if (!(sock->conn->pcb.tcp->flags & TF_FIN)) {
3941 op_mode |= O_WRONLY;
3942 }
3943 }
3944 #endif
3945 #if LWIP_TCPIP_CORE_LOCKING
3946 UNLOCK_TCPIP_CORE();
3947 #else
3948 SYS_ARCH_UNPROTECT(lev);
3949 #endif
3950 } else {
3951 op_mode |= O_RDWR;
3952 }
3953
3954 /* ensure O_RDWR for (O_RDONLY|O_WRONLY) != O_RDWR cases */
3955 ret |= (op_mode == (O_RDONLY | O_WRONLY)) ? O_RDWR : op_mode;
3956
3957 break;
3958 case F_SETFL:
3959 /* Bits corresponding to the file access mode and the file creation flags [..] that are set in arg shall be ignored */
3960 val &= ~(O_RDONLY | O_WRONLY | O_RDWR);
3961 if ((val & ~O_NONBLOCK) == 0) {
3962 /* only O_NONBLOCK, all other bits are zero */
3963 netconn_set_nonblocking(sock->conn, val & O_NONBLOCK);
3964 ret = 0;
3965 sock_set_errno(sock, 0);
3966 } else {
3967 sock_set_errno(sock, ENOSYS); /* not yet implemented */
3968 }
3969 break;
3970 default:
3971 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_fcntl(%d, UNIMPL: %d, %d)\n", s, cmd, val));
3972 sock_set_errno(sock, ENOSYS); /* not yet implemented */
3973 break;
3974 }
3975 done_socket(sock);
3976 return ret;
3977 }
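
/* Hedged usage sketch: the fcntl()-style way to enable O_NONBLOCK with this
 * minimal implementation ('fd' is an assumed socket descriptor). Access-mode
 * bits returned by F_GETFL are ignored by F_SETFL, so or-ing in O_NONBLOCK
 * is sufficient.
 *
 *   int flags = lwip_fcntl(fd, F_GETFL, 0);
 *   if (flags >= 0) {
 *     (void)lwip_fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 *   }
 */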
3978
3979 #if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
3980 int
3981 fcntl(int s, int cmd, ...)
3982 {
3983 va_list ap;
3984 int val;
3985
3986 va_start(ap, cmd);
3987 val = va_arg(ap, int);
3988 va_end(ap);
3989 return lwip_fcntl(s, cmd, val);
3990 }
3991 #endif
3992
3993 const char *
3994 lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size)
3995 {
3996 const char *ret = NULL;
3997 int size_int = (int)size;
3998 if (size_int < 0) {
3999 set_errno(ENOSPC);
4000 return NULL;
4001 }
4002 switch (af) {
4003 #if LWIP_IPV4
4004 case AF_INET:
4005 ret = ip4addr_ntoa_r((const ip4_addr_t *)src, dst, size_int);
4006 if (ret == NULL) {
4007 set_errno(ENOSPC);
4008 }
4009 break;
4010 #endif
4011 #if LWIP_IPV6
4012 case AF_INET6:
4013 ret = ip6addr_ntoa_r((const ip6_addr_t *)src, dst, size_int);
4014 if (ret == NULL) {
4015 set_errno(ENOSPC);
4016 }
4017 break;
4018 #endif
4019 default:
4020 set_errno(EAFNOSUPPORT);
4021 break;
4022 }
4023 return ret;
4024 }
4025
4026 int
4027 lwip_inet_pton(int af, const char *src, void *dst)
4028 {
4029 int err;
4030 switch (af) {
4031 #if LWIP_IPV4
4032 case AF_INET:
4033 err = ip4addr_aton(src, (ip4_addr_t *)dst);
4034 break;
4035 #endif
4036 #if LWIP_IPV6
4037 case AF_INET6: {
4038 /* convert into temporary variable since ip6_addr_t might be larger
4039 than in6_addr when scopes are enabled */
4040 ip6_addr_t addr;
4041 err = ip6addr_aton(src, &addr);
4042 if (err) {
4043 memcpy(dst, &addr.addr, sizeof(addr.addr));
4044 }
4045 break;
4046 }
4047 #endif
4048 default:
4049 err = -1;
4050 set_errno(EAFNOSUPPORT);
4051 break;
4052 }
4053 return err;
4054 }
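
/* Hedged usage sketch: round-tripping an IPv4 address through the two helpers
 * above (the literal address and buffer size are illustrative; 16 bytes is
 * enough for a dotted-quad string).
 *
 *   struct in_addr ia;
 *   char buf[16];
 *   if (lwip_inet_pton(AF_INET, "192.0.2.1", &ia) == 1) {
 *     const char *p = lwip_inet_ntop(AF_INET, &ia, buf, sizeof(buf));
 *     // p == buf on success; NULL with errno == ENOSPC if 'buf' is too small
 *   }
 */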
4055
4056 #if LWIP_IGMP
4057 /** Register a new IGMP membership. On socket close, the membership is dropped automatically.
4058 *
4059 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4060 *
4061 * @return 1 on success, 0 on failure
4062 */
4063 static int
4064 lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
4065 {
4066 struct lwip_sock *sock = get_socket(s);
4067 int i;
4068
4069 if (!sock) {
4070 return 0;
4071 }
4072
4073 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4074 if (socket_ipv4_multicast_memberships[i].sock == NULL) {
4075 socket_ipv4_multicast_memberships[i].sock = sock;
4076 ip4_addr_copy(socket_ipv4_multicast_memberships[i].if_addr, *if_addr);
4077 ip4_addr_copy(socket_ipv4_multicast_memberships[i].multi_addr, *multi_addr);
4078 done_socket(sock);
4079 return 1;
4080 }
4081 }
4082 done_socket(sock);
4083 return 0;
4084 }
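
/* Hedged sketch of the application-level call that reaches this helper:
 * joining an IPv4 multicast group on an assumed UDP socket 'fd' (the group
 * address is illustrative).
 *
 *   struct ip_mreq mreq;
 *   mreq.imr_multiaddr.s_addr = inet_addr("239.0.0.1");
 *   mreq.imr_interface.s_addr = INADDR_ANY;
 *   lwip_setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 */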
4085
4086 /** Unregister a previously registered membership. This prevents dropping the membership
4087 * on socket close.
4088 *
4089 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4090 */
4091 static void
4092 lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
4093 {
4094 struct lwip_sock *sock = get_socket(s);
4095 int i;
4096
4097 if (!sock) {
4098 return;
4099 }
4100
4101 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4102 if ((socket_ipv4_multicast_memberships[i].sock == sock) &&
4103 ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].if_addr, if_addr) &&
4104 ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].multi_addr, multi_addr)) {
4105 socket_ipv4_multicast_memberships[i].sock = NULL;
4106 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
4107 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
4108 break;
4109 }
4110 }
4111 done_socket(sock);
4112 }
4113
4114 /** Drop all memberships of a socket that were not dropped explicitly via setsockopt.
4115 *
4116 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
4117 */
4118 static void
4119 lwip_socket_drop_registered_memberships(int s)
4120 {
4121 struct lwip_sock *sock = get_socket(s);
4122 int i;
4123
4124 if (!sock) {
4125 return;
4126 }
4127
4128 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4129 if (socket_ipv4_multicast_memberships[i].sock == sock) {
4130 ip_addr_t multi_addr, if_addr;
4131 ip_addr_copy_from_ip4(multi_addr, socket_ipv4_multicast_memberships[i].multi_addr);
4132 ip_addr_copy_from_ip4(if_addr, socket_ipv4_multicast_memberships[i].if_addr);
4133 socket_ipv4_multicast_memberships[i].sock = NULL;
4134 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
4135 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
4136
4137 netconn_join_leave_group(sock->conn, &multi_addr, &if_addr, NETCONN_LEAVE);
4138 }
4139 }
4140 done_socket(sock);
4141 }
4142 #endif /* LWIP_IGMP */
4143
4144 #if LWIP_IPV6_MLD
4145 /** Register a new MLD6 membership. On socket close, the membership is dropped automatically.
4146 *
4147 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4148 *
4149 * @return 1 on success, 0 on failure
4150 */
4151 static int
4152 lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
4153 {
4154 struct lwip_sock *sock = get_socket(s);
4155 int i;
4156
4157 if (!sock) {
4158 return 0;
4159 }
4160
4161 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4162 if (socket_ipv6_multicast_memberships[i].sock == NULL) {
4163 socket_ipv6_multicast_memberships[i].sock = sock;
4164 socket_ipv6_multicast_memberships[i].if_idx = (u8_t)if_idx;
4165 ip6_addr_copy(socket_ipv6_multicast_memberships[i].multi_addr, *multi_addr);
4166 done_socket(sock);
4167 return 1;
4168 }
4169 }
4170 done_socket(sock);
4171 return 0;
4172 }
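
/* Hedged sketch of the application-level call that reaches this helper:
 * joining an IPv6 multicast group on an assumed UDP socket 'fd' (group and
 * interface index are illustrative; the index must fit in a u8_t, see the
 * range check in lwip_setsockopt_impl()).
 *
 *   struct ipv6_mreq mreq6;
 *   lwip_inet_pton(AF_INET6, "ff02::1:3", &mreq6.ipv6mr_multiaddr);
 *   mreq6.ipv6mr_interface = 1;
 *   lwip_setsockopt(fd, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq6, sizeof(mreq6));
 */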
4173
4174 /** Unregister a previously registered MLD6 membership. This prevents dropping the membership
4175 * on socket close.
4176 *
4177 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4178 */
4179 static void
4180 lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
4181 {
4182 struct lwip_sock *sock = get_socket(s);
4183 int i;
4184
4185 if (!sock) {
4186 return;
4187 }
4188
4189 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4190 if ((socket_ipv6_multicast_memberships[i].sock == sock) &&
4191 (socket_ipv6_multicast_memberships[i].if_idx == if_idx) &&
4192 ip6_addr_cmp(&socket_ipv6_multicast_memberships[i].multi_addr, multi_addr)) {
4193 socket_ipv6_multicast_memberships[i].sock = NULL;
4194 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4195 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4196 break;
4197 }
4198 }
4199 done_socket(sock);
4200 }
4201
4202 /** Drop all MLD6 memberships of a socket that were not dropped explicitly via setsockopt.
4203 *
4204 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
4205 */
4206 static void
4207 lwip_socket_drop_registered_mld6_memberships(int s)
4208 {
4209 struct lwip_sock *sock = get_socket(s);
4210 int i;
4211
4212 if (!sock) {
4213 return;
4214 }
4215
4216 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4217 if (socket_ipv6_multicast_memberships[i].sock == sock) {
4218 ip_addr_t multi_addr;
4219 u8_t if_idx;
4220
4221 ip_addr_copy_from_ip6(multi_addr, socket_ipv6_multicast_memberships[i].multi_addr);
4222 if_idx = socket_ipv6_multicast_memberships[i].if_idx;
4223
4224 socket_ipv6_multicast_memberships[i].sock = NULL;
4225 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4226 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4227
4228 netconn_join_leave_group_netif(sock->conn, &multi_addr, if_idx, NETCONN_LEAVE);
4229 }
4230 }
4231 done_socket(sock);
4232 }
4233 #endif /* LWIP_IPV6_MLD */
4234
4235 #endif /* LWIP_SOCKET */
4236