1 /**
2 * @file
3 * Sockets BSD-Like API module
4 */
5
6 /*
7 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without modification,
11 * are permitted provided that the following conditions are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 * 3. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
24 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
26 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
29 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
30 * OF SUCH DAMAGE.
31 *
32 * This file is part of the lwIP TCP/IP stack.
33 *
34 * Author: Adam Dunkels <adam@sics.se>
35 *
36 * Improved by Marc Boucher <marc@mbsi.ca> and David Haas <dhaas@alum.rpi.edu>
37 *
38 */
39
40 #include "lwip/opt.h"
41
42 #if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
43
44 #include "lwip/sockets.h"
45 #include "lwip/priv/sockets_priv.h"
46 #include "lwip/api.h"
47 #include "lwip/igmp.h"
48 #include "lwip/inet.h"
49 #include "lwip/tcp.h"
50 #include "lwip/raw.h"
51 #include "lwip/udp.h"
52 #include "lwip/memp.h"
53 #include "lwip/pbuf.h"
54 #include "lwip/netif.h"
55 #include "lwip/priv/tcpip_priv.h"
56 #include "lwip/mld6.h"
57 #if LWIP_ENABLE_DISTRIBUTED_NET
58 #include "lwip/distributed_net/distributed_net.h"
59 #include "lwip/distributed_net/distributed_net_core.h"
60 #endif /* LWIP_ENABLE_DISTRIBUTED_NET */
61 #if LWIP_CHECKSUM_ON_COPY
62 #include "lwip/inet_chksum.h"
63 #endif
64
65 #if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
66 #include <stdarg.h>
67 #endif
68
69 #include <string.h>
70
71 #ifdef LWIP_HOOK_FILENAME
72 #include LWIP_HOOK_FILENAME
73 #endif
74
75 #if LWIP_LOWPOWER
76 #include "lwip/lowpower.h"
77 #endif
78
79 /* If the netconn API is not required publicly, then we include the necessary
80 files here to get the implementation */
81 #if !LWIP_NETCONN
82 #undef LWIP_NETCONN
83 #define LWIP_NETCONN 1
84 #include "api_msg.c"
85 #include "api_lib.c"
86 #include "netbuf.c"
87 #undef LWIP_NETCONN
88 #define LWIP_NETCONN 0
89 #endif
90
91 #define API_SELECT_CB_VAR_REF(name) API_VAR_REF(name)
92 #define API_SELECT_CB_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_select_cb, name)
93 #define API_SELECT_CB_VAR_ALLOC(name, retblock) API_VAR_ALLOC_EXT(struct lwip_select_cb, MEMP_SELECT_CB, name, retblock)
94 #define API_SELECT_CB_VAR_FREE(name) API_VAR_FREE(MEMP_SELECT_CB, name)
95
96 #if LWIP_IPV4
97 #define IP4ADDR_PORT_TO_SOCKADDR(sin, ipaddr, port) do { \
98 (sin)->sin_len = sizeof(struct sockaddr_in); \
99 (sin)->sin_family = AF_INET; \
100 (sin)->sin_port = lwip_htons((port)); \
101 inet_addr_from_ip4addr(&(sin)->sin_addr, ipaddr); \
102 memset((sin)->sin_zero, 0, SIN_ZERO_LEN); }while(0)
103 #define SOCKADDR4_TO_IP4ADDR_PORT(sin, ipaddr, port) do { \
104 inet_addr_to_ip4addr(ip_2_ip4(ipaddr), &((sin)->sin_addr)); \
105 (port) = lwip_ntohs((sin)->sin_port); }while(0)
106 #endif /* LWIP_IPV4 */
107
108 #if LWIP_IPV6
109 #define IP6ADDR_PORT_TO_SOCKADDR(sin6, ipaddr, port) do { \
110 (sin6)->sin6_len = sizeof(struct sockaddr_in6); \
111 (sin6)->sin6_family = AF_INET6; \
112 (sin6)->sin6_port = lwip_htons((port)); \
113 (sin6)->sin6_flowinfo = 0; \
114 inet6_addr_from_ip6addr(&(sin6)->sin6_addr, ipaddr); \
115 (sin6)->sin6_scope_id = ip6_addr_zone(ipaddr); }while(0)
116 #define SOCKADDR6_TO_IP6ADDR_PORT(sin6, ipaddr, port) do { \
117 inet6_addr_to_ip6addr(ip_2_ip6(ipaddr), &((sin6)->sin6_addr)); \
118 if (ip6_addr_has_scope(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { \
119 ip6_addr_set_zone(ip_2_ip6(ipaddr), (u8_t)((sin6)->sin6_scope_id)); \
120 } \
121 (port) = lwip_ntohs((sin6)->sin6_port); }while(0)
122 #endif /* LWIP_IPV6 */
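
/* Illustrative sketch (not part of the stack): filling a struct sockaddr_in6 the
 * way the macros above expect it, e.g. for connecting to a link-local peer. The
 * sin6_scope_id carries the netif index and is mapped to the lwIP address zone
 * by SOCKADDR6_TO_IP6ADDR_PORT. Port, address and index values are assumptions
 * for the example only.
 *
 *   struct sockaddr_in6 sa6;
 *   ip6_addr_t tmp;
 *   memset(&sa6, 0, sizeof(sa6));
 *   sa6.sin6_len    = sizeof(sa6);
 *   sa6.sin6_family = AF_INET6;
 *   sa6.sin6_port   = lwip_htons(80);
 *   ip6addr_aton("fe80::1", &tmp);
 *   inet6_addr_from_ip6addr(&sa6.sin6_addr, &tmp);
 *   sa6.sin6_scope_id = 1;   // netif index of the link the peer is on
 *   lwip_connect(s, (struct sockaddr *)&sa6, sizeof(sa6));
 */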
123
124 #if LWIP_IPV4 && LWIP_IPV6
125 static void sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port);
126
127 #define IS_SOCK_ADDR_LEN_VALID(namelen) (((namelen) == sizeof(struct sockaddr_in)) || \
128 ((namelen) == sizeof(struct sockaddr_in6)))
129 #define IS_SOCK_ADDR_TYPE_VALID(name) (((name)->sa_family == AF_INET) || \
130 ((name)->sa_family == AF_INET6))
131 #define SOCK_ADDR_TYPE_MATCH(name, sock) \
132 ((((name)->sa_family == AF_INET) && !(NETCONNTYPE_ISIPV6((sock)->conn->type))) || \
133 (((name)->sa_family == AF_INET6) && (NETCONNTYPE_ISIPV6((sock)->conn->type))))
134 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) do { \
135 if (IP_IS_ANY_TYPE_VAL(*ipaddr) || IP_IS_V6_VAL(*ipaddr)) { \
136 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port); \
137 } else { \
138 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port); \
139 } } while(0)
140 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) sockaddr_to_ipaddr_port(sockaddr, ipaddr, &(port))
141 #define DOMAIN_TO_NETCONN_TYPE(domain, type) (((domain) == AF_INET) ? \
142 (type) : (enum netconn_type)((type) | NETCONN_TYPE_IPV6))
143 #elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */
144 #define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in6))
145 #define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET6)
146 #define SOCK_ADDR_TYPE_MATCH(name, sock) 1
147 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
148 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port)
149 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
150 SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, port)
151 #define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
152 #else /*-> LWIP_IPV4: LWIP_IPV4 && LWIP_IPV6 */
153 #define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in))
154 #define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET)
155 #define SOCK_ADDR_TYPE_MATCH(name, sock) 1
156 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
157 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port)
158 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
159 SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, port)
160 #define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
161 #endif /* LWIP_IPV6 */
162
163 #define IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) (((name)->sa_family == AF_UNSPEC) || \
164 IS_SOCK_ADDR_TYPE_VALID(name))
165 #define SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock) (((name)->sa_family == AF_UNSPEC) || \
166 SOCK_ADDR_TYPE_MATCH(name, sock))
167 #define IS_SOCK_ADDR_ALIGNED(name) ((((mem_ptr_t)(name)) % 4) == 0)
168
169
170 #define LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype) do { if ((optlen) < sizeof(opttype)) { done_socket(sock); return EINVAL; }}while(0)
171 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, opttype) do { \
172 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
173 if ((sock)->conn == NULL) { done_socket(sock); return EINVAL; } }while(0)
174 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype) do { \
175 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
176 if (((sock)->conn == NULL) || ((sock)->conn->pcb.tcp == NULL)) { done_socket(sock); return EINVAL; } }while(0)
177 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, opttype, netconntype) do { \
178 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype); \
179 if (NETCONNTYPE_GROUP(netconn_type((sock)->conn)) != netconntype) { done_socket(sock); return ENOPROTOOPT; } }while(0)
180
181
182 #define LWIP_SETGETSOCKOPT_DATA_VAR_REF(name) API_VAR_REF(name)
183 #define LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_setgetsockopt_data, name)
184 #define LWIP_SETGETSOCKOPT_DATA_VAR_FREE(name) API_VAR_FREE(MEMP_SOCKET_SETGETSOCKOPT_DATA, name)
185 #if LWIP_MPU_COMPATIBLE
186 #define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) do { \
187 name = (struct lwip_setgetsockopt_data *)memp_malloc(MEMP_SOCKET_SETGETSOCKOPT_DATA); \
188 if (name == NULL) { \
189 sock_set_errno(sock, ENOMEM); \
190 done_socket(sock); \
191 return -1; \
192 } }while(0)
193 #else /* LWIP_MPU_COMPATIBLE */
194 #define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock)
195 #endif /* LWIP_MPU_COMPATIBLE */
196
197 #if LWIP_SO_SNDRCVTIMEO_NONSTANDARD
198 #define LWIP_SO_SNDRCVTIMEO_OPTTYPE int
199 #define LWIP_SO_SNDRCVTIMEO_SET(optval, val) (*(int *)(optval) = (val))
200 #define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((long)*(const int*)(optval))
201 #else
202 #define LWIP_SO_SNDRCVTIMEO_OPTTYPE struct timeval
203 #define LWIP_SO_SNDRCVTIMEO_SET(optval, val) do { \
204 u32_t loc = (val); \
205 ((struct timeval *)(optval))->tv_sec = (long)((loc) / 1000U); \
206 ((struct timeval *)(optval))->tv_usec = (long)(((loc) % 1000U) * 1000U); }while(0)
207 #define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((((const struct timeval *)(optval))->tv_sec * 1000) + (((const struct timeval *)(optval))->tv_usec / 1000))
208 #endif
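
/* Illustrative sketch (not part of the stack): setting a 1500 ms receive timeout
 * on a socket 's'. With LWIP_SO_SNDRCVTIMEO_NONSTANDARD == 0 (the usual
 * configuration) the option value is a struct timeval, matching the macros
 * above; with the non-standard variant it is a plain int holding milliseconds.
 * Error handling is omitted for brevity.
 *
 *   struct timeval tv;
 *   tv.tv_sec  = 1;
 *   tv.tv_usec = 500000;
 *   lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 *   int timeout_ms = 1500;   // LWIP_SO_SNDRCVTIMEO_NONSTANDARD == 1 variant
 *   lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &timeout_ms, sizeof(timeout_ms));
 */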
209
210
211 /** A struct sockaddr replacement that has the same alignment as sockaddr_in/
212 * sockaddr_in6 if instantiated.
213 */
214 union sockaddr_aligned {
215 struct sockaddr sa;
216 #if LWIP_IPV6
217 struct sockaddr_in6 sin6;
218 #endif /* LWIP_IPV6 */
219 #if LWIP_IPV4
220 struct sockaddr_in sin;
221 #endif /* LWIP_IPV4 */
222 };
223
224 /* Define the number of IPv4 multicast memberships, default is one per socket */
225 #ifndef LWIP_SOCKET_MAX_MEMBERSHIPS
226 #define LWIP_SOCKET_MAX_MEMBERSHIPS NUM_SOCKETS
227 #endif
228
229 #if LWIP_IGMP
230 /* This is to keep track of IP_ADD_MEMBERSHIP calls to drop the membership when
231 a socket is closed */
232 struct lwip_socket_multicast_pair {
233 /** the socket */
234 struct lwip_sock *sock;
235 /** the interface address */
236 ip4_addr_t if_addr;
237 /** the group address */
238 ip4_addr_t multi_addr;
239 };
240
241 static struct lwip_socket_multicast_pair socket_ipv4_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
242
243 static int lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
244 static void lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
245 static void lwip_socket_drop_registered_memberships(int s);
246 #endif /* LWIP_IGMP */
247
248 #if LWIP_IPV6_MLD
249 /* This is to keep track of IP_JOIN_GROUP calls to drop the membership when
250 a socket is closed */
251 struct lwip_socket_multicast_mld6_pair {
252 /** the socket */
253 struct lwip_sock *sock;
254 /** the interface index */
255 u8_t if_idx;
256 /** the group address */
257 ip6_addr_t multi_addr;
258 };
259
260 static struct lwip_socket_multicast_mld6_pair socket_ipv6_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
261
262 static int lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
263 static void lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
264 static void lwip_socket_drop_registered_mld6_memberships(int s);
265 #endif /* LWIP_IPV6_MLD */
266
267 /** The global array of available sockets */
268 static struct lwip_sock sockets[NUM_SOCKETS];
269
270 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
271 #if LWIP_TCPIP_CORE_LOCKING
272 /* protect the select_cb_list using core lock */
273 #define LWIP_SOCKET_SELECT_DECL_PROTECT(lev)
274 #define LWIP_SOCKET_SELECT_PROTECT(lev) LOCK_TCPIP_CORE()
275 #define LWIP_SOCKET_SELECT_UNPROTECT(lev) UNLOCK_TCPIP_CORE()
276 #else /* LWIP_TCPIP_CORE_LOCKING */
277 /* protect the select_cb_list using SYS_LIGHTWEIGHT_PROT */
278 #define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev)
279 #define LWIP_SOCKET_SELECT_PROTECT(lev) SYS_ARCH_PROTECT(lev)
280 #define LWIP_SOCKET_SELECT_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev)
281 /** This counter is increased from lwip_select when the list is changed
282 and checked in select_check_waiters to see if it has changed. */
283 static volatile int select_cb_ctr;
284 #endif /* LWIP_TCPIP_CORE_LOCKING */
285 /** The global list of tasks waiting for select */
286 static struct lwip_select_cb *select_cb_list;
287 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
288
289 #define sock_set_errno(sk, e) do { \
290 const int sockerr = (e); \
291 set_errno(sockerr); \
292 } while (0)
293
294 /* Forward declaration of some functions */
295 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
296 static void event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len);
297 #define DEFAULT_SOCKET_EVENTCB event_callback
298 static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent);
299 #else
300 #define DEFAULT_SOCKET_EVENTCB NULL
301 #endif
302 #if !LWIP_TCPIP_CORE_LOCKING
303 static void lwip_getsockopt_callback(void *arg);
304 static void lwip_setsockopt_callback(void *arg);
305 #endif
306 static int lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen);
307 static int lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen);
308 static int free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn,
309 union lwip_sock_lastdata *lastdata);
310 static void free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata);
311
312 #if LWIP_IPV4 && LWIP_IPV6
313 static void
314 sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port)
315 {
316 if ((sockaddr->sa_family) == AF_INET6) {
317 SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6 *)(const void *)(sockaddr), ipaddr, *port);
318 ipaddr->type = IPADDR_TYPE_V6;
319 } else {
320 SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in *)(const void *)(sockaddr), ipaddr, *port);
321 ipaddr->type = IPADDR_TYPE_V4;
322 }
323 }
324 #endif /* LWIP_IPV4 && LWIP_IPV6 */
325
326 /** LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */
327 void
328 lwip_socket_thread_init(void)
329 {
330 netconn_thread_init();
331 }
332
333 /** LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */
334 void
335 lwip_socket_thread_cleanup(void)
336 {
337 netconn_thread_cleanup();
338 }
339
340 #if LWIP_NETCONN_FULLDUPLEX
341 /* Thread-safe increment of sock->fd_used, with overflow check */
342 static int
343 sock_inc_used(struct lwip_sock *sock)
344 {
345 int ret;
346 SYS_ARCH_DECL_PROTECT(lev);
347
348 LWIP_ASSERT("sock != NULL", sock != NULL);
349
350 SYS_ARCH_PROTECT(lev);
351 if (sock->fd_free_pending) {
352 /* prevent new usage of this socket if free is pending */
353 ret = 0;
354 } else {
355 ++sock->fd_used;
356 ret = 1;
357 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
358 }
359 SYS_ARCH_UNPROTECT(lev);
360 return ret;
361 }
362
363 /* Like sock_inc_used(), but called under SYS_ARCH_PROTECT lock. */
364 static int
365 sock_inc_used_locked(struct lwip_sock *sock)
366 {
367 LWIP_ASSERT("sock != NULL", sock != NULL);
368
369 if (sock->fd_free_pending) {
370 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
371 return 0;
372 }
373
374 ++sock->fd_used;
375 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
376 return 1;
377 }
378
379 /* In full-duplex mode, sock->fd_used != 0 prevents a socket descriptor from being
380 * released (and possibly reused) when used from more than one thread
381 * (e.g. read-while-write or close-while-write, etc)
382 * This function is called at the end of functions using (try)get_socket*().
383 */
384 static void
385 done_socket(struct lwip_sock *sock)
386 {
387 int freed = 0;
388 int is_tcp = 0;
389 struct netconn *conn = NULL;
390 union lwip_sock_lastdata lastdata;
391 SYS_ARCH_DECL_PROTECT(lev);
392 LWIP_ASSERT("sock != NULL", sock != NULL);
393
394 SYS_ARCH_PROTECT(lev);
395 LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
396 if (--sock->fd_used == 0) {
397 if (sock->fd_free_pending) {
398 /* free the socket */
399 sock->fd_used = 1;
400 is_tcp = sock->fd_free_pending & LWIP_SOCK_FD_FREE_TCP;
401 freed = free_socket_locked(sock, is_tcp, &conn, &lastdata);
402 }
403 }
404 SYS_ARCH_UNPROTECT(lev);
405
406 if (freed) {
407 free_socket_free_elements(is_tcp, conn, &lastdata);
408 }
409 }
410
411 #else /* LWIP_NETCONN_FULLDUPLEX */
412 #define sock_inc_used(sock) 1
413 #define sock_inc_used_locked(sock) 1
414 #define done_socket(sock)
415 #endif /* LWIP_NETCONN_FULLDUPLEX */
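
/* Illustrative sketch (not part of the stack): the reference-counting pattern
 * the full-duplex support above implies for the socket functions below. Every
 * public call pins the descriptor with get_socket()/tryget_socket() (which
 * increments fd_used) and releases it with done_socket(); the actual free is
 * deferred until the last user is done. Without LWIP_NETCONN_FULLDUPLEX,
 * done_socket() is a no-op and the pattern degenerates gracefully.
 *
 *   struct lwip_sock *sock = get_socket(s);
 *   if (!sock) {
 *     return -1;              // errno already set to EBADF
 *   }
 *   // ... use sock->conn; another thread may close(s) concurrently ...
 *   done_socket(sock);        // may perform the deferred free
 */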
416
417 /* Translate a socket 'int' into a pointer (only fails if the index is invalid) */
418 static struct lwip_sock *
419 tryget_socket_unconn_nouse(int fd)
420 {
421 int s = fd - LWIP_SOCKET_OFFSET;
422 if ((s < 0) || (s >= NUM_SOCKETS)) {
423 LWIP_DEBUGF(SOCKETS_DEBUG, ("tryget_socket_unconn(%d): invalid\n", fd));
424 return NULL;
425 }
426 return &sockets[s];
427 }
428
429 struct lwip_sock *
430 lwip_socket_dbg_get_socket(int fd)
431 {
432 return tryget_socket_unconn_nouse(fd);
433 }
434
435 /* Translate a socket 'int' into a pointer (only fails if the index is invalid) */
436 static struct lwip_sock *
437 tryget_socket_unconn(int fd)
438 {
439 struct lwip_sock *ret = tryget_socket_unconn_nouse(fd);
440 if (ret != NULL) {
441 if (!sock_inc_used(ret)) {
442 return NULL;
443 }
444 }
445 return ret;
446 }
447
448 /* Like tryget_socket_unconn(), but called under SYS_ARCH_PROTECT lock. */
449 static struct lwip_sock *
450 tryget_socket_unconn_locked(int fd)
451 {
452 struct lwip_sock *ret = tryget_socket_unconn_nouse(fd);
453 if (ret != NULL) {
454 if (!sock_inc_used_locked(ret)) {
455 return NULL;
456 }
457 }
458 return ret;
459 }
460
461 /**
462 * Same as get_socket but doesn't set errno
463 *
464 * @param fd externally used socket index
465 * @return struct lwip_sock for the socket or NULL if not found
466 */
467 static struct lwip_sock *
468 tryget_socket(int fd)
469 {
470 struct lwip_sock *sock = tryget_socket_unconn(fd);
471 if (sock != NULL) {
472 if (sock->conn) {
473 return sock;
474 }
475 done_socket(sock);
476 }
477 return NULL;
478 }
479
480 /**
481 * Map an externally used socket index to the internal socket representation.
482 *
483 * @param fd externally used socket index
484 * @return struct lwip_sock for the socket or NULL if not found
485 */
486 static struct lwip_sock *
487 get_socket(int fd)
488 {
489 struct lwip_sock *sock = tryget_socket(fd);
490 if (!sock) {
491 if ((fd < LWIP_SOCKET_OFFSET) || (fd >= (LWIP_SOCKET_OFFSET + NUM_SOCKETS))) {
492 LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): invalid\n", fd));
493 }
494 set_errno(EBADF);
495 return NULL;
496 }
497 return sock;
498 }
499
500 /**
501 * Allocate a new socket for a given netconn.
502 *
503 * @param newconn the netconn for which to allocate a socket
504 * @param accepted 1 if socket has been created by accept(),
505 * 0 if socket has been created by socket()
506 * @return the index of the new socket; -1 on error
507 */
508 static int
509 alloc_socket(struct netconn *newconn, int accepted)
510 {
511 int i;
512 SYS_ARCH_DECL_PROTECT(lev);
513 LWIP_UNUSED_ARG(accepted);
514
515 /* allocate a new socket identifier */
516 for (i = 0; i < NUM_SOCKETS; ++i) {
517 /* Protect socket array */
518 SYS_ARCH_PROTECT(lev);
519 if (!sockets[i].conn) {
520 #if LWIP_NETCONN_FULLDUPLEX
521 if (sockets[i].fd_used) {
522 SYS_ARCH_UNPROTECT(lev);
523 continue;
524 }
525 sockets[i].fd_used = 1;
526 sockets[i].fd_free_pending = 0;
527 #endif
528 sockets[i].conn = newconn;
529 /* The socket is not yet known to anyone, so no need to protect
530 after having marked it as used. */
531 SYS_ARCH_UNPROTECT(lev);
532 sockets[i].lastdata.pbuf = NULL;
533 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
534 LWIP_ASSERT("sockets[i].select_waiting == 0", sockets[i].select_waiting == 0);
535 sockets[i].rcvevent = 0;
536 /* TCP sendbuf is empty, but the socket is not yet writable until connected
537 * (unless it has been created by accept()). */
538 sockets[i].sendevent = (NETCONNTYPE_GROUP(newconn->type) == NETCONN_TCP ? (accepted != 0) : 1);
539 sockets[i].errevent = 0;
540 init_waitqueue_head(&sockets[i].wq);
541 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
542 return i + LWIP_SOCKET_OFFSET;
543 }
544 SYS_ARCH_UNPROTECT(lev);
545 }
546 return -1;
547 }
548
549 /** Free a socket (under lock)
550 *
551 * @param sock the socket to free
552 * @param is_tcp != 0 for TCP sockets, used to free lastdata
553 * @param conn the socket's netconn is stored here, must be freed externally
554 * @param lastdata lastdata is stored here, must be freed externally
555 */
556 static int
557 free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn,
558 union lwip_sock_lastdata *lastdata)
559 {
560 #if LWIP_NETCONN_FULLDUPLEX
561 LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
562 sock->fd_used--;
563 if (sock->fd_used > 0) {
564 sock->fd_free_pending = LWIP_SOCK_FD_FREE_FREE | (is_tcp ? LWIP_SOCK_FD_FREE_TCP : 0);
565 return 0;
566 }
567 #else /* LWIP_NETCONN_FULLDUPLEX */
568 LWIP_UNUSED_ARG(is_tcp);
569 #endif /* LWIP_NETCONN_FULLDUPLEX */
570
571 *lastdata = sock->lastdata;
572 sock->lastdata.pbuf = NULL;
573 *conn = sock->conn;
574 sock->conn = NULL;
575 return 1;
576 }
577
578 /** Free a socket's leftover members.
579 */
580 static void
581 free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata)
582 {
583 if (lastdata->pbuf != NULL) {
584 if (is_tcp) {
585 pbuf_free(lastdata->pbuf);
586 } else {
587 netbuf_delete(lastdata->netbuf);
588 }
589 }
590 if (conn != NULL) {
591 /* netconn_prepare_delete() has already been called, here we only free the conn */
592 netconn_delete(conn);
593 }
594 }
595
596 /** Free a socket. The socket's netconn must have been
597 * deleted before!
598 *
599 * @param sock the socket to free
600 * @param is_tcp != 0 for TCP sockets, used to free lastdata
601 */
602 static void
603 free_socket(struct lwip_sock *sock, int is_tcp)
604 {
605 int freed;
606 struct netconn *conn;
607 union lwip_sock_lastdata lastdata;
608 SYS_ARCH_DECL_PROTECT(lev);
609
610 /* Protect socket array */
611 SYS_ARCH_PROTECT(lev);
612
613 freed = free_socket_locked(sock, is_tcp, &conn, &lastdata);
614 SYS_ARCH_UNPROTECT(lev);
615 /* don't use 'sock' after this line, as another task might have allocated it */
616
617 if (freed) {
618 free_socket_free_elements(is_tcp, conn, &lastdata);
619 }
620 }
621
622 /* Below this, the well-known socket functions are implemented.
623 * Use google.com or opengroup.org to get a good description :-)
624 *
625 * Exceptions are documented!
626 */
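
/* Illustrative sketch (not part of the stack): a minimal blocking TCP echo
 * server built on the calls implemented below, assuming the stack and a netif
 * are already up. Port number and buffer size are arbitrary choices for the
 * example; error handling is reduced to early bail-outs.
 *
 *   int srv = lwip_socket(AF_INET, SOCK_STREAM, 0);
 *   struct sockaddr_in addr;
 *   memset(&addr, 0, sizeof(addr));
 *   addr.sin_len         = sizeof(addr);
 *   addr.sin_family      = AF_INET;
 *   addr.sin_port        = lwip_htons(7);
 *   addr.sin_addr.s_addr = lwip_htonl(INADDR_ANY);
 *   if ((srv < 0) ||
 *       (lwip_bind(srv, (struct sockaddr *)&addr, sizeof(addr)) < 0) ||
 *       (lwip_listen(srv, 1) < 0)) {
 *     return;
 *   }
 *   for (;;) {
 *     char buf[128];
 *     ssize_t n;
 *     int c = lwip_accept(srv, NULL, NULL);
 *     if (c < 0) {
 *       continue;
 *     }
 *     while ((n = lwip_recv(c, buf, sizeof(buf), 0)) > 0) {
 *       lwip_send(c, buf, (size_t)n, 0);
 *     }
 *     lwip_close(c);
 *   }
 */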
627
628 int
629 lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
630 {
631 struct lwip_sock *sock, *nsock;
632 struct netconn *newconn;
633 ip_addr_t naddr;
634 u16_t port = 0;
635 int newsock;
636 err_t err;
637 int recvevent;
638 SYS_ARCH_DECL_PROTECT(lev);
639
640 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d)...\n", s));
641 sock = get_socket(s);
642 if (!sock) {
643 return -1;
644 }
645
646 /* wait for a new connection */
647 err = netconn_accept(sock->conn, &newconn);
648 if (err != ERR_OK) {
649 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_accept failed, err=%d\n", s, err));
650 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
651 sock_set_errno(sock, EOPNOTSUPP);
652 } else if (err == ERR_CLSD) {
653 sock_set_errno(sock, EINVAL);
654 } else {
655 sock_set_errno(sock, err_to_errno(err));
656 }
657 done_socket(sock);
658 return -1;
659 }
660 LWIP_ASSERT("newconn != NULL", newconn != NULL);
661
662 newsock = alloc_socket(newconn, 1);
663 if (newsock == -1) {
664 netconn_delete(newconn);
665 sock_set_errno(sock, ENFILE);
666 done_socket(sock);
667 return -1;
668 }
669 LWIP_ASSERT("invalid socket index", (newsock >= LWIP_SOCKET_OFFSET) && (newsock < NUM_SOCKETS + LWIP_SOCKET_OFFSET));
670 nsock = &sockets[newsock - LWIP_SOCKET_OFFSET];
671
672 /* See event_callback: data may come in right away after an accept, even
673 * before the server task has created a new socket for the connection.
674 * In that case, newconn->socket is counted down (newconn->socket--),
675 * so nsock->rcvevent is >= 1 here!
676 */
677 SYS_ARCH_PROTECT(lev);
678 recvevent = (s16_t)(-1 - newconn->socket);
679 newconn->socket = newsock;
680 SYS_ARCH_UNPROTECT(lev);
681
682 if (newconn->callback) {
683 LOCK_TCPIP_CORE();
684 while (recvevent > 0) {
685 recvevent--;
686 newconn->callback(newconn, NETCONN_EVT_RCVPLUS, 0);
687 }
688 UNLOCK_TCPIP_CORE();
689 }
690
691 /* Note that POSIX only requires us to check addr is non-NULL. addrlen must
692 * not be NULL if addr is valid.
693 */
694 if ((addr != NULL) && (addrlen != NULL)) {
695 union sockaddr_aligned tempaddr;
696 /* get the IP address and port of the remote host */
697 err = netconn_peer(newconn, &naddr, &port);
698 if (err != ERR_OK) {
699 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_peer failed, err=%d\n", s, err));
700 free_socket(nsock, 1);
701 sock_set_errno(sock, err_to_errno(err));
702 done_socket(sock);
703 return -1;
704 }
705
706 IPADDR_PORT_TO_SOCKADDR(&tempaddr, &naddr, port);
707 if (*addrlen > tempaddr.sa.sa_len) {
708 *addrlen = tempaddr.sa.sa_len;
709 }
710 MEMCPY(addr, &tempaddr, *addrlen);
711
712 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d addr=", s, newsock));
713 ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
714 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", port));
715 } else {
716 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d", s, newsock));
717 }
718
719 sock_set_errno(sock, 0);
720 done_socket(sock);
721 done_socket(nsock);
722 return newsock;
723 }
724
725 int
726 lwip_bind(int s, const struct sockaddr *name, socklen_t namelen)
727 {
728 struct lwip_sock *sock;
729 ip_addr_t local_addr;
730 u16_t local_port;
731 err_t err;
732
733 sock = get_socket(s);
734 if (!sock) {
735 return -1;
736 }
737
738 if (!SOCK_ADDR_TYPE_MATCH(name, sock)) {
739 /* sockaddr does not match socket type (IPv4/IPv6) */
740 sock_set_errno(sock, err_to_errno(ERR_VAL));
741 done_socket(sock);
742 return -1;
743 }
744
745 /* check size, family and alignment of 'name' */
746 LWIP_ERROR("lwip_bind: invalid address", (IS_SOCK_ADDR_LEN_VALID(namelen) &&
747 IS_SOCK_ADDR_TYPE_VALID(name) && IS_SOCK_ADDR_ALIGNED(name)),
748 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
749 LWIP_UNUSED_ARG(namelen);
750
751 SOCKADDR_TO_IPADDR_PORT(name, &local_addr, local_port);
752 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d, addr=", s));
753 ip_addr_debug_print_val(SOCKETS_DEBUG, local_addr);
754 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", local_port));
755
756 #if LWIP_IPV4 && LWIP_IPV6
757 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
758 if (IP_IS_V6_VAL(local_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&local_addr))) {
759 unmap_ipv4_mapped_ipv6(ip_2_ip4(&local_addr), ip_2_ip6(&local_addr));
760 IP_SET_TYPE_VAL(local_addr, IPADDR_TYPE_V4);
761 }
762 #endif /* LWIP_IPV4 && LWIP_IPV6 */
763
764 err = netconn_bind(sock->conn, &local_addr, local_port);
765
766 if (err != ERR_OK) {
767 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) failed, err=%d\n", s, err));
768 sock_set_errno(sock, err_to_errno(err));
769 done_socket(sock);
770 return -1;
771 }
772
773 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) succeeded\n", s));
774 sock_set_errno(sock, 0);
775 done_socket(sock);
776 return 0;
777 }
778
779 int
780 lwip_close(int s)
781 {
782 #if LWIP_ENABLE_DISTRIBUTED_NET
783 if (!is_distributed_net_enabled()) {
784 return lwip_close_internal(s);
785 }
786 return distributed_net_close(s);
787 }
788
789 int
790 lwip_close_internal(int s)
791 {
792 #endif
793 struct lwip_sock *sock;
794 int is_tcp = 0;
795 err_t err;
796
797 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_close(%d)\n", s));
798
799 sock = get_socket(s);
800 if (!sock) {
801 return -1;
802 }
803
804 if (sock->conn != NULL) {
805 is_tcp = NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP;
806 } else {
807 LWIP_ASSERT("sock->lastdata == NULL", sock->lastdata.pbuf == NULL);
808 }
809
810 #if LWIP_IGMP
811 /* drop all possibly joined IGMP memberships */
812 lwip_socket_drop_registered_memberships(s);
813 #endif /* LWIP_IGMP */
814 #if LWIP_IPV6_MLD
815 /* drop all possibly joined MLD6 memberships */
816 lwip_socket_drop_registered_mld6_memberships(s);
817 #endif /* LWIP_IPV6_MLD */
818
819 err = netconn_prepare_delete(sock->conn);
820 if (err != ERR_OK) {
821 sock_set_errno(sock, err_to_errno(err));
822 done_socket(sock);
823 return -1;
824 }
825
826 free_socket(sock, is_tcp);
827 set_errno(0);
828 return 0;
829 }
830
831 int
832 lwip_connect(int s, const struct sockaddr *name, socklen_t namelen)
833 {
834 #if LWIP_ENABLE_DISTRIBUTED_NET
835 if (!is_distributed_net_enabled()) {
836 return lwip_connect_internal(s, name, namelen);
837 }
838 return distributed_net_connect(s, name, namelen);
839 }
840
841 int
842 lwip_connect_internal(int s, const struct sockaddr *name, socklen_t namelen)
843 {
844 #endif
845 struct lwip_sock *sock;
846 err_t err;
847
848 sock = get_socket(s);
849 if (!sock) {
850 return -1;
851 }
852
853 if (!SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock)) {
854 /* sockaddr does not match socket type (IPv4/IPv6) */
855 sock_set_errno(sock, err_to_errno(ERR_VAL));
856 done_socket(sock);
857 return -1;
858 }
859
860 LWIP_UNUSED_ARG(namelen);
861 if (name->sa_family == AF_UNSPEC) {
862 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, AF_UNSPEC)\n", s));
863 err = netconn_disconnect(sock->conn);
864 } else {
865 ip_addr_t remote_addr;
866 u16_t remote_port;
867
868 /* check size, family and alignment of 'name' */
869 LWIP_ERROR("lwip_connect: invalid address", IS_SOCK_ADDR_LEN_VALID(namelen) &&
870 IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) && IS_SOCK_ADDR_ALIGNED(name),
871 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
872
873 SOCKADDR_TO_IPADDR_PORT(name, &remote_addr, remote_port);
874 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, addr=", s));
875 ip_addr_debug_print_val(SOCKETS_DEBUG, remote_addr);
876 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", remote_port));
877
878 #if LWIP_IPV4 && LWIP_IPV6
879 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
880 if (IP_IS_V6_VAL(remote_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&remote_addr))) {
881 unmap_ipv4_mapped_ipv6(ip_2_ip4(&remote_addr), ip_2_ip6(&remote_addr));
882 IP_SET_TYPE_VAL(remote_addr, IPADDR_TYPE_V4);
883 }
884 #endif /* LWIP_IPV4 && LWIP_IPV6 */
885
886 err = netconn_connect(sock->conn, &remote_addr, remote_port);
887 }
888
889 if (err != ERR_OK) {
890 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) failed, err=%d\n", s, err));
891 sock_set_errno(sock, err_to_errno(err));
892 done_socket(sock);
893 return -1;
894 }
895
896 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) succeeded\n", s));
897 sock_set_errno(sock, 0);
898 done_socket(sock);
899 return 0;
900 }
901
902 /**
903 * Set a socket into listen mode.
904 * The socket may not have been used for another connection previously.
905 *
906 * @param s the socket to set to listening mode
907 * @param backlog (ATTENTION: needs TCP_LISTEN_BACKLOG=1)
908 * @return 0 on success, non-zero on failure
909 */
910 int
911 lwip_listen(int s, int backlog)
912 {
913 struct lwip_sock *sock;
914 err_t err;
915
916 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d, backlog=%d)\n", s, backlog));
917
918 sock = get_socket(s);
919 if (!sock) {
920 return -1;
921 }
922
923 /* limit the "backlog" parameter to fit in a u8_t */
924 backlog = LWIP_MIN(LWIP_MAX(backlog, 0), 0xff);
925
926 err = netconn_listen_with_backlog(sock->conn, (u8_t)backlog);
927
928 if (err != ERR_OK) {
929 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d) failed, err=%d\n", s, err));
930 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
931 sock_set_errno(sock, EOPNOTSUPP);
932 } else {
933 sock_set_errno(sock, err_to_errno(err));
934 }
935 done_socket(sock);
936 return -1;
937 }
938
939 sock_set_errno(sock, 0);
940 done_socket(sock);
941 return 0;
942 }
943
944 #if LWIP_TCP
945 /* Helper function to loop over receiving pbufs from netconn
946 * until "len" bytes are received or we're otherwise done.
947 * Keeps sock->lastdata for peeking or partly copying.
948 */
949 static ssize_t
950 lwip_recv_tcp(struct lwip_sock *sock, void *mem, size_t len, int flags)
951 {
952 u8_t apiflags = NETCONN_NOAUTORCVD;
953 ssize_t recvd = 0;
954 ssize_t recv_left = (len <= SSIZE_MAX) ? (ssize_t)len : SSIZE_MAX;
955
956 LWIP_ASSERT("no socket given", sock != NULL);
957 LWIP_ASSERT("this should be checked internally", NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP);
958
959 if (flags & MSG_DONTWAIT) {
960 apiflags |= NETCONN_DONTBLOCK;
961 }
962
963 do {
964 struct pbuf *p;
965 err_t err;
966 u16_t copylen;
967
968 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: top while sock->lastdata=%p\n", (void *)sock->lastdata.pbuf));
969 /* Check if there is data left from the last recv operation. */
970 if (sock->lastdata.pbuf) {
971 p = sock->lastdata.pbuf;
972 } else {
973 /* No data was left from the previous operation, so we try to get
974 some from the network. */
975 err = netconn_recv_tcp_pbuf_flags(sock->conn, &p, apiflags);
976 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: netconn_recv err=%d, pbuf=%p\n",
977 err, (void *)p));
978
979 if (err != ERR_OK) {
980 if (recvd > 0) {
981 /* already received data, return that (this trusts in getting the same error from
982 netconn layer again next time netconn_recv is called) */
983 goto lwip_recv_tcp_done;
984 }
985 /* We should really do some error checking here. */
986 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: p == NULL, error is \"%s\"!\n",
987 lwip_strerr(err)));
988 sock_set_errno(sock, err_to_errno(err));
989 if (err == ERR_CLSD) {
990 return 0;
991 } else {
992 return -1;
993 }
994 }
995 LWIP_ASSERT("p != NULL", p != NULL);
996 sock->lastdata.pbuf = p;
997 }
998
999 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: buflen=%"U16_F" recv_left=%d off=%d\n",
1000 p->tot_len, (int)recv_left, (int)recvd));
1001
1002 if (recv_left > p->tot_len) {
1003 copylen = p->tot_len;
1004 } else {
1005 copylen = (u16_t)recv_left;
1006 }
1007 if (recvd + copylen < recvd) {
1008 /* overflow */
1009 copylen = (u16_t)(SSIZE_MAX - recvd);
1010 }
1011
1012 /* copy the contents of the received buffer into
1013 the supplied memory pointer mem */
1014 pbuf_copy_partial(p, (u8_t *)mem + recvd, copylen, 0);
1015
1016 recvd += copylen;
1017
1018 /* TCP combines multiple pbufs for one recv */
1019 LWIP_ASSERT("invalid copylen, len would underflow", recv_left >= copylen);
1020 recv_left -= copylen;
1021
1022 /* Unless we peek the incoming message... */
1023 if ((flags & MSG_PEEK) == 0) {
1024 /* ... check if there is data left in the pbuf */
1025 LWIP_ASSERT("invalid copylen", p->tot_len >= copylen);
1026 if (p->tot_len - copylen > 0) {
1027 /* If so, it should be saved in the sock structure for the next recv call.
1028 We store the pbuf but hide/free the consumed data: */
1029 sock->lastdata.pbuf = pbuf_free_header(p, copylen);
1030 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: lastdata now pbuf=%p\n", (void *)sock->lastdata.pbuf));
1031 } else {
1032 sock->lastdata.pbuf = NULL;
1033 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: deleting pbuf=%p\n", (void *)p));
1034 pbuf_free(p);
1035 }
1036 }
1037 /* once we have some data to return, only add more if we don't need to wait */
1038 apiflags |= NETCONN_DONTBLOCK | NETCONN_NOFIN;
1039 /* @todo: do we need to support peeking more than one pbuf? */
1040 } while ((recv_left > 0) && !(flags & MSG_PEEK));
1041 lwip_recv_tcp_done:
1042 if ((recvd > 0) && !(flags & MSG_PEEK)) {
1043 /* ensure window update after copying all data */
1044 netconn_tcp_recvd(sock->conn, (size_t)recvd);
1045 }
1046 sock_set_errno(sock, 0);
1047 return recvd;
1048 }
1049 #endif
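
/* Illustrative sketch (not part of the stack): how the flags handled above look
 * from the application's side. MSG_PEEK returns data without consuming
 * sock->lastdata, so the next recv sees the same bytes again; MSG_DONTWAIT maps
 * to NETCONN_DONTBLOCK and fails with EWOULDBLOCK instead of blocking when
 * nothing is queued.
 *
 *   char hdr[4], buf[256];
 *   ssize_t n = lwip_recv(s, hdr, sizeof(hdr), MSG_PEEK);     // look ahead
 *   if (n == sizeof(hdr)) {
 *     n = lwip_recv(s, buf, sizeof(buf), 0);                  // now consume
 *   }
 *   n = lwip_recv(s, buf, sizeof(buf), MSG_DONTWAIT);         // never blocks
 *   if ((n < 0) && (errno == EWOULDBLOCK)) {
 *     // no data queued right now
 *   }
 */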
1050
1051 /* Convert a netbuf's address data to struct sockaddr */
1052 static int
1053 lwip_sock_make_addr(struct netconn *conn, ip_addr_t *fromaddr, u16_t port,
1054 struct sockaddr *from, socklen_t *fromlen)
1055 {
1056 int truncated = 0;
1057 union sockaddr_aligned saddr;
1058
1059 LWIP_UNUSED_ARG(conn);
1060
1061 LWIP_ASSERT("fromaddr != NULL", fromaddr != NULL);
1062 LWIP_ASSERT("from != NULL", from != NULL);
1063 LWIP_ASSERT("fromlen != NULL", fromlen != NULL);
1064
1065 #if LWIP_IPV4 && LWIP_IPV6
1066 /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
1067 if (NETCONNTYPE_ISIPV6(netconn_type(conn)) && IP_IS_V4(fromaddr)) {
1068 ip4_2_ipv4_mapped_ipv6(ip_2_ip6(fromaddr), ip_2_ip4(fromaddr));
1069 IP_SET_TYPE(fromaddr, IPADDR_TYPE_V6);
1070 }
1071 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1072
1073 IPADDR_PORT_TO_SOCKADDR(&saddr, fromaddr, port);
1074 DF_NADDR(*fromaddr);
1075 if (*fromlen < saddr.sa.sa_len) {
1076 truncated = 1;
1077 } else if (*fromlen > saddr.sa.sa_len) {
1078 *fromlen = saddr.sa.sa_len;
1079 }
1080 MEMCPY(from, &saddr, *fromlen);
1081 return truncated;
1082 }
1083
1084 #if LWIP_TCP
1085 /* Helper function to get a tcp socket's remote address info */
1086 static int
1087 lwip_recv_tcp_from(struct lwip_sock *sock, struct sockaddr *from, socklen_t *fromlen, const char *dbg_fn, int dbg_s, ssize_t dbg_ret)
1088 {
1089 if (sock == NULL) {
1090 return 0;
1091 }
1092 LWIP_UNUSED_ARG(dbg_fn);
1093 LWIP_UNUSED_ARG(dbg_s);
1094 LWIP_UNUSED_ARG(dbg_ret);
1095
1096 #if !SOCKETS_DEBUG
1097 if (from && fromlen)
1098 #endif /* !SOCKETS_DEBUG */
1099 {
1100 /* get remote addr/port from tcp_pcb */
1101 u16_t port;
1102 ip_addr_t tmpaddr;
1103 err_t err = netconn_getaddr(sock->conn, &tmpaddr, &port, 0);
1104 LWIP_DEBUGF(SOCKETS_DEBUG, ("%s(%d): addr=", dbg_fn, dbg_s));
1105 ip_addr_debug_print_val(SOCKETS_DEBUG, tmpaddr);
1106 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", port, (int)dbg_ret));
1107 if (!err && from && fromlen) {
1108 return lwip_sock_make_addr(sock->conn, &tmpaddr, port, from, fromlen);
1109 }
1110 }
1111 return 0;
1112 }
1113 #endif
1114
1115 /* Helper function to receive a netbuf from a udp or raw netconn.
1116 * Keeps sock->lastdata for peeking.
1117 */
1118 static err_t
1119 lwip_recvfrom_udp_raw(struct lwip_sock *sock, int flags, struct msghdr *msg, u16_t *datagram_len, int dbg_s)
1120 {
1121 struct netbuf *buf;
1122 u8_t apiflags;
1123 err_t err;
1124 u16_t buflen, copylen, copied;
1125 int i;
1126
1127 LWIP_UNUSED_ARG(dbg_s);
1128 LWIP_ERROR("lwip_recvfrom_udp_raw: invalid arguments", (msg->msg_iov != NULL) || (msg->msg_iovlen <= 0), return ERR_ARG;);
1129
1130 if (flags & MSG_DONTWAIT) {
1131 apiflags = NETCONN_DONTBLOCK;
1132 } else {
1133 apiflags = 0;
1134 }
1135
1136 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: top sock->lastdata=%p\n", (void *)sock->lastdata.netbuf));
1137 /* Check if there is data left from the last recv operation. */
1138 buf = sock->lastdata.netbuf;
1139 if (buf == NULL) {
1140 /* No data was left from the previous operation, so we try to get
1141 some from the network. */
1142 err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &buf, apiflags);
1143 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: netconn_recv err=%d, netbuf=%p\n",
1144 err, (void *)buf));
1145
1146 if (err != ERR_OK) {
1147 return err;
1148 }
1149 LWIP_ASSERT("buf != NULL", buf != NULL);
1150 sock->lastdata.netbuf = buf;
1151 }
1152 buflen = buf->p->tot_len;
1153 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw: buflen=%"U16_F"\n", buflen));
1154
1155 copied = 0;
1156 /* copy the pbuf payload into the iovs */
1157 for (i = 0; (i < msg->msg_iovlen) && (copied < buflen); i++) {
1158 u16_t len_left = (u16_t)(buflen - copied);
1159 if (msg->msg_iov[i].iov_len > len_left) {
1160 copylen = len_left;
1161 } else {
1162 copylen = (u16_t)msg->msg_iov[i].iov_len;
1163 }
1164
1165 /* copy the contents of the received buffer into
1166 the supplied memory buffer */
1167 pbuf_copy_partial(buf->p, (u8_t *)msg->msg_iov[i].iov_base, copylen, copied);
1168 copied = (u16_t)(copied + copylen);
1169 }
1170
1171 /* Check to see where the data came from. */
1172 #if !SOCKETS_DEBUG
1173 if (msg->msg_name && msg->msg_namelen)
1174 #endif /* !SOCKETS_DEBUG */
1175 {
1176 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw(%d): addr=", dbg_s));
1177 ip_addr_debug_print_val(SOCKETS_DEBUG, *netbuf_fromaddr(buf));
1178 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", netbuf_fromport(buf), copied));
1179 if (msg->msg_name && msg->msg_namelen) {
1180 lwip_sock_make_addr(sock->conn, netbuf_fromaddr(buf), netbuf_fromport(buf),
1181 (struct sockaddr *)msg->msg_name, &msg->msg_namelen);
1182 }
1183 }
1184
1185 /* Initialize flag output */
1186 msg->msg_flags = 0;
1187
1188 if (msg->msg_control) {
1189 u8_t wrote_msg = 0;
1190 #if LWIP_NETBUF_RECVINFO
1191 /* Check if packet info was recorded */
1192 if (buf->flags & NETBUF_FLAG_DESTADDR) {
1193 if (IP_IS_V4(&buf->toaddr)) {
1194 #if LWIP_IPV4
1195 if (msg->msg_controllen >= CMSG_SPACE(sizeof(struct in_pktinfo))) {
1196 struct cmsghdr *chdr = CMSG_FIRSTHDR(msg); /* This will always return a header!! */
1197 struct in_pktinfo *pkti = (struct in_pktinfo *)CMSG_DATA(chdr);
1198 chdr->cmsg_level = IPPROTO_IP;
1199 chdr->cmsg_type = IP_PKTINFO;
1200 chdr->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
1201 pkti->ipi_ifindex = buf->p->if_idx;
1202 inet_addr_from_ip4addr(&pkti->ipi_addr, ip_2_ip4(netbuf_destaddr(buf)));
1203 msg->msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo));
1204 wrote_msg = 1;
1205 } else {
1206 msg->msg_flags |= MSG_CTRUNC;
1207 }
1208 #endif /* LWIP_IPV4 */
1209 }
1210 }
1211 #endif /* LWIP_NETBUF_RECVINFO */
1212
1213 if (!wrote_msg) {
1214 msg->msg_controllen = 0;
1215 }
1216 }
1217
1218 /* If we don't peek the incoming message: zero lastdata pointer and free the netbuf */
1219 if ((flags & MSG_PEEK) == 0) {
1220 sock->lastdata.netbuf = NULL;
1221 netbuf_delete(buf);
1222 }
1223 if (datagram_len) {
1224 *datagram_len = buflen;
1225 }
1226 return ERR_OK;
1227 }
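
/* Illustrative sketch (not part of the stack): reading the destination-address
 * ancillary data the function above emits for UDP sockets when
 * LWIP_NETBUF_RECVINFO is enabled and IP_PKTINFO has been requested via
 * setsockopt(). Buffer sizes are arbitrary example values.
 *
 *   struct sockaddr_storage src;
 *   char payload[512];
 *   u8_t cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
 *   struct iovec iov = { payload, sizeof(payload) };
 *   struct msghdr msg;
 *   struct cmsghdr *cmsg;
 *   memset(&msg, 0, sizeof(msg));
 *   msg.msg_name       = &src;
 *   msg.msg_namelen    = sizeof(src);
 *   msg.msg_iov        = &iov;
 *   msg.msg_iovlen     = 1;
 *   msg.msg_control    = cbuf;
 *   msg.msg_controllen = sizeof(cbuf);
 *   if (lwip_recvmsg(s, &msg, 0) >= 0) {
 *     for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *       if ((cmsg->cmsg_level == IPPROTO_IP) && (cmsg->cmsg_type == IP_PKTINFO)) {
 *         const struct in_pktinfo *pkti = (const struct in_pktinfo *)CMSG_DATA(cmsg);
 *         // pkti->ipi_ifindex: receiving netif index, pkti->ipi_addr: destination address
 *       }
 *     }
 *   }
 */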
1228
1229 ssize_t
1230 lwip_recvfrom(int s, void *mem, size_t len, int flags,
1231 struct sockaddr *from, socklen_t *fromlen)
1232 {
1233 #if LWIP_ENABLE_DISTRIBUTED_NET && LWIP_USE_GET_HOST_BY_NAME_EXTERNAL
1234 if (!is_distributed_net_enabled()) {
1235 return lwip_recvfrom_internal(s, mem, len, flags, from, fromlen);
1236 }
1237 return distributed_net_recvfrom(s, mem, len, flags, from, fromlen);
1238 }
1239
1240 ssize_t
1241 lwip_recvfrom_internal(int s, void *mem, size_t len, int flags,
1242 struct sockaddr *from, socklen_t *fromlen)
1243 {
1244 #endif
1245 struct lwip_sock *sock;
1246 ssize_t ret;
1247
1248 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d, %p, %"SZT_F", 0x%x, ..)\n", s, mem, len, flags));
1249 sock = get_socket(s);
1250 if (!sock) {
1251 return -1;
1252 }
1253 #if LWIP_TCP
1254 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1255 ret = lwip_recv_tcp(sock, mem, len, flags);
1256 lwip_recv_tcp_from(sock, from, fromlen, "lwip_recvfrom", s, ret);
1257 done_socket(sock);
1258 return ret;
1259 } else
1260 #endif
1261 {
1262 u16_t datagram_len = 0;
1263 struct iovec vec;
1264 struct msghdr msg;
1265 err_t err;
1266 vec.iov_base = mem;
1267 vec.iov_len = len;
1268 msg.msg_control = NULL;
1269 msg.msg_controllen = 0;
1270 msg.msg_flags = 0;
1271 msg.msg_iov = &vec;
1272 msg.msg_iovlen = 1;
1273 msg.msg_name = from;
1274 msg.msg_namelen = (fromlen ? *fromlen : 0);
1275 err = lwip_recvfrom_udp_raw(sock, flags, &msg, &datagram_len, s);
1276 if (err != ERR_OK) {
1277 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
1278 s, lwip_strerr(err)));
1279 sock_set_errno(sock, err_to_errno(err));
1280 done_socket(sock);
1281 return -1;
1282 }
1283 ret = (ssize_t)LWIP_MIN(LWIP_MIN(len, datagram_len), SSIZE_MAX);
1284 if (fromlen) {
1285 *fromlen = msg.msg_namelen;
1286 }
1287 }
1288
1289 sock_set_errno(sock, 0);
1290 done_socket(sock);
1291 return ret;
1292 }
1293
1294 ssize_t
1295 lwip_read(int s, void *mem, size_t len)
1296 {
1297 return lwip_recvfrom(s, mem, len, 0, NULL, NULL);
1298 }
1299
1300 ssize_t
1301 lwip_readv(int s, const struct iovec *iov, int iovcnt)
1302 {
1303 struct msghdr msg;
1304
1305 msg.msg_name = NULL;
1306 msg.msg_namelen = 0;
1307 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1308 Blame the opengroup standard for this inconsistency. */
1309 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1310 msg.msg_iovlen = iovcnt;
1311 msg.msg_control = NULL;
1312 msg.msg_controllen = 0;
1313 msg.msg_flags = 0;
1314 return lwip_recvmsg(s, &msg, 0);
1315 }
1316
1317 ssize_t
1318 lwip_recv(int s, void *mem, size_t len, int flags)
1319 {
1320 return lwip_recvfrom(s, mem, len, flags, NULL, NULL);
1321 }
1322
1323 ssize_t
1324 lwip_recvmsg(int s, struct msghdr *message, int flags)
1325 {
1326 struct lwip_sock *sock;
1327 int i;
1328 ssize_t buflen;
1329
1330 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg(%d, message=%p, flags=0x%x)\n", s, (void *)message, flags));
1331 LWIP_ERROR("lwip_recvmsg: invalid message pointer", message != NULL, return ERR_ARG;);
1332 LWIP_ERROR("lwip_recvmsg: unsupported flags", (flags & ~(MSG_PEEK|MSG_DONTWAIT)) == 0,
1333 set_errno(EOPNOTSUPP); return -1;);
1334
1335 if ((message->msg_iovlen <= 0) || (message->msg_iovlen > IOV_MAX)) {
1336 set_errno(EMSGSIZE);
1337 return -1;
1338 }
1339
1340 sock = get_socket(s);
1341 if (!sock) {
1342 return -1;
1343 }
1344
1345 /* check for valid vectors */
1346 buflen = 0;
1347 for (i = 0; i < message->msg_iovlen; i++) {
1348 if ((message->msg_iov[i].iov_base == NULL) || ((ssize_t)message->msg_iov[i].iov_len <= 0) ||
1349 ((size_t)(ssize_t)message->msg_iov[i].iov_len != message->msg_iov[i].iov_len) ||
1350 ((ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len) <= 0)) {
1351 sock_set_errno(sock, err_to_errno(ERR_VAL));
1352 done_socket(sock);
1353 return -1;
1354 }
1355 buflen = (ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len);
1356 }
1357
1358 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1359 #if LWIP_TCP
1360 int recv_flags = flags;
1361 message->msg_flags = 0;
1362 /* recv the data */
1363 buflen = 0;
1364 for (i = 0; i < message->msg_iovlen; i++) {
1365 /* try to receive into this vector's buffer */
1366 ssize_t recvd_local = lwip_recv_tcp(sock, message->msg_iov[i].iov_base, message->msg_iov[i].iov_len, recv_flags);
1367 if (recvd_local > 0) {
1368 /* sum up received bytes */
1369 buflen += recvd_local;
1370 }
1371 if ((recvd_local < 0) || (recvd_local < (int)message->msg_iov[i].iov_len) ||
1372 (flags & MSG_PEEK)) {
1373 /* returned prematurely (or peeking, which might actually be limited to the first iov) */
1374 if (buflen <= 0) {
1375 /* nothing received at all, propagate the error */
1376 buflen = recvd_local;
1377 }
1378 break;
1379 }
1380 /* pass MSG_DONTWAIT to lwip_recv_tcp() to prevent waiting for more data */
1381 recv_flags |= MSG_DONTWAIT;
1382 }
1383 if (buflen > 0) {
1384 /* reset socket error since we have received something */
1385 sock_set_errno(sock, 0);
1386 }
1387 /* " If the socket is connected, the msg_name and msg_namelen members shall be ignored." */
1388 done_socket(sock);
1389 return buflen;
1390 #else /* LWIP_TCP */
1391 sock_set_errno(sock, err_to_errno(ERR_ARG));
1392 done_socket(sock);
1393 return -1;
1394 #endif /* LWIP_TCP */
1395 }
1396 /* else, UDP and RAW NETCONNs */
1397 #if LWIP_UDP || LWIP_RAW
1398 {
1399 u16_t datagram_len = 0;
1400 err_t err;
1401 err = lwip_recvfrom_udp_raw(sock, flags, message, &datagram_len, s);
1402 if (err != ERR_OK) {
1403 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
1404 s, lwip_strerr(err)));
1405 sock_set_errno(sock, err_to_errno(err));
1406 done_socket(sock);
1407 return -1;
1408 }
1409 if (datagram_len > buflen) {
1410 message->msg_flags |= MSG_TRUNC;
1411 }
1412
1413 sock_set_errno(sock, 0);
1414 done_socket(sock);
1415 return (int)datagram_len;
1416 }
1417 #else /* LWIP_UDP || LWIP_RAW */
1418 sock_set_errno(sock, err_to_errno(ERR_ARG));
1419 done_socket(sock);
1420 return -1;
1421 #endif /* LWIP_UDP || LWIP_RAW */
1422 }
1423
1424 ssize_t
1425 lwip_send(int s, const void *data, size_t size, int flags)
1426 {
1427 struct lwip_sock *sock;
1428 err_t err;
1429 u8_t write_flags;
1430 size_t written;
1431
1432 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d, data=%p, size=%"SZT_F", flags=0x%x)\n",
1433 s, data, size, flags));
1434
1435 sock = get_socket(s);
1436 if (!sock) {
1437 return -1;
1438 }
1439
1440 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
1441 #if (LWIP_UDP || LWIP_RAW)
1442 done_socket(sock);
1443 #if LWIP_ENABLE_DISTRIBUTED_NET && LWIP_USE_GET_HOST_BY_NAME_EXTERNAL
1444 return lwip_sendto_internal(s, data, size, flags, NULL, 0);
1445 #else
1446 return lwip_sendto(s, data, size, flags, NULL, 0);
1447 #endif
1448 #else /* (LWIP_UDP || LWIP_RAW) */
1449 sock_set_errno(sock, err_to_errno(ERR_ARG));
1450 done_socket(sock);
1451 return -1;
1452 #endif /* (LWIP_UDP || LWIP_RAW) */
1453 }
1454
1455 write_flags = (u8_t)(NETCONN_COPY |
1456 ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
1457 ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));
1458 written = 0;
1459 err = netconn_write_partly(sock->conn, data, size, write_flags, &written);
1460
1461 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d) err=%d written=%"SZT_F"\n", s, err, written));
1462 sock_set_errno(sock, err_to_errno(err));
1463 done_socket(sock);
1464 /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
1465 return (err == ERR_OK ? (ssize_t)written : -1);
1466 }
1467
1468 ssize_t
1469 lwip_sendmsg(int s, const struct msghdr *msg, int flags)
1470 {
1471 #if LWIP_ENABLE_DISTRIBUTED_NET && LWIP_USE_GET_HOST_BY_NAME_EXTERNAL && LWIP_DISTRIBUTED_NET_ENABLE_SENDMSG
1472 if (!is_distributed_net_enabled()) {
1473 return lwip_sendmsg_internal(s, msg, flags);
1474 }
1475 return distributed_net_sendmsg(s, msg, flags);
1476 }
1477
1478 ssize_t
1479 lwip_sendmsg_internal(int s, const struct msghdr *msg, int flags)
1480 {
1481 #endif
1482 struct lwip_sock *sock;
1483 #if LWIP_TCP
1484 u8_t write_flags;
1485 size_t written;
1486 #endif
1487 err_t err = ERR_OK;
1488
1489 sock = get_socket(s);
1490 if (!sock) {
1491 return -1;
1492 }
1493
1494 LWIP_ERROR("lwip_sendmsg: invalid msghdr", msg != NULL,
1495 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1496 LWIP_ERROR("lwip_sendmsg: invalid msghdr iov", msg->msg_iov != NULL,
1497 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1498 LWIP_ERROR("lwip_sendmsg: maximum iovs exceeded", (msg->msg_iovlen > 0) && (msg->msg_iovlen <= IOV_MAX),
1499 sock_set_errno(sock, EMSGSIZE); done_socket(sock); return -1;);
1500 LWIP_ERROR("lwip_sendmsg: unsupported flags", (flags & ~(MSG_DONTWAIT | MSG_MORE)) == 0,
1501 sock_set_errno(sock, EOPNOTSUPP); done_socket(sock); return -1;);
1502
1503 LWIP_UNUSED_ARG(msg->msg_control);
1504 LWIP_UNUSED_ARG(msg->msg_controllen);
1505 LWIP_UNUSED_ARG(msg->msg_flags);
1506
1507 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1508 #if LWIP_TCP
1509 write_flags = (u8_t)(NETCONN_COPY |
1510 ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
1511 ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));
1512
1513 written = 0;
1514 err = netconn_write_vectors_partly(sock->conn, (struct netvector *)msg->msg_iov, (u16_t)msg->msg_iovlen, write_flags, &written);
1515 sock_set_errno(sock, err_to_errno(err));
1516 done_socket(sock);
1517 /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
1518 return (err == ERR_OK ? (ssize_t)written : -1);
1519 #else /* LWIP_TCP */
1520 sock_set_errno(sock, err_to_errno(ERR_ARG));
1521 done_socket(sock);
1522 return -1;
1523 #endif /* LWIP_TCP */
1524 }
1525 /* else, UDP and RAW NETCONNs */
1526 #if LWIP_UDP || LWIP_RAW
1527 {
1528 struct netbuf chain_buf;
1529 int i;
1530 ssize_t size = 0;
1531
1532 LWIP_UNUSED_ARG(flags);
1533 LWIP_ERROR("lwip_sendmsg: invalid msghdr name", (((msg->msg_name == NULL) && (msg->msg_namelen == 0)) ||
1534 IS_SOCK_ADDR_LEN_VALID(msg->msg_namelen)),
1535 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1536
1537 /* initialize chain buffer with destination */
1538 memset(&chain_buf, 0, sizeof(struct netbuf));
1539 if (msg->msg_name) {
1540 u16_t remote_port;
1541 SOCKADDR_TO_IPADDR_PORT((const struct sockaddr *)msg->msg_name, &chain_buf.addr, remote_port);
1542 netbuf_fromport(&chain_buf) = remote_port;
1543 }
1544 #if LWIP_NETIF_TX_SINGLE_PBUF
1545 for (i = 0; i < msg->msg_iovlen; i++) {
1546 size += msg->msg_iov[i].iov_len;
1547 if ((msg->msg_iov[i].iov_len > INT_MAX) || (size < (int)msg->msg_iov[i].iov_len)) {
1548 /* overflow */
1549 goto sendmsg_emsgsize;
1550 }
1551 }
1552 if (size > 0xFFFF) {
1553 /* overflow */
1554 goto sendmsg_emsgsize;
1555 }
1556 /* Allocate a new netbuf and copy the data into it. */
1557 if (netbuf_alloc(&chain_buf, (u16_t)size) == NULL) {
1558 err = ERR_MEM;
1559 } else {
1560 /* flatten the IO vectors */
1561 size_t offset = 0;
1562 for (i = 0; i < msg->msg_iovlen; i++) {
1563 MEMCPY(&((u8_t *)chain_buf.p->payload)[offset], msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len);
1564 offset += msg->msg_iov[i].iov_len;
1565 }
1566 #if LWIP_CHECKSUM_ON_COPY
1567 {
1568 /* This can be improved by using LWIP_CHKSUM_COPY() and aggregating the checksum for each IO vector */
1569 u16_t chksum = ~inet_chksum_pbuf(chain_buf.p);
1570 netbuf_set_chksum(&chain_buf, chksum);
1571 }
1572 #endif /* LWIP_CHECKSUM_ON_COPY */
1573 err = ERR_OK;
1574 }
1575 #else /* LWIP_NETIF_TX_SINGLE_PBUF */
1576 /* create a chained netbuf from the IO vectors. NOTE: we assemble a pbuf chain
1577 manually to avoid having to allocate, chain, and delete a netbuf for each iov */
1578 for (i = 0; i < msg->msg_iovlen; i++) {
1579 struct pbuf *p;
1580 if (msg->msg_iov[i].iov_len > 0xFFFF) {
1581 /* overflow */
1582 goto sendmsg_emsgsize;
1583 }
1584 p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF);
1585 if (p == NULL) {
1586 err = ERR_MEM; /* let netbuf_delete() cleanup chain_buf */
1587 break;
1588 }
1589 p->payload = msg->msg_iov[i].iov_base;
1590 p->len = p->tot_len = (u16_t)msg->msg_iov[i].iov_len;
1591 /* netbuf empty, add new pbuf */
1592 if (chain_buf.p == NULL) {
1593 chain_buf.p = chain_buf.ptr = p;
1594 /* add pbuf to existing pbuf chain */
1595 } else {
1596 if (chain_buf.p->tot_len + p->len > 0xffff) {
1597 /* chain would exceed the u16_t netbuf length limit */
1598 pbuf_free(p);
1599 goto sendmsg_emsgsize;
1600 }
1601 pbuf_cat(chain_buf.p, p);
1602 }
1603 }
1604 /* save size of total chain */
1605 if (err == ERR_OK) {
1606 size = netbuf_len(&chain_buf);
1607 }
1608 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
1609
1610 if (err == ERR_OK) {
1611 #if LWIP_IPV4 && LWIP_IPV6
1612 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
1613 if (IP_IS_V6_VAL(chain_buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&chain_buf.addr))) {
1614 unmap_ipv4_mapped_ipv6(ip_2_ip4(&chain_buf.addr), ip_2_ip6(&chain_buf.addr));
1615 IP_SET_TYPE_VAL(chain_buf.addr, IPADDR_TYPE_V4);
1616 }
1617 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1618
1619 /* send the data */
1620 err = netconn_send(sock->conn, &chain_buf);
1621 }
1622
1623 /* deallocate the buffer */
1624 netbuf_free(&chain_buf);
1625
1626 sock_set_errno(sock, err_to_errno(err));
1627 done_socket(sock);
1628 return (err == ERR_OK ? size : -1);
1629 sendmsg_emsgsize:
1630 sock_set_errno(sock, EMSGSIZE);
1631 netbuf_free(&chain_buf);
1632 done_socket(sock);
1633 return -1;
1634 }
1635 #else /* LWIP_UDP || LWIP_RAW */
1636 sock_set_errno(sock, err_to_errno(ERR_ARG));
1637 done_socket(sock);
1638 return -1;
1639 #endif /* LWIP_UDP || LWIP_RAW */
1640 }
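
/* Illustrative usage sketch (an assumption, not part of the lwIP sources): sending two
 * buffers as one UDP datagram with lwip_sendmsg(). 'fd', the filled-in destination
 * 'dst' (a struct sockaddr_in) and the hdr/body buffers are placeholders.
 *
 *   struct iovec iov[2];
 *   struct msghdr mh;
 *   iov[0].iov_base = hdr;  iov[0].iov_len = hdr_len;
 *   iov[1].iov_base = body; iov[1].iov_len = body_len;
 *   memset(&mh, 0, sizeof(mh));
 *   mh.msg_name = &dst;
 *   mh.msg_namelen = sizeof(dst);
 *   mh.msg_iov = iov;
 *   mh.msg_iovlen = 2;
 *   if (lwip_sendmsg(fd, &mh, 0) < 0) {
 *     LWIP_DEBUGF(SOCKETS_DEBUG, ("sendmsg failed, errno=%d\n", errno));
 *   }
 */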
1641
1642 ssize_t
1643 lwip_sendto(int s, const void *data, size_t size, int flags,
1644 const struct sockaddr *to, socklen_t tolen)
1645 {
1646 #if LWIP_ENABLE_DISTRIBUTED_NET && LWIP_USE_GET_HOST_BY_NAME_EXTERNAL
1647 if (!is_distributed_net_enabled()) {
1648 return lwip_sendto_internal(s, data, size, flags, to, tolen);
1649 }
1650 return distributed_net_sendto(s, data, size, flags, to, tolen);
1651 }
1652
1653 ssize_t
1654 lwip_sendto_internal(int s, const void *data, size_t size, int flags,
1655 const struct sockaddr *to, socklen_t tolen)
1656 {
1657 #endif
1658 struct lwip_sock *sock;
1659 err_t err;
1660 u16_t short_size;
1661 u16_t remote_port;
1662 struct netbuf buf;
1663
1664 sock = get_socket(s);
1665 if (!sock) {
1666 return -1;
1667 }
1668
1669 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1670 #if LWIP_TCP
1671 done_socket(sock);
1672 return lwip_send(s, data, size, flags);
1673 #else /* LWIP_TCP */
1674 LWIP_UNUSED_ARG(flags);
1675 sock_set_errno(sock, err_to_errno(ERR_ARG));
1676 done_socket(sock);
1677 return -1;
1678 #endif /* LWIP_TCP */
1679 }
1680
1681 if (size > LWIP_MIN(0xFFFF, SSIZE_MAX)) {
1682 /* cannot fit into one datagram (at least for us) */
1683 sock_set_errno(sock, EMSGSIZE);
1684 done_socket(sock);
1685 return -1;
1686 }
1687 short_size = (u16_t)size;
1688 LWIP_ERROR("lwip_sendto: invalid address", (((to == NULL) && (tolen == 0)) ||
1689 (IS_SOCK_ADDR_LEN_VALID(tolen) &&
1690 ((to != NULL) && (IS_SOCK_ADDR_TYPE_VALID(to) && IS_SOCK_ADDR_ALIGNED(to))))),
1691 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1692 LWIP_UNUSED_ARG(tolen);
1693
1694 /* initialize a buffer */
1695 buf.p = buf.ptr = NULL;
1696 #if LWIP_CHECKSUM_ON_COPY
1697 buf.flags = 0;
1698 #endif /* LWIP_CHECKSUM_ON_COPY */
1699 if (to) {
1700 SOCKADDR_TO_IPADDR_PORT(to, &buf.addr, remote_port);
1701 } else {
1702 remote_port = 0;
1703 ip_addr_set_any(NETCONNTYPE_ISIPV6(netconn_type(sock->conn)), &buf.addr);
1704 }
1705 netbuf_fromport(&buf) = remote_port;
1706
1707
1708 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_sendto(%d, data=%p, short_size=%"U16_F", flags=0x%x to=",
1709 s, data, short_size, flags));
1710 ip_addr_debug_print_val(SOCKETS_DEBUG, buf.addr);
1711 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", remote_port));
1712
1713 /* make the buffer point to the data that should be sent */
1714 #if LWIP_NETIF_TX_SINGLE_PBUF
1715 /* Allocate a new netbuf and copy the data into it. */
1716 if (netbuf_alloc(&buf, short_size) == NULL) {
1717 err = ERR_MEM;
1718 } else {
1719 #if LWIP_CHECKSUM_ON_COPY
1720 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_RAW) {
1721 u16_t chksum = LWIP_CHKSUM_COPY(buf.p->payload, data, short_size);
1722 netbuf_set_chksum(&buf, chksum);
1723 } else
1724 #endif /* LWIP_CHECKSUM_ON_COPY */
1725 {
1726 MEMCPY(buf.p->payload, data, short_size);
1727 }
1728 err = ERR_OK;
1729 }
1730 #else /* LWIP_NETIF_TX_SINGLE_PBUF */
1731 err = netbuf_ref(&buf, data, short_size);
1732 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
1733 if (err == ERR_OK) {
1734 #if LWIP_IPV4 && LWIP_IPV6
1735 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
1736 if (IP_IS_V6_VAL(buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&buf.addr))) {
1737 unmap_ipv4_mapped_ipv6(ip_2_ip4(&buf.addr), ip_2_ip6(&buf.addr));
1738 IP_SET_TYPE_VAL(buf.addr, IPADDR_TYPE_V4);
1739 }
1740 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1741
1742 /* send the data */
1743 err = netconn_send(sock->conn, &buf);
1744 }
1745
1746 /* deallocate the buffer */
1747 netbuf_free(&buf);
1748
1749 sock_set_errno(sock, err_to_errno(err));
1750 done_socket(sock);
1751 return (err == ERR_OK ? short_size : -1);
1752 }
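
/* Illustrative usage sketch (an assumption, not part of the lwIP sources): sending a
 * single UDP datagram with lwip_sendto(). The descriptor 'fd' (a SOCK_DGRAM socket),
 * the destination 192.168.1.1:9000 and the payload are made up for the example.
 *
 *   struct sockaddr_in dst;
 *   const char payload[] = "hello";
 *   memset(&dst, 0, sizeof(dst));
 *   dst.sin_family = AF_INET;
 *   dst.sin_port = PP_HTONS(9000);
 *   dst.sin_addr.s_addr = PP_HTONL(0xC0A80101UL);
 *   if (lwip_sendto(fd, payload, sizeof(payload) - 1, 0,
 *                   (struct sockaddr *)&dst, sizeof(dst)) < 0) {
 *     LWIP_DEBUGF(SOCKETS_DEBUG, ("sendto failed, errno=%d\n", errno));
 *   }
 */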
1753
1754 int
1755 lwip_socket(int domain, int type, int protocol)
1756 {
1757 struct netconn *conn;
1758 int i;
1759
1760 LWIP_UNUSED_ARG(domain); /* @todo: check this */
1761
1762 /* create a netconn */
1763 switch (type) {
1764 case SOCK_RAW:
1765 conn = netconn_new_with_proto_and_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_RAW),
1766 (u8_t)protocol, DEFAULT_SOCKET_EVENTCB);
1767 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_RAW, %d) = ",
1768 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1769 break;
1770 case SOCK_DGRAM:
1771 conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain,
1772 ((protocol == IPPROTO_UDPLITE) ? NETCONN_UDPLITE : NETCONN_UDP)),
1773 DEFAULT_SOCKET_EVENTCB);
1774 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_DGRAM, %d) = ",
1775 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1776 #if LWIP_NETBUF_RECVINFO
1777 if (conn) {
1778 /* netconn layer enables pktinfo by default, sockets default to off */
1779 conn->flags &= ~NETCONN_FLAG_PKTINFO;
1780 }
1781 #endif /* LWIP_NETBUF_RECVINFO */
1782 break;
1783 case SOCK_STREAM:
1784 conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_TCP), DEFAULT_SOCKET_EVENTCB);
1785 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_STREAM, %d) = ",
1786 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1787 break;
1788 default:
1789 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%d, %d/UNKNOWN, %d) = -1\n",
1790 domain, type, protocol));
1791 set_errno(EINVAL);
1792 return -1;
1793 }
1794
1795 if (!conn) {
1796 LWIP_DEBUGF(SOCKETS_DEBUG, ("-1 / ENOBUFS (could not create netconn)\n"));
1797 set_errno(ENOBUFS);
1798 return -1;
1799 }
1800
1801 i = alloc_socket(conn, 0);
1802
1803 if (i == -1) {
1804 netconn_delete(conn);
1805 set_errno(ENFILE);
1806 return -1;
1807 }
1808 conn->socket = i;
1809 done_socket(&sockets[i - LWIP_SOCKET_OFFSET]);
1810 LWIP_DEBUGF(SOCKETS_DEBUG, ("%d\n", i));
1811 set_errno(0);
1812 return i;
1813 }
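
/* Illustrative usage sketch (an assumption, not part of the lwIP sources): creating
 * sockets of the three supported types. Failure is reported through errno
 * (EINVAL for an unknown type, ENOBUFS/ENFILE when no netconn or socket slot is free).
 *
 *   int tcp_fd = lwip_socket(AF_INET, SOCK_STREAM, 0);
 *   int udp_fd = lwip_socket(AF_INET, SOCK_DGRAM, 0);
 *   int raw_fd = lwip_socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
 *   if ((tcp_fd < 0) || (udp_fd < 0) || (raw_fd < 0)) {
 *     LWIP_DEBUGF(SOCKETS_DEBUG, ("socket creation failed, errno=%d\n", errno));
 *   }
 */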
1814
1815 ssize_t
1816 lwip_write(int s, const void *data, size_t size)
1817 {
1818 return lwip_send(s, data, size, 0);
1819 }
1820
1821 ssize_t
1822 lwip_writev(int s, const struct iovec *iov, int iovcnt)
1823 {
1824 struct msghdr msg;
1825
1826 msg.msg_name = NULL;
1827 msg.msg_namelen = 0;
1828 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1829 Blame the opengroup standard for this inconsistency. */
1830 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1831 msg.msg_iovlen = iovcnt;
1832 msg.msg_control = NULL;
1833 msg.msg_controllen = 0;
1834 msg.msg_flags = 0;
1835 return lwip_sendmsg(s, &msg, 0);
1836 }
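
/* Illustrative usage sketch (an assumption, not part of the lwIP sources): gathering a
 * header and a body buffer into one lwip_writev() call on a connected TCP socket.
 * 'fd', 'hdr' and 'body' are placeholders; for an unconnected UDP socket,
 * lwip_sendmsg() with msg_name/msg_namelen set would be used instead.
 *
 *   struct iovec iov[2];
 *   iov[0].iov_base = hdr;  iov[0].iov_len = hdr_len;
 *   iov[1].iov_base = body; iov[1].iov_len = body_len;
 *   ssize_t written = lwip_writev(fd, iov, 2);
 *   if (written < 0) {
 *     LWIP_DEBUGF(SOCKETS_DEBUG, ("writev failed, errno=%d\n", errno));
 *   }
 */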
1837
1838 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
1839 /* Add select_cb to select_cb_list. */
1840 static void
1841 lwip_link_select_cb(struct lwip_select_cb *select_cb)
1842 {
1843 LWIP_SOCKET_SELECT_DECL_PROTECT(lev);
1844
1845 /* Protect the select_cb_list */
1846 LWIP_SOCKET_SELECT_PROTECT(lev);
1847
1848 /* Put this select_cb on top of list */
1849 select_cb->next = select_cb_list;
1850 if (select_cb_list != NULL) {
1851 select_cb_list->prev = select_cb;
1852 }
1853 select_cb_list = select_cb;
1854 #if !LWIP_TCPIP_CORE_LOCKING
1855 /* Increasing this counter tells select_check_waiters that the list has changed. */
1856 select_cb_ctr++;
1857 #endif
1858
1859 /* Now we can safely unprotect */
1860 LWIP_SOCKET_SELECT_UNPROTECT(lev);
1861 }
1862
1863 /* Remove select_cb from select_cb_list. */
1864 static void
1865 lwip_unlink_select_cb(struct lwip_select_cb *select_cb)
1866 {
1867 LWIP_SOCKET_SELECT_DECL_PROTECT(lev);
1868
1869 /* Take us off the list */
1870 LWIP_SOCKET_SELECT_PROTECT(lev);
1871 if (select_cb->next != NULL) {
1872 select_cb->next->prev = select_cb->prev;
1873 }
1874 if (select_cb_list == select_cb) {
1875 LWIP_ASSERT("select_cb->prev == NULL", select_cb->prev == NULL);
1876 select_cb_list = select_cb->next;
1877 } else {
1878 LWIP_ASSERT("select_cb->prev != NULL", select_cb->prev != NULL);
1879 select_cb->prev->next = select_cb->next;
1880 }
1881 #if !LWIP_TCPIP_CORE_LOCKING
1882 /* Increasing this counter tells select_check_waiters that the list has changed. */
1883 select_cb_ctr++;
1884 #endif
1885 LWIP_SOCKET_SELECT_UNPROTECT(lev);
1886 }
1887 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
1888
1889 #if LWIP_SOCKET_SELECT
1890 /**
1891 * Go through the readset and writeset lists and see which of the sockets set
1892 * in the sets have events. On return, readset, writeset and exceptset contain
1893 * only the sockets that had events.
1894 *
1895 * @param maxfdp1 the highest socket index in the sets
1896 * @param readset_in set of sockets to check for read events
1897 * @param writeset_in set of sockets to check for write events
1898 * @param exceptset_in set of sockets to check for error events
1899 * @param readset_out set of sockets that had read events
1900 * @param writeset_out set of sockets that had write events
1901 * @param exceptset_out set of sockets that had error events
1902 * @return number of sockets that had events (read/write/exception) (>= 0)
1903 */
1904 static int
1905 lwip_selscan(int maxfdp1, fd_set *readset_in, fd_set *writeset_in, fd_set *exceptset_in,
1906 fd_set *readset_out, fd_set *writeset_out, fd_set *exceptset_out)
1907 {
1908 int i, nready = 0;
1909 fd_set lreadset, lwriteset, lexceptset;
1910 struct lwip_sock *sock;
1911 SYS_ARCH_DECL_PROTECT(lev);
1912
1913 FD_ZERO(&lreadset);
1914 FD_ZERO(&lwriteset);
1915 FD_ZERO(&lexceptset);
1916
1917 /* Go through each socket in each list to count number of sockets which
1918 currently match */
1919 for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
1920 /* if this FD is not in the set, continue */
1921 if (!(readset_in && FD_ISSET(i, readset_in)) &&
1922 !(writeset_in && FD_ISSET(i, writeset_in)) &&
1923 !(exceptset_in && FD_ISSET(i, exceptset_in))) {
1924 continue;
1925 }
1926 /* First get the socket's status (protected)... */
1927 SYS_ARCH_PROTECT(lev);
1928 sock = tryget_socket_unconn_locked(i);
1929 if (sock != NULL) {
1930 void *lastdata = sock->lastdata.pbuf;
1931 s16_t rcvevent = sock->rcvevent;
1932 u16_t sendevent = sock->sendevent;
1933 u16_t errevent = sock->errevent;
1934 SYS_ARCH_UNPROTECT(lev);
1935
1936 /* ... then examine it: */
1937 /* See if netconn of this socket is ready for read */
1938 if (readset_in && FD_ISSET(i, readset_in) && ((lastdata != NULL) || (rcvevent > 0))) {
1939 FD_SET(i, &lreadset);
1940 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for reading\n", i));
1941 nready++;
1942 }
1943 /* See if netconn of this socket is ready for write */
1944 if (writeset_in && FD_ISSET(i, writeset_in) && (sendevent != 0)) {
1945 FD_SET(i, &lwriteset);
1946 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for writing\n", i));
1947 nready++;
1948 }
1949 /* See if netconn of this socket had an error */
1950 if (exceptset_in && FD_ISSET(i, exceptset_in) && (errevent != 0)) {
1951 FD_SET(i, &lexceptset);
1952 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for exception\n", i));
1953 nready++;
1954 }
1955 done_socket(sock);
1956 } else {
1957 SYS_ARCH_UNPROTECT(lev);
1958 /* not a valid open socket */
1959 return -1;
1960 }
1961 }
1962 /* copy local sets to the ones provided as arguments */
1963 *readset_out = lreadset;
1964 *writeset_out = lwriteset;
1965 *exceptset_out = lexceptset;
1966
1967 LWIP_ASSERT("nready >= 0", nready >= 0);
1968 return nready;
1969 }
1970
1971 #if LWIP_NETCONN_FULLDUPLEX
1972 /* Mark all of the set sockets in one of the three fdsets passed to select as used.
1973 * All sockets are marked (and later unmarked), whether they are open or not.
1974 * This is OK as lwip_selscan aborts select when non-open sockets are found.
1975 */
1976 static void
1977 lwip_select_inc_sockets_used_set(int maxfdp, fd_set *fdset, fd_set *used_sockets)
1978 {
1979 SYS_ARCH_DECL_PROTECT(lev);
1980 if (fdset) {
1981 int i;
1982 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
1983 /* if this FD is in the set, lock it (unless already done) */
1984 if (FD_ISSET(i, fdset) && !FD_ISSET(i, used_sockets)) {
1985 struct lwip_sock *sock;
1986 SYS_ARCH_PROTECT(lev);
1987 sock = tryget_socket_unconn_locked(i);
1988 if (sock != NULL) {
1989 /* leave the socket used until released by lwip_select_dec_sockets_used */
1990 FD_SET(i, used_sockets);
1991 }
1992 SYS_ARCH_UNPROTECT(lev);
1993 }
1994 }
1995 }
1996 }
1997
1998 /* Mark all sockets passed to select as used to prevent them from being freed
1999 * from other threads while select is running.
2000 * Marked sockets are added to 'used_sockets' to mark them only once and be able
2001 * to unmark them correctly.
2002 */
2003 static void
2004 lwip_select_inc_sockets_used(int maxfdp, fd_set *fdset1, fd_set *fdset2, fd_set *fdset3, fd_set *used_sockets)
2005 {
2006 FD_ZERO(used_sockets);
2007 lwip_select_inc_sockets_used_set(maxfdp, fdset1, used_sockets);
2008 lwip_select_inc_sockets_used_set(maxfdp, fdset2, used_sockets);
2009 lwip_select_inc_sockets_used_set(maxfdp, fdset3, used_sockets);
2010 }
2011
2012 /* Let go all sockets that were marked as used when starting select */
2013 static void
2014 lwip_select_dec_sockets_used(int maxfdp, fd_set *used_sockets)
2015 {
2016 int i;
2017 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
2018 /* if this FD is not in the set, continue */
2019 if (FD_ISSET(i, used_sockets)) {
2020 struct lwip_sock *sock = tryget_socket_unconn_nouse(i);
2021 LWIP_ASSERT("socket gone at the end of select", sock != NULL);
2022 if (sock != NULL) {
2023 done_socket(sock);
2024 }
2025 }
2026 }
2027 }
2028 #else /* LWIP_NETCONN_FULLDUPLEX */
2029 #define lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, used_sockets)
2030 #define lwip_select_dec_sockets_used(maxfdp1, used_sockets)
2031 #endif /* LWIP_NETCONN_FULLDUPLEX */
2032
2033 int
2034 lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset,
2035 struct timeval *timeout)
2036 {
2037 u32_t waitres = 0;
2038 int nready;
2039 fd_set lreadset, lwriteset, lexceptset;
2040 u32_t msectimeout;
2041 int i;
2042 int maxfdp2;
2043 #if LWIP_NETCONN_SEM_PER_THREAD
2044 int waited = 0;
2045 #endif
2046 #if LWIP_NETCONN_FULLDUPLEX
2047 fd_set used_sockets;
2048 #endif
2049 SYS_ARCH_DECL_PROTECT(lev);
2050
2051 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%"S32_F" tvusec=%"S32_F")\n",
2052 maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset,
2053 timeout ? (s32_t)timeout->tv_sec : (s32_t) - 1,
2054 timeout ? (s32_t)timeout->tv_usec : (s32_t) - 1));
2055
2056 if ((maxfdp1 < 0) || (maxfdp1 > LWIP_SELECT_MAXNFDS)) {
2057 set_errno(EINVAL);
2058 return -1;
2059 }
2060
2061 lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, &used_sockets);
2062
2063 /* Go through each socket in each list to count number of sockets which
2064 currently match */
2065 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2066
2067 if (nready < 0) {
2068 /* one of the sockets in one of the fd_sets was invalid */
2069 set_errno(EBADF);
2070 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2071 return -1;
2072 } else if (nready > 0) {
2073 /* one or more sockets are set, no need to wait */
2074 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
2075 } else {
2076 /* If we don't have any current events, then suspend if we are supposed to */
2077 if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) {
2078 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n"));
2079 /* This is OK as the local fdsets are empty and nready is zero,
2080 or we would have returned earlier. */
2081 } else {
2082 /* None ready: add our semaphore to list:
2083 We don't actually need any dynamic memory. Our entry on the
2084 list is only valid while we are in this function, so it's ok
2085 to use local variables (unless we're running in MPU compatible
2086 mode). */
2087 API_SELECT_CB_VAR_DECLARE(select_cb);
2088 API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(ENOMEM); lwip_select_dec_sockets_used(maxfdp1, &used_sockets); return -1);
2089 memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));
2090
2091 API_SELECT_CB_VAR_REF(select_cb).readset = readset;
2092 API_SELECT_CB_VAR_REF(select_cb).writeset = writeset;
2093 API_SELECT_CB_VAR_REF(select_cb).exceptset = exceptset;
2094 #if LWIP_NETCONN_SEM_PER_THREAD
2095 API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
2096 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2097 if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
2098 /* failed to create semaphore */
2099 set_errno(ENOMEM);
2100 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2101 API_SELECT_CB_VAR_FREE(select_cb);
2102 return -1;
2103 }
2104 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2105
2106 lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2107
2108 /* Increase select_waiting for each socket we are interested in */
2109 maxfdp2 = maxfdp1;
2110 for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
2111 if ((readset && FD_ISSET(i, readset)) ||
2112 (writeset && FD_ISSET(i, writeset)) ||
2113 (exceptset && FD_ISSET(i, exceptset))) {
2114 struct lwip_sock *sock;
2115 SYS_ARCH_PROTECT(lev);
2116 sock = tryget_socket_unconn_locked(i);
2117 if (sock != NULL) {
2118 sock->select_waiting++;
2119 if (sock->select_waiting == 0) {
2120 /* overflow - too many threads waiting */
2121 sock->select_waiting--;
2122 nready = -1;
2123 maxfdp2 = i;
2124 SYS_ARCH_UNPROTECT(lev);
2125 done_socket(sock);
2126 set_errno(EBUSY);
2127 break;
2128 }
2129 SYS_ARCH_UNPROTECT(lev);
2130 done_socket(sock);
2131 } else {
2132 /* Not a valid socket */
2133 nready = -1;
2134 maxfdp2 = i;
2135 SYS_ARCH_UNPROTECT(lev);
2136 set_errno(EBADF);
2137 break;
2138 }
2139 }
2140 }
2141
2142 if (nready >= 0) {
2143 /* Call lwip_selscan again: there could have been events between
2144 the last scan (without us on the list) and putting us on the list! */
2145 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2146 if (nready < 0) {
2147 set_errno(EBADF);
2148 } else if (!nready) {
2149 /* Still none ready, just wait to be woken */
2150 if (timeout == 0) {
2151 /* Wait forever */
2152 msectimeout = 0;
2153 } else {
2154 long msecs_long = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500) / 1000));
2155 if (msecs_long <= 0) {
2156 /* Wait 1ms at least (0 means wait forever) */
2157 msectimeout = 1;
2158 } else {
2159 msectimeout = (u32_t)msecs_long;
2160 }
2161 }
2162
2163 waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
2164 #if LWIP_NETCONN_SEM_PER_THREAD
2165 waited = 1;
2166 #endif
2167 }
2168 }
2169
2170 /* Decrease select_waiting for each socket we are interested in */
2171 for (i = LWIP_SOCKET_OFFSET; i < maxfdp2; i++) {
2172 if ((readset && FD_ISSET(i, readset)) ||
2173 (writeset && FD_ISSET(i, writeset)) ||
2174 (exceptset && FD_ISSET(i, exceptset))) {
2175 struct lwip_sock *sock;
2176 SYS_ARCH_PROTECT(lev);
2177 sock = tryget_socket_unconn_nouse(i);
2178 LWIP_ASSERT("socket gone at the end of select", sock != NULL);
2179 if (sock != NULL) {
2180 /* for now, handle select_waiting==0... */
2181 LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
2182 if (sock->select_waiting > 0) {
2183 sock->select_waiting--;
2184 }
2185 SYS_ARCH_UNPROTECT(lev);
2186 } else {
2187 SYS_ARCH_UNPROTECT(lev);
2188 /* Not a valid socket */
2189 nready = -1;
2190 set_errno(EBADF);
2191 }
2192 }
2193 }
2194
2195 lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2196
2197 #if LWIP_NETCONN_SEM_PER_THREAD
2198 if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
2199 /* don't leave the thread-local semaphore signalled */
2200 sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
2201 }
2202 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2203 sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
2204 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2205 API_SELECT_CB_VAR_FREE(select_cb);
2206
2207 if (nready < 0) {
2208 /* This happens when a socket got closed while waiting */
2209 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2210 return -1;
2211 }
2212
2213 if (waitres == SYS_ARCH_TIMEOUT) {
2214 /* Timeout */
2215 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n"));
2216 /* This is OK as the local fdsets are empty and nready is zero,
2217 or we would have returned earlier. */
2218 } else {
2219 /* See what's set now after waiting */
2220 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2221 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
2222 if (nready < 0) {
2223 set_errno(EBADF);
2224 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2225 return -1;
2226 }
2227 }
2228 }
2229 }
2230
2231 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2232 set_errno(0);
2233 if (readset) {
2234 *readset = lreadset;
2235 }
2236 if (writeset) {
2237 *writeset = lwriteset;
2238 }
2239 if (exceptset) {
2240 *exceptset = lexceptset;
2241 }
2242 return nready;
2243 }
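
/* Illustrative usage sketch (an assumption, not part of the lwIP sources): waiting up
 * to five seconds for a single socket 'fd' to become readable with lwip_select().
 *
 *   fd_set rset;
 *   struct timeval tv;
 *   FD_ZERO(&rset);
 *   FD_SET(fd, &rset);
 *   tv.tv_sec = 5;
 *   tv.tv_usec = 0;
 *   int n = lwip_select(fd + 1, &rset, NULL, NULL, &tv);
 *   if (n < 0) {
 *     LWIP_DEBUGF(SOCKETS_DEBUG, ("select failed, errno=%d\n", errno));
 *   } else if (n == 0) {
 *     LWIP_DEBUGF(SOCKETS_DEBUG, ("select timed out\n"));
 *   } else if (FD_ISSET(fd, &rset)) {
 *     LWIP_DEBUGF(SOCKETS_DEBUG, ("fd %d is readable\n", fd));
 *   }
 */
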
2244 #endif /* LWIP_SOCKET_SELECT */
2245
2246 #if LWIP_SOCKET_POLL
2247 /** Options for the lwip_pollscan function. */
2248 enum lwip_pollscan_opts
2249 {
2250 /** Clear revents in each struct pollfd. */
2251 LWIP_POLLSCAN_CLEAR = 1,
2252
2253 /** Increment select_waiting in each struct lwip_sock. */
2254 LWIP_POLLSCAN_INC_WAIT = 2,
2255
2256 /** Decrement select_waiting in each struct lwip_sock. */
2257 LWIP_POLLSCAN_DEC_WAIT = 4
2258 };
2259
2260 /**
2261 * Update revents in each struct pollfd.
2262 * Optionally update select_waiting in struct lwip_sock.
2263 *
2264 * @param fds array of structures to update
2265 * @param nfds number of structures in fds
2266 * @param opts what to update and how
2267 * @return number of structures that have revents != 0
2268 */
2269 static int
2270 lwip_pollscan(struct pollfd *fds, nfds_t nfds, enum lwip_pollscan_opts opts)
2271 {
2272 int nready = 0;
2273 nfds_t fdi;
2274 struct lwip_sock *sock;
2275 SYS_ARCH_DECL_PROTECT(lev);
2276
2277 /* Go through each struct pollfd in the array. */
2278 for (fdi = 0; fdi < nfds; fdi++) {
2279 if ((opts & LWIP_POLLSCAN_CLEAR) != 0) {
2280 fds[fdi].revents = 0;
2281 }
2282
2283 /* Negative fd means the caller wants us to ignore this struct.
2284 POLLNVAL means we already detected that the fd is invalid;
2285 if another thread has since opened a new socket with that fd,
2286 we must not use that socket. */
2287 if (fds[fdi].fd >= 0 && (fds[fdi].revents & POLLNVAL) == 0) {
2288 /* First get the socket's status (protected)... */
2289 SYS_ARCH_PROTECT(lev);
2290 sock = tryget_socket_unconn_locked(fds[fdi].fd);
2291 if (sock != NULL) {
2292 void *lastdata = sock->lastdata.pbuf;
2293 s16_t rcvevent = sock->rcvevent;
2294 u16_t sendevent = sock->sendevent;
2295 u16_t errevent = sock->errevent;
2296
2297 if ((opts & LWIP_POLLSCAN_INC_WAIT) != 0) {
2298 sock->select_waiting++;
2299 if (sock->select_waiting == 0) {
2300 /* overflow - too many threads waiting */
2301 sock->select_waiting--;
2302 nready = -1;
2303 SYS_ARCH_UNPROTECT(lev);
2304 done_socket(sock);
2305 break;
2306 }
2307 } else if ((opts & LWIP_POLLSCAN_DEC_WAIT) != 0) {
2308 /* for now, handle select_waiting==0... */
2309 LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
2310 if (sock->select_waiting > 0) {
2311 sock->select_waiting--;
2312 }
2313 }
2314 SYS_ARCH_UNPROTECT(lev);
2315 done_socket(sock);
2316
2317 /* ... then examine it: */
2318 /* See if netconn of this socket is ready for read */
2319 if ((fds[fdi].events & POLLIN) != 0 && ((lastdata != NULL) || (rcvevent > 0))) {
2320 fds[fdi].revents |= POLLIN;
2321 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for reading\n", fds[fdi].fd));
2322 }
2323 /* See if netconn of this socket is ready for write */
2324 if ((fds[fdi].events & POLLOUT) != 0 && (sendevent != 0)) {
2325 fds[fdi].revents |= POLLOUT;
2326 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for writing\n", fds[fdi].fd));
2327 }
2328 /* See if netconn of this socket had an error */
2329 if (errevent != 0) {
2330 /* POLLERR is output only. */
2331 fds[fdi].revents |= POLLERR;
2332 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for exception\n", fds[fdi].fd));
2333 }
2334 } else {
2335 /* Not a valid socket */
2336 SYS_ARCH_UNPROTECT(lev);
2337 /* POLLNVAL is output only. */
2338 fds[fdi].revents |= POLLNVAL;
2339 return -1;
2340 }
2341 }
2342
2343 /* Will return the number of structures that have events,
2344 not the number of events. */
2345 if (fds[fdi].revents != 0) {
2346 nready++;
2347 }
2348 }
2349
2350 LWIP_ASSERT("nready >= 0", nready >= 0);
2351 return nready;
2352 }
2353
2354 #if LWIP_NETCONN_FULLDUPLEX
2355 /* Mark all sockets as used.
2356 *
2357 * All sockets are marked (and later unmarked), whether they are open or not.
2358 * This is OK as lwip_pollscan aborts poll when non-open sockets are found.
2359 */
2360 static void
2361 lwip_poll_inc_sockets_used(struct pollfd *fds, nfds_t nfds)
2362 {
2363 nfds_t fdi;
2364
2365 if (fds) {
2366 /* Go through each struct pollfd in the array. */
2367 for (fdi = 0; fdi < nfds; fdi++) {
2368 /* Increase the reference counter */
2369 tryget_socket_unconn(fds[fdi].fd);
2370 }
2371 }
2372 }
2373
2374 /* Let go all sockets that were marked as used when starting poll */
2375 static void
2376 lwip_poll_dec_sockets_used(struct pollfd *fds, nfds_t nfds)
2377 {
2378 nfds_t fdi;
2379
2380 if (fds) {
2381 /* Go through each struct pollfd in the array. */
2382 for (fdi = 0; fdi < nfds; fdi++) {
2383 struct lwip_sock *sock = tryget_socket_unconn_nouse(fds[fdi].fd);
2384 if (sock != NULL) {
2385 done_socket(sock);
2386 }
2387 }
2388 }
2389 }
2390 #else /* LWIP_NETCONN_FULLDUPLEX */
2391 #define lwip_poll_inc_sockets_used(fds, nfds)
2392 #define lwip_poll_dec_sockets_used(fds, nfds)
2393 #endif /* LWIP_NETCONN_FULLDUPLEX */
2394
2395 int
2396 lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout)
2397 {
2398 u32_t waitres = 0;
2399 int nready;
2400 u32_t msectimeout;
2401 #if LWIP_NETCONN_SEM_PER_THREAD
2402 int waited = 0;
2403 #endif
2404
2405 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll(%p, %d, %d)\n",
2406 (void*)fds, (int)nfds, timeout));
2407 LWIP_ERROR("lwip_poll: invalid fds", ((fds != NULL && nfds > 0) || (fds == NULL && nfds == 0)),
2408 set_errno(EINVAL); return -1;);
2409
2410 lwip_poll_inc_sockets_used(fds, nfds);
2411
2412 /* Go through each struct pollfd to count number of structures
2413 which currently match */
2414 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_CLEAR);
2415
2416 if (nready < 0) {
2417 lwip_poll_dec_sockets_used(fds, nfds);
2418 return -1;
2419 }
2420
2421 /* If we don't have any current events, then suspend if we are supposed to */
2422 if (!nready) {
2423 API_SELECT_CB_VAR_DECLARE(select_cb);
2424
2425 if (timeout == 0) {
2426 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: no timeout, returning 0\n"));
2427 goto return_success;
2428 }
2429 API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(EAGAIN); lwip_poll_dec_sockets_used(fds, nfds); return -1);
2430 memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));
2431
2432 /* None ready: add our semaphore to list:
2433 We don't actually need any dynamic memory. Our entry on the
2434 list is only valid while we are in this function, so it's ok
2435 to use local variables. */
2436
2437 API_SELECT_CB_VAR_REF(select_cb).poll_fds = fds;
2438 API_SELECT_CB_VAR_REF(select_cb).poll_nfds = nfds;
2439 #if LWIP_NETCONN_SEM_PER_THREAD
2440 API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
2441 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2442 if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
2443 /* failed to create semaphore */
2444 set_errno(EAGAIN);
2445 lwip_poll_dec_sockets_used(fds, nfds);
2446 API_SELECT_CB_VAR_FREE(select_cb);
2447 return -1;
2448 }
2449 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2450
2451 lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2452
2453 /* Increase select_waiting for each socket we are interested in.
2454 Also, check for events again: there could have been events between
2455 the last scan (without us on the list) and putting us on the list! */
2456 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_INC_WAIT);
2457
2458 if (!nready) {
2459 /* Still none ready, just wait to be woken */
2460 if (timeout < 0) {
2461 /* Wait forever */
2462 msectimeout = 0;
2463 } else {
2464 /* timeout == 0 would have been handled earlier. */
2465 LWIP_ASSERT("timeout > 0", timeout > 0);
2466 msectimeout = timeout;
2467 }
2468 waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
2469 #if LWIP_NETCONN_SEM_PER_THREAD
2470 waited = 1;
2471 #endif
2472 }
2473
2474 /* Decrease select_waiting for each socket we are interested in,
2475 and check which events occurred while we waited. */
2476 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_DEC_WAIT);
2477
2478 lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2479
2480 #if LWIP_NETCONN_SEM_PER_THREAD
2481 if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
2482 /* don't leave the thread-local semaphore signalled */
2483 sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
2484 }
2485 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2486 sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
2487 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2488 API_SELECT_CB_VAR_FREE(select_cb);
2489
2490 if (nready < 0) {
2491 /* This happens when a socket got closed while waiting */
2492 lwip_poll_dec_sockets_used(fds, nfds);
2493 return -1;
2494 }
2495
2496 if (waitres == SYS_ARCH_TIMEOUT) {
2497 /* Timeout */
2498 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: timeout expired\n"));
2499 goto return_success;
2500 }
2501 }
2502
2503 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: nready=%d\n", nready));
2504 return_success:
2505 lwip_poll_dec_sockets_used(fds, nfds);
2506 set_errno(0);
2507 return nready;
2508 }
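
/* Illustrative usage sketch (an assumption, not part of the lwIP sources): polling one
 * socket 'fd' for readability with a one-second timeout. A negative timeout waits
 * forever; a timeout of 0 only reports the current state.
 *
 *   struct pollfd pfd;
 *   pfd.fd = fd;
 *   pfd.events = POLLIN;
 *   pfd.revents = 0;
 *   int n = lwip_poll(&pfd, 1, 1000);
 *   if ((n > 0) && ((pfd.revents & (POLLERR | POLLNVAL)) == 0) && (pfd.revents & POLLIN)) {
 *     LWIP_DEBUGF(SOCKETS_DEBUG, ("fd %d is readable\n", fd));
 *   }
 */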
2509
2510 /**
2511 * Check whether event_callback should wake up a thread waiting in
2512 * lwip_poll.
2513 */
2514 static int
2515 lwip_poll_should_wake(const struct lwip_select_cb *scb, int fd, int has_recvevent, int has_sendevent, int has_errevent)
2516 {
2517 nfds_t fdi;
2518 for (fdi = 0; fdi < scb->poll_nfds; fdi++) {
2519 const struct pollfd *pollfd = &scb->poll_fds[fdi];
2520 if (pollfd->fd == fd) {
2521 /* Do not update pollfd->revents right here;
2522 that would be a data race because lwip_pollscan
2523 accesses revents without protecting. */
2524 if (has_recvevent && (pollfd->events & POLLIN) != 0) {
2525 return 1;
2526 }
2527 if (has_sendevent && (pollfd->events & POLLOUT) != 0) {
2528 return 1;
2529 }
2530 if (has_errevent) {
2531 /* POLLERR is output only. */
2532 return 1;
2533 }
2534 }
2535 }
2536 return 0;
2537 }
2538 #endif /* LWIP_SOCKET_POLL */
2539
2540 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
2541 /**
2542 * Callback registered in the netconn layer for each socket-netconn.
2543 * Processes recvevent (data available) and wakes up tasks waiting for select.
2544 *
2545 * @note for LWIP_TCPIP_CORE_LOCKING any caller of this function
2546 * must have the core lock held when signaling the following events
2547 * as they might cause select_cb_list to be checked:
2548 * NETCONN_EVT_RCVPLUS
2549 * NETCONN_EVT_SENDPLUS
2550 * NETCONN_EVT_ERROR
2551 * This requirement will be asserted in select_check_waiters()
2552 */
2553 static void
2554 event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len)
2555 {
2556 int s, check_waiters;
2557 struct lwip_sock *sock;
2558 SYS_ARCH_DECL_PROTECT(lev);
2559
2560 LWIP_UNUSED_ARG(len);
2561
2562 /* Get socket */
2563 if (conn) {
2564 s = conn->socket;
2565 if (s < 0) {
2566 /* Data comes in right away after an accept, even though
2567 * the server task might not have created a new socket yet.
2568 * Just count down (or up) if that's the case and we
2569 * will use the data later. Note that only receive events
2570 * can happen before the new socket is set up. */
2571 SYS_ARCH_PROTECT(lev);
2572 if (conn->socket < 0) {
2573 if (evt == NETCONN_EVT_RCVPLUS) {
2574 /* conn->socket is -1 on initialization
2575 lwip_accept adjusts sock->recvevent if conn->socket < -1 */
2576 conn->socket--;
2577 }
2578 SYS_ARCH_UNPROTECT(lev);
2579 return;
2580 }
2581 s = conn->socket;
2582 SYS_ARCH_UNPROTECT(lev);
2583 }
2584
2585 sock = get_socket(s);
2586 if (!sock) {
2587 return;
2588 }
2589 } else {
2590 return;
2591 }
2592
2593 check_waiters = 1;
2594 SYS_ARCH_PROTECT(lev);
2595 /* Set event as required */
2596 switch (evt) {
2597 case NETCONN_EVT_RCVPLUS:
2598 sock->rcvevent++;
2599 if (sock->rcvevent > 1) {
2600 check_waiters = 0;
2601 }
2602 break;
2603 case NETCONN_EVT_RCVMINUS:
2604 sock->rcvevent--;
2605 check_waiters = 0;
2606 break;
2607 case NETCONN_EVT_SENDPLUS:
2608 if (sock->sendevent) {
2609 check_waiters = 0;
2610 }
2611 sock->sendevent = 1;
2612 break;
2613 case NETCONN_EVT_SENDMINUS:
2614 sock->sendevent = 0;
2615 check_waiters = 0;
2616 break;
2617 case NETCONN_EVT_ERROR:
2618 sock->errevent = 1;
2619 break;
2620 default:
2621 LWIP_ASSERT("unknown event", 0);
2622 break;
2623 }
2624
2625 if (sock->select_waiting && check_waiters) {
2626 /* Save which events are active */
2627 int has_recvevent, has_sendevent, has_errevent;
2628 has_recvevent = sock->rcvevent > 0;
2629 has_sendevent = sock->sendevent != 0;
2630 has_errevent = sock->errevent != 0;
2631 SYS_ARCH_UNPROTECT(lev);
2632 /* Check any select calls waiting on this socket */
2633 select_check_waiters(s, has_recvevent, has_sendevent, has_errevent);
2634 } else {
2635 SYS_ARCH_UNPROTECT(lev);
2636 }
2637 poll_check_waiters(s, check_waiters);
2638 done_socket(sock);
2639 }
2640
2641 /**
2642 * Check if any select waiters are waiting on this socket and its events
2643 *
2644 * @note on synchronization of select_cb_list:
2645 * LWIP_TCPIP_CORE_LOCKING: the select_cb_list must only be accessed while holding
2646 * the core lock. We do a single pass through the list and signal any waiters.
2647 * Core lock should already be held when calling here!!!!
2648 *
2649 * !LWIP_TCPIP_CORE_LOCKING: we use SYS_ARCH_PROTECT but unlock on each iteration
2650 * of the loop, thus creating a possibility where a thread could modify the
2651 * select_cb_list during our UNPROTECT/PROTECT. We use a generational counter to
2652 * detect this change and restart the list walk. The list is expected to be small.
2653 */
2654 static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent)
2655 {
2656 struct lwip_select_cb *scb;
2657 #if !LWIP_TCPIP_CORE_LOCKING
2658 int last_select_cb_ctr;
2659 SYS_ARCH_DECL_PROTECT(lev);
2660 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2661
2662 LWIP_ASSERT_CORE_LOCKED();
2663
2664 #if !LWIP_TCPIP_CORE_LOCKING
2665 SYS_ARCH_PROTECT(lev);
2666 again:
2667 /* remember the state of select_cb_list to detect changes */
2668 last_select_cb_ctr = select_cb_ctr;
2669 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2670 for (scb = select_cb_list; scb != NULL; scb = scb->next) {
2671 if (scb->sem_signalled == 0) {
2672 /* semaphore not signalled yet */
2673 int do_signal = 0;
2674 #if LWIP_SOCKET_POLL
2675 if (scb->poll_fds != NULL) {
2676 do_signal = lwip_poll_should_wake(scb, s, has_recvevent, has_sendevent, has_errevent);
2677 }
2678 #endif /* LWIP_SOCKET_POLL */
2679 #if LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL
2680 else
2681 #endif /* LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL */
2682 #if LWIP_SOCKET_SELECT
2683 {
2684 /* Test this select call for our socket */
2685 if (has_recvevent) {
2686 if (scb->readset && FD_ISSET(s, scb->readset)) {
2687 do_signal = 1;
2688 }
2689 }
2690 if (has_sendevent) {
2691 if (!do_signal && scb->writeset && FD_ISSET(s, scb->writeset)) {
2692 do_signal = 1;
2693 }
2694 }
2695 if (has_errevent) {
2696 if (!do_signal && scb->exceptset && FD_ISSET(s, scb->exceptset)) {
2697 do_signal = 1;
2698 }
2699 }
2700 }
2701 #endif /* LWIP_SOCKET_SELECT */
2702 if (do_signal) {
2703 scb->sem_signalled = 1;
2704 /* For !LWIP_TCPIP_CORE_LOCKING, we don't call SYS_ARCH_UNPROTECT() before signaling
2705 the semaphore, as this might lead to the select thread taking itself off the list,
2706 invalidating the semaphore. */
2707 sys_sem_signal(SELECT_SEM_PTR(scb->sem));
2708 }
2709 }
2710 #if LWIP_TCPIP_CORE_LOCKING
2711 }
2712 #else
2713 /* unlock interrupts with each step */
2714 SYS_ARCH_UNPROTECT(lev);
2715 /* this makes sure interrupt protection time is short */
2716 SYS_ARCH_PROTECT(lev);
2717 if (last_select_cb_ctr != select_cb_ctr) {
2718 /* someone has changed select_cb_list, restart at the beginning */
2719 goto again;
2720 }
2721 /* remember the state of select_cb_list to detect changes */
2722 last_select_cb_ctr = select_cb_ctr;
2723 }
2724 SYS_ARCH_UNPROTECT(lev);
2725 #endif
2726 }
2727 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
2728
2729 /**
2730 * Close one end of a full-duplex connection.
2731 */
2732 int
2733 lwip_shutdown(int s, int how)
2734 {
2735 struct lwip_sock *sock;
2736 err_t err;
2737 u8_t shut_rx = 0, shut_tx = 0;
2738
2739 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_shutdown(%d, how=%d)\n", s, how));
2740
2741 sock = get_socket(s);
2742 if (!sock) {
2743 return -1;
2744 }
2745
2746 if (sock->conn != NULL) {
2747 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
2748 sock_set_errno(sock, EOPNOTSUPP);
2749 done_socket(sock);
2750 return -1;
2751 }
2752 } else {
2753 sock_set_errno(sock, ENOTCONN);
2754 done_socket(sock);
2755 return -1;
2756 }
2757
2758 if (how == SHUT_RD) {
2759 shut_rx = 1;
2760 } else if (how == SHUT_WR) {
2761 shut_tx = 1;
2762 } else if (how == SHUT_RDWR) {
2763 shut_rx = 1;
2764 shut_tx = 1;
2765 } else {
2766 sock_set_errno(sock, EINVAL);
2767 done_socket(sock);
2768 return -1;
2769 }
2770 err = netconn_shutdown(sock->conn, shut_rx, shut_tx);
2771
2772 sock_set_errno(sock, err_to_errno(err));
2773 done_socket(sock);
2774 return (err == ERR_OK ? 0 : -1);
2775 }
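
/* Illustrative usage sketch (an assumption, not part of the lwIP sources): half-closing
 * a connected TCP socket 'fd' so the peer sees EOF while we can still drain its
 * remaining data. Only TCP sockets are accepted; others fail with EOPNOTSUPP.
 *
 *   if (lwip_shutdown(fd, SHUT_WR) == 0) {
 *     char buf[128];
 *     ssize_t n;
 *     while ((n = lwip_read(fd, buf, sizeof(buf))) > 0) {
 *       LWIP_DEBUGF(SOCKETS_DEBUG, ("drained %d more bytes\n", (int)n));
 *     }
 *     lwip_close(fd);
 *   }
 */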
2776
2777 static int
2778 lwip_getaddrname(int s, struct sockaddr *name, socklen_t *namelen, u8_t local)
2779 {
2780 struct lwip_sock *sock;
2781 union sockaddr_aligned saddr;
2782 ip_addr_t naddr;
2783 u16_t port;
2784 err_t err;
2785
2786 sock = get_socket(s);
2787 if (!sock) {
2788 return -1;
2789 }
2790
2791 /* get the IP address and port */
2792 err = netconn_getaddr(sock->conn, &naddr, &port, local);
2793 if (err != ERR_OK) {
2794 sock_set_errno(sock, err_to_errno(err));
2795 done_socket(sock);
2796 return -1;
2797 }
2798
2799 #if LWIP_IPV4 && LWIP_IPV6
2800 /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
2801 if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) &&
2802 IP_IS_V4_VAL(naddr)) {
2803 ip4_2_ipv4_mapped_ipv6(ip_2_ip6(&naddr), ip_2_ip4(&naddr));
2804 IP_SET_TYPE_VAL(naddr, IPADDR_TYPE_V6);
2805 }
2806 #endif /* LWIP_IPV4 && LWIP_IPV6 */
2807
2808 IPADDR_PORT_TO_SOCKADDR(&saddr, &naddr, port);
2809
2810 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getaddrname(%d, addr=", s));
2811 ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
2812 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", port));
2813
2814 if (*namelen > saddr.sa.sa_len) {
2815 *namelen = saddr.sa.sa_len;
2816 }
2817 MEMCPY(name, &saddr, *namelen);
2818
2819 sock_set_errno(sock, 0);
2820 done_socket(sock);
2821 return 0;
2822 }
2823
2824 int
2825 lwip_getpeername(int s, struct sockaddr *name, socklen_t *namelen)
2826 {
2827 return lwip_getaddrname(s, name, namelen, 0);
2828 }
2829
2830 int
2831 lwip_getsockname(int s, struct sockaddr *name, socklen_t *namelen)
2832 {
2833 return lwip_getaddrname(s, name, namelen, 1);
2834 }
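
/* Illustrative usage sketch (an assumption, not part of the lwIP sources): retrieving
 * the local and remote endpoints of a connected socket 'fd'. struct sockaddr_storage
 * is large enough for both the IPv4 and IPv6 variants.
 *
 *   struct sockaddr_storage local, peer;
 *   socklen_t llen = sizeof(local), plen = sizeof(peer);
 *   if ((lwip_getsockname(fd, (struct sockaddr *)&local, &llen) == 0) &&
 *       (lwip_getpeername(fd, (struct sockaddr *)&peer, &plen) == 0)) {
 *     LWIP_DEBUGF(SOCKETS_DEBUG, ("got local (%d bytes) and peer (%d bytes) names\n",
 *                                 (int)llen, (int)plen));
 *   }
 */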
2835
2836 int
2837 lwip_getsockopt(int s, int level, int optname, void *optval, socklen_t *optlen)
2838 {
2839 int err;
2840 struct lwip_sock *sock = get_socket(s);
2841 #if !LWIP_TCPIP_CORE_LOCKING
2842 err_t cberr;
2843 LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
2844 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2845
2846 if (!sock) {
2847 return -1;
2848 }
2849
2850 if ((NULL == optval) || (NULL == optlen)) {
2851 sock_set_errno(sock, EFAULT);
2852 done_socket(sock);
2853 return -1;
2854 }
2855
2856 #if LWIP_TCPIP_CORE_LOCKING
2857 /* core-locking can just call the -impl function */
2858 LOCK_TCPIP_CORE();
2859 err = lwip_getsockopt_impl(s, level, optname, optval, optlen);
2860 UNLOCK_TCPIP_CORE();
2861
2862 #else /* LWIP_TCPIP_CORE_LOCKING */
2863
2864 #if LWIP_MPU_COMPATIBLE
2865 /* MPU_COMPATIBLE copies the optval data, so check for max size here */
2866 if (*optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
2867 sock_set_errno(sock, ENOBUFS);
2868 done_socket(sock);
2869 return -1;
2870 }
2871 #endif /* LWIP_MPU_COMPATIBLE */
2872
2873 LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
2874 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
2875 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
2876 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
2877 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = *optlen;
2878 #if !LWIP_MPU_COMPATIBLE
2879 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.p = optval;
2880 #endif /* !LWIP_MPU_COMPATIBLE */
2881 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
2882 #if LWIP_NETCONN_SEM_PER_THREAD
2883 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
2884 #else
2885 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
2886 #endif
2887 cberr = tcpip_callback(lwip_getsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
2888 if (cberr != ERR_OK) {
2889 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
2890 sock_set_errno(sock, err_to_errno(cberr));
2891 done_socket(sock);
2892 return -1;
2893 }
2894 sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);
2895
2896 /* write back optlen and optval */
2897 *optlen = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen;
2898 #if LWIP_MPU_COMPATIBLE
2899 MEMCPY(optval, LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval,
2900 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen);
2901 #endif /* LWIP_MPU_COMPATIBLE */
2902
2903 /* maybe lwip_getsockopt_impl has changed err */
2904 err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
2905 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
2906 #endif /* LWIP_TCPIP_CORE_LOCKING */
2907
2908 sock_set_errno(sock, err);
2909 done_socket(sock);
2910 return err ? -1 : 0;
2911 }
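
/* Illustrative usage sketch (an assumption, not part of the lwIP sources): reading the
 * pending error of a non-blocking socket 'fd' once select/poll reports it writable
 * after a connect attempt. SO_ERROR reports the queued error as an errno value.
 *
 *   int so_err = 0;
 *   socklen_t optlen = sizeof(so_err);
 *   if (lwip_getsockopt(fd, SOL_SOCKET, SO_ERROR, &so_err, &optlen) == 0) {
 *     if (so_err == 0) {
 *       LWIP_DEBUGF(SOCKETS_DEBUG, ("connect on fd %d succeeded\n", fd));
 *     } else {
 *       LWIP_DEBUGF(SOCKETS_DEBUG, ("connect on fd %d failed: %d\n", fd, so_err));
 *     }
 *   }
 */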
2912
2913 #if !LWIP_TCPIP_CORE_LOCKING
2914 /** lwip_getsockopt_callback: only used without CORE_LOCKING
2915 * to get into the tcpip_thread
2916 */
2917 static void
2918 lwip_getsockopt_callback(void *arg)
2919 {
2920 struct lwip_setgetsockopt_data *data;
2921 LWIP_ASSERT("arg != NULL", arg != NULL);
2922 data = (struct lwip_setgetsockopt_data *)arg;
2923
2924 data->err = lwip_getsockopt_impl(data->s, data->level, data->optname,
2925 #if LWIP_MPU_COMPATIBLE
2926 data->optval,
2927 #else /* LWIP_MPU_COMPATIBLE */
2928 data->optval.p,
2929 #endif /* LWIP_MPU_COMPATIBLE */
2930 &data->optlen);
2931
2932 sys_sem_signal((sys_sem_t *)(data->completed_sem));
2933 }
2934 #endif /* LWIP_TCPIP_CORE_LOCKING */
2935
2936 static int
2937 lwip_sockopt_to_ipopt(int optname)
2938 {
2939 /* Map SO_* values to our internal SOF_* values
2940 * We should not rely on #defines in socket.h
2941 * being in sync with ip.h.
2942 */
2943 switch (optname) {
2944 case SO_BROADCAST:
2945 return SOF_BROADCAST;
2946 case SO_KEEPALIVE:
2947 return SOF_KEEPALIVE;
2948 case SO_REUSEADDR:
2949 return SOF_REUSEADDR;
2950 default:
2951 LWIP_ASSERT("Unknown socket option", 0);
2952 return 0;
2953 }
2954 }
2955
2956 /** lwip_getsockopt_impl: the actual implementation of getsockopt:
2957 * same arguments as lwip_getsockopt, either called directly or through callback
2958 */
2959 static int
2960 lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen)
2961 {
2962 int err = 0;
2963 struct lwip_sock *sock = tryget_socket(s);
2964 if (!sock) {
2965 return EBADF;
2966 }
2967
2968 #ifdef LWIP_HOOK_SOCKETS_GETSOCKOPT
2969 if (LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) {
2970 return err;
2971 }
2972 #endif
2973
2974 switch (level) {
2975
2976 /* Level: SOL_SOCKET */
2977 case SOL_SOCKET:
2978 switch (optname) {
2979
2980 #if LWIP_TCP
2981 case SO_ACCEPTCONN:
2982 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
2983 if (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_TCP) {
2984 done_socket(sock);
2985 return ENOPROTOOPT;
2986 }
2987 if ((sock->conn->pcb.tcp != NULL) && (sock->conn->pcb.tcp->state == LISTEN)) {
2988 *(int *)optval = 1;
2989 } else {
2990 *(int *)optval = 0;
2991 }
2992 break;
2993 #endif /* LWIP_TCP */
2994
2995 /* The option flags */
2996 case SO_BROADCAST:
2997 case SO_KEEPALIVE:
2998 #if SO_REUSE
2999 case SO_REUSEADDR:
3000 #endif /* SO_REUSE */
3001 if ((optname == SO_BROADCAST) &&
3002 (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
3003 done_socket(sock);
3004 return ENOPROTOOPT;
3005 }
3006
3007 optname = lwip_sockopt_to_ipopt(optname);
3008
3009 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3010 *(int *)optval = ip_get_option(sock->conn->pcb.ip, optname);
3011 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, optname=0x%x, ..) = %s\n",
3012 s, optname, (*(int *)optval ? "on" : "off")));
3013 break;
3014
3015 case SO_TYPE:
3016 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3017 switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
3018 case NETCONN_RAW:
3019 *(int *)optval = SOCK_RAW;
3020 break;
3021 case NETCONN_TCP:
3022 *(int *)optval = SOCK_STREAM;
3023 break;
3024 case NETCONN_UDP:
3025 *(int *)optval = SOCK_DGRAM;
3026 break;
3027 default: /* unrecognized socket type */
3028 *(int *)optval = netconn_type(sock->conn);
3029 LWIP_DEBUGF(SOCKETS_DEBUG,
3030 ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE): unrecognized socket type %d\n",
3031 s, *(int *)optval));
3032 } /* switch (netconn_type(sock->conn)) */
3033 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE) = %d\n",
3034 s, *(int *)optval));
3035 break;
3036
3037 case SO_ERROR:
3038 LWIP_SOCKOPT_CHECK_OPTLEN(sock, *optlen, int);
3039 *(int *)optval = err_to_errno(netconn_err(sock->conn));
3040 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_ERROR) = %d\n",
3041 s, *(int *)optval));
3042 break;
3043
3044 #if LWIP_SO_SNDTIMEO
3045 case SO_SNDTIMEO:
3046 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3047 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_sendtimeout(sock->conn));
3048 break;
3049 #endif /* LWIP_SO_SNDTIMEO */
3050 #if LWIP_SO_RCVTIMEO
3051 case SO_RCVTIMEO:
3052 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3053 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_recvtimeout(sock->conn));
3054 break;
3055 #endif /* LWIP_SO_RCVTIMEO */
3056 #if LWIP_SO_RCVBUF
3057 case SO_RCVBUF:
3058 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3059 *(int *)optval = netconn_get_recvbufsize(sock->conn);
3060 break;
3061 #endif /* LWIP_SO_RCVBUF */
3062 #if LWIP_SO_LINGER
3063 case SO_LINGER: {
3064 s16_t conn_linger;
3065 struct linger *linger = (struct linger *)optval;
3066 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, struct linger);
3067 conn_linger = sock->conn->linger;
3068 if (conn_linger >= 0) {
3069 linger->l_onoff = 1;
3070 linger->l_linger = (int)conn_linger;
3071 } else {
3072 linger->l_onoff = 0;
3073 linger->l_linger = 0;
3074 }
3075 }
3076 break;
3077 #endif /* LWIP_SO_LINGER */
3078 #if LWIP_UDP
3079 case SO_NO_CHECK:
3080 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_UDP);
3081 #if LWIP_UDPLITE
3082 if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) {
3083 /* this flag is only available for UDP, not for UDP lite */
3084 done_socket(sock);
3085 return EAFNOSUPPORT;
3086 }
3087 #endif /* LWIP_UDPLITE */
3088 *(int *)optval = udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM) ? 1 : 0;
3089 break;
3090 #endif /* LWIP_UDP */
3091 default:
3092 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
3093 s, optname));
3094 err = ENOPROTOOPT;
3095 break;
3096 } /* switch (optname) */
3097 break;
3098
3099 /* Level: IPPROTO_IP */
3100 case IPPROTO_IP:
3101 switch (optname) {
3102 case IP_TTL:
3103 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3104 *(int *)optval = sock->conn->pcb.ip->ttl;
3105 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TTL) = %d\n",
3106 s, *(int *)optval));
3107 break;
3108 case IP_TOS:
3109 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3110 *(int *)optval = sock->conn->pcb.ip->tos;
3111 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TOS) = %d\n",
3112 s, *(int *)optval));
3113 break;
3114 #if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
3115 case IP_MULTICAST_TTL:
3116 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3117 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3118 done_socket(sock);
3119 return ENOPROTOOPT;
3120 }
3121 *(u8_t *)optval = udp_get_multicast_ttl(sock->conn->pcb.udp);
3122 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_TTL) = %d\n",
3123 s, *(int *)optval));
3124 break;
3125 case IP_MULTICAST_IF:
3126 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, struct in_addr);
3127 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3128 done_socket(sock);
3129 return ENOPROTOOPT;
3130 }
3131 inet_addr_from_ip4addr((struct in_addr *)optval, udp_get_multicast_netif_addr(sock->conn->pcb.udp));
3132 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_IF) = 0x%"X32_F"\n",
3133 s, *(u32_t *)optval));
3134 break;
3135 case IP_MULTICAST_LOOP:
3136 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3137 if ((sock->conn->pcb.udp->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) {
3138 *(u8_t *)optval = 1;
3139 } else {
3140 *(u8_t *)optval = 0;
3141 }
3142 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_LOOP) = %d\n",
3143 s, *(int *)optval));
3144 break;
3145 #endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
3146 default:
3147 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
3148 s, optname));
3149 err = ENOPROTOOPT;
3150 break;
3151 } /* switch (optname) */
3152 break;
3153
3154 #if LWIP_TCP
3155 /* Level: IPPROTO_TCP */
3156 case IPPROTO_TCP:
3157 /* Special case: all IPPROTO_TCP options take an int */
3158 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_TCP);
3159 if (sock->conn->pcb.tcp->state == LISTEN) {
3160 done_socket(sock);
3161 return EINVAL;
3162 }
3163 switch (optname) {
3164 case TCP_NODELAY:
3165 *(int *)optval = tcp_nagle_disabled(sock->conn->pcb.tcp);
3166 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_NODELAY) = %s\n",
3167 s, (*(int *)optval) ? "on" : "off") );
3168 break;
3169 case TCP_KEEPALIVE:
3170 *(int *)optval = (int)sock->conn->pcb.tcp->keep_idle;
3171 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) = %d\n",
3172 s, *(int *)optval));
3173 break;
3174
3175 #if LWIP_TCP_KEEPALIVE
3176 case TCP_KEEPIDLE:
3177 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_idle / 1000);
3178 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) = %d\n",
3179 s, *(int *)optval));
3180 break;
3181 case TCP_KEEPINTVL:
3182 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_intvl / 1000);
3183 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) = %d\n",
3184 s, *(int *)optval));
3185 break;
3186 case TCP_KEEPCNT:
3187 *(int *)optval = (int)sock->conn->pcb.tcp->keep_cnt;
3188 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) = %d\n",
3189 s, *(int *)optval));
3190 break;
3191 #endif /* LWIP_TCP_KEEPALIVE */
3192 default:
3193 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
3194 s, optname));
3195 err = ENOPROTOOPT;
3196 break;
3197 } /* switch (optname) */
3198 break;
3199 #endif /* LWIP_TCP */
3200
3201 #if LWIP_IPV6
3202 /* Level: IPPROTO_IPV6 */
3203 case IPPROTO_IPV6:
3204 switch (optname) {
3205 case IPV6_V6ONLY:
3206 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3207 *(int *)optval = (netconn_get_ipv6only(sock->conn) ? 1 : 0);
3208 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY) = %d\n",
3209 s, *(int *)optval));
3210 break;
3211 default:
3212 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
3213 s, optname));
3214 err = ENOPROTOOPT;
3215 break;
3216 } /* switch (optname) */
3217 break;
3218 #endif /* LWIP_IPV6 */
3219
3220 #if LWIP_UDP && LWIP_UDPLITE
3221 /* Level: IPPROTO_UDPLITE */
3222 case IPPROTO_UDPLITE:
3223 /* Special case: all IPPROTO_UDPLITE options take an int */
3224 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3225 /* If this is not a UDP lite socket, ignore any options. */
3226 if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
3227 done_socket(sock);
3228 return ENOPROTOOPT;
3229 }
3230 switch (optname) {
3231 case UDPLITE_SEND_CSCOV:
3232 *(int *)optval = sock->conn->pcb.udp->chksum_len_tx;
3233 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) = %d\n",
3234 s, (*(int *)optval)) );
3235 break;
3236 case UDPLITE_RECV_CSCOV:
3237 *(int *)optval = sock->conn->pcb.udp->chksum_len_rx;
3238 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) = %d\n",
3239 s, (*(int *)optval)) );
3240 break;
3241 default:
3242 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
3243 s, optname));
3244 err = ENOPROTOOPT;
3245 break;
3246 } /* switch (optname) */
3247 break;
3248 #endif /* LWIP_UDP && LWIP_UDPLITE */
3249 /* Level: IPPROTO_RAW */
3250 case IPPROTO_RAW:
3251 switch (optname) {
3252 #if LWIP_IPV6 && LWIP_RAW
3253 case IPV6_CHECKSUM:
3254 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_RAW);
3255 if (sock->conn->pcb.raw->chksum_reqd == 0) {
3256 *(int *)optval = -1;
3257 } else {
3258 *(int *)optval = sock->conn->pcb.raw->chksum_offset;
3259 }
3260 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM) = %d\n",
3261 s, (*(int *)optval)) );
3262 break;
3263 #endif /* LWIP_IPV6 && LWIP_RAW */
3264 default:
3265 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
3266 s, optname));
3267 err = ENOPROTOOPT;
3268 break;
3269 } /* switch (optname) */
3270 break;
3271 default:
3272 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
3273 s, level, optname));
3274 err = ENOPROTOOPT;
3275 break;
3276 } /* switch (level) */
3277
3278 done_socket(sock);
3279 return err;
3280 }
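/* Usage sketch for the getter above: an illustrative example only. It assumes
 * LWIP_SOCKET, LWIP_TCP and LWIP_TCP_KEEPALIVE are enabled in lwipopts.h;
 * 'fd' and example_query_tcp_options() are placeholder names.
 *
 *   #include "lwip/sockets.h"
 *
 *   static void example_query_tcp_options(int fd)
 *   {
 *     int nodelay = 0, keepidle = 0;
 *     socklen_t len = sizeof(int);
 *     if (lwip_getsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &nodelay, &len) == 0) {
 *       // nodelay == 1 means the Nagle algorithm is disabled
 *     }
 *     len = sizeof(int);
 *     if (lwip_getsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &keepidle, &len) == 0) {
 *       // reported in seconds (keep_idle / 1000, see the case above)
 *     }
 *   }
 *
 * On failure the call returns -1 and errno holds the reason, e.g.
 * ENOPROTOOPT for options that are not implemented here.
 */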
3281
3282 int
3283 lwip_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen)
3284 {
3285 int err = 0;
3286 struct lwip_sock *sock = get_socket(s);
3287 #if !LWIP_TCPIP_CORE_LOCKING
3288 err_t cberr;
3289 LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
3290 #endif /* !LWIP_TCPIP_CORE_LOCKING */
3291
3292 if (!sock) {
3293 return -1;
3294 }
3295
3296 if (NULL == optval) {
3297 sock_set_errno(sock, EFAULT);
3298 done_socket(sock);
3299 return -1;
3300 }
3301
3302 #if LWIP_TCPIP_CORE_LOCKING
3303 /* core-locking can just call the -impl function */
3304 LOCK_TCPIP_CORE();
3305 err = lwip_setsockopt_impl(s, level, optname, optval, optlen);
3306 UNLOCK_TCPIP_CORE();
3307 #if LWIP_LOWPOWER
3308 tcpip_send_msg_na(LOW_NON_BLOCK);
3309 #endif
3310
3311 #else /* LWIP_TCPIP_CORE_LOCKING */
3312
3313 #if LWIP_MPU_COMPATIBLE
3314 /* MPU_COMPATIBLE copies the optval data, so check for max size here */
3315 if (optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
3316 sock_set_errno(sock, ENOBUFS);
3317 done_socket(sock);
3318 return -1;
3319 }
3320 #endif /* LWIP_MPU_COMPATIBLE */
3321
3322 LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
3323 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
3324 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
3325 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
3326 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = optlen;
3327 #if LWIP_MPU_COMPATIBLE
3328 MEMCPY(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, optval, optlen);
3329 #else /* LWIP_MPU_COMPATIBLE */
3330 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.pc = (const void *)optval;
3331 #endif /* LWIP_MPU_COMPATIBLE */
3332 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
3333 #if LWIP_NETCONN_SEM_PER_THREAD
3334 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
3335 #else
3336 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
3337 #endif
3338 cberr = tcpip_callback(lwip_setsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
3339 if (cberr != ERR_OK) {
3340 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
3341 sock_set_errno(sock, err_to_errno(cberr));
3342 done_socket(sock);
3343 return -1;
3344 }
3345 sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);
3346
3347 /* maybe lwip_setsockopt_impl has changed err */
3348 err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
3349 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
3350 #endif /* LWIP_TCPIP_CORE_LOCKING */
3351
3352 sock_set_errno(sock, err);
3353 done_socket(sock);
3354 return err ? -1 : 0;
3355 }
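/* Usage sketch for lwip_setsockopt(): illustrative only. It assumes
 * LWIP_SO_RCVTIMEO is enabled and the default LWIP_SO_SNDRCVTIMEO_NONSTANDARD==0,
 * so SO_RCVTIMEO takes a struct timeval; 'fd' is a placeholder descriptor.
 *
 *   #include "lwip/sockets.h"
 *
 *   static int example_tune_socket(int fd)
 *   {
 *     int nodelay = 1;
 *     struct timeval tv;
 *     tv.tv_sec = 5;
 *     tv.tv_usec = 0;
 *     if (lwip_setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &nodelay, sizeof(nodelay)) < 0) {
 *       return -1; // errno was set via sock_set_errno() above
 *     }
 *     return lwip_setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *   }
 */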
3356
3357 #if !LWIP_TCPIP_CORE_LOCKING
3358 /** lwip_setsockopt_callback: only used without CORE_LOCKING
3359 * to get into the tcpip_thread
3360 */
3361 static void
3362 lwip_setsockopt_callback(void *arg)
3363 {
3364 struct lwip_setgetsockopt_data *data;
3365 LWIP_ASSERT("arg != NULL", arg != NULL);
3366 data = (struct lwip_setgetsockopt_data *)arg;
3367
3368 data->err = lwip_setsockopt_impl(data->s, data->level, data->optname,
3369 #if LWIP_MPU_COMPATIBLE
3370 data->optval,
3371 #else /* LWIP_MPU_COMPATIBLE */
3372 data->optval.pc,
3373 #endif /* LWIP_MPU_COMPATIBLE */
3374 data->optlen);
3375
3376 sys_sem_signal((sys_sem_t *)(data->completed_sem));
3377 }
3378 #endif /* LWIP_TCPIP_CORE_LOCKING */
3379
3380 /** lwip_setsockopt_impl: the actual implementation of setsockopt:
3381 * same arguments as lwip_setsockopt, either called directly or through callback
3382 */
3383 static int
3384 lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen)
3385 {
3386 int err = 0;
3387 struct lwip_sock *sock = tryget_socket(s);
3388 if (!sock) {
3389 return EBADF;
3390 }
3391
3392 #ifdef LWIP_HOOK_SOCKETS_SETSOCKOPT
3393 if (LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) {
3394 return err;
3395 }
3396 #endif
3397
3398 switch (level) {
3399
3400 /* Level: SOL_SOCKET */
3401 case SOL_SOCKET:
3402 switch (optname) {
3403
3404 /* SO_ACCEPTCONN is get-only */
3405
3406 /* The option flags */
3407 case SO_BROADCAST:
3408 case SO_KEEPALIVE:
3409 #if SO_REUSE
3410 case SO_REUSEADDR:
3411 #endif /* SO_REUSE */
3412 if ((optname == SO_BROADCAST) &&
3413 (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
3414 done_socket(sock);
3415 return ENOPROTOOPT;
3416 }
3417
3418 optname = lwip_sockopt_to_ipopt(optname);
3419
3420 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3421 if (*(const int *)optval) {
3422 ip_set_option(sock->conn->pcb.ip, optname);
3423 } else {
3424 ip_reset_option(sock->conn->pcb.ip, optname);
3425 }
3426 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, optname=0x%x, ..) -> %s\n",
3427 s, optname, (*(const int *)optval ? "on" : "off")));
3428 break;
3429
3430 /* SO_TYPE is get-only */
3431 /* SO_ERROR is get-only */
3432
3433 #if LWIP_SO_SNDTIMEO
3434 case SO_SNDTIMEO: {
3435 long ms_long;
3436 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3437 ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
3438 if (ms_long < 0) {
3439 done_socket(sock);
3440 return EINVAL;
3441 }
3442 netconn_set_sendtimeout(sock->conn, ms_long);
3443 break;
3444 }
3445 #endif /* LWIP_SO_SNDTIMEO */
3446 #if LWIP_SO_RCVTIMEO
3447 case SO_RCVTIMEO: {
3448 long ms_long;
3449 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3450 ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
3451 if (ms_long < 0) {
3452 done_socket(sock);
3453 return EINVAL;
3454 }
3455 netconn_set_recvtimeout(sock->conn, (u32_t)ms_long);
3456 break;
3457 }
3458 #endif /* LWIP_SO_RCVTIMEO */
3459 #if LWIP_SO_RCVBUF
3460 case SO_RCVBUF:
3461 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, int);
3462 netconn_set_recvbufsize(sock->conn, *(const int *)optval);
3463 break;
3464 #endif /* LWIP_SO_RCVBUF */
3465 #if LWIP_SO_LINGER
3466 case SO_LINGER: {
3467 const struct linger *linger = (const struct linger *)optval;
3468 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct linger);
3469 if (linger->l_onoff) {
3470 int lingersec = linger->l_linger;
3471 if (lingersec < 0) {
3472 done_socket(sock);
3473 return EINVAL;
3474 }
3475 if (lingersec > 0xFFFF) {
3476 lingersec = 0xFFFF;
3477 }
3478 sock->conn->linger = (s16_t)lingersec;
3479 } else {
3480 sock->conn->linger = -1;
3481 }
3482 }
3483 break;
3484 #endif /* LWIP_SO_LINGER */
3485 #if LWIP_UDP
3486 case SO_NO_CHECK:
3487 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
3488 #if LWIP_UDPLITE
3489 if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) {
3490 /* this flag is only available for UDP, not for UDP lite */
3491 done_socket(sock);
3492 return EAFNOSUPPORT;
3493 }
3494 #endif /* LWIP_UDPLITE */
3495 if (*(const int *)optval) {
3496 udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
3497 } else {
3498 udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
3499 }
3500 break;
3501 #endif /* LWIP_UDP */
3502 case SO_BINDTODEVICE: {
3503 const struct ifreq *iface;
3504 struct netif *n = NULL;
3505
3506 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct ifreq);
3507
3508 iface = (const struct ifreq *)optval;
3509 if (iface->ifr_name[0] != 0) {
3510 n = netif_find(iface->ifr_name);
3511 if (n == NULL) {
3512 done_socket(sock);
3513 return ENODEV;
3514 }
3515 }
3516
3517 switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
3518 #if LWIP_TCP
3519 case NETCONN_TCP:
3520 tcp_bind_netif(sock->conn->pcb.tcp, n);
3521 break;
3522 #endif
3523 #if LWIP_UDP
3524 case NETCONN_UDP:
3525 udp_bind_netif(sock->conn->pcb.udp, n);
3526 break;
3527 #endif
3528 #if LWIP_RAW
3529 case NETCONN_RAW:
3530 raw_bind_netif(sock->conn->pcb.raw, n);
3531 break;
3532 #endif
3533 default:
3534 LWIP_ASSERT("Unhandled netconn type in SO_BINDTODEVICE", 0);
3535 break;
3536 }
3537 }
3538 break;
3539 default:
3540 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
3541 s, optname));
3542 err = ENOPROTOOPT;
3543 break;
3544 } /* switch (optname) */
3545 break;
3546
3547 /* Level: IPPROTO_IP */
3548 case IPPROTO_IP:
3549 switch (optname) {
3550 case IP_TTL:
3551 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3552 sock->conn->pcb.ip->ttl = (u8_t)(*(const int *)optval);
3553 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TTL, ..) -> %d\n",
3554 s, sock->conn->pcb.ip->ttl));
3555 break;
3556 case IP_TOS:
3557 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3558 sock->conn->pcb.ip->tos = (u8_t)(*(const int *)optval);
3559 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TOS, ..)-> %d\n",
3560 s, sock->conn->pcb.ip->tos));
3561 break;
3562 #if LWIP_NETBUF_RECVINFO
3563 case IP_PKTINFO:
3564 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
3565 if (*(const int *)optval) {
3566 sock->conn->flags |= NETCONN_FLAG_PKTINFO;
3567 } else {
3568 sock->conn->flags &= ~NETCONN_FLAG_PKTINFO;
3569 }
3570 break;
3571 #endif /* LWIP_NETBUF_RECVINFO */
3572 #if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
3573 case IP_MULTICAST_TTL:
3574 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3575 udp_set_multicast_ttl(sock->conn->pcb.udp, (u8_t)(*(const u8_t *)optval));
3576 break;
3577 case IP_MULTICAST_IF: {
3578 ip4_addr_t if_addr;
3579 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct in_addr, NETCONN_UDP);
3580 inet_addr_to_ip4addr(&if_addr, (const struct in_addr *)optval);
3581 udp_set_multicast_netif_addr(sock->conn->pcb.udp, &if_addr);
3582 }
3583 break;
3584 case IP_MULTICAST_LOOP:
3585 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3586 if (*(const u8_t *)optval) {
3587 udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
3588 } else {
3589 udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
3590 }
3591 break;
3592 #endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
3593 #if LWIP_IGMP
3594 case IP_ADD_MEMBERSHIP:
3595 case IP_DROP_MEMBERSHIP: {
3596 /* If this is a TCP or a RAW socket, ignore these options. */
3597 err_t igmp_err;
3598 const struct ip_mreq *imr = (const struct ip_mreq *)optval;
3599 ip4_addr_t if_addr;
3600 ip4_addr_t multi_addr;
3601 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ip_mreq, NETCONN_UDP);
3602 inet_addr_to_ip4addr(&if_addr, &imr->imr_interface);
3603 inet_addr_to_ip4addr(&multi_addr, &imr->imr_multiaddr);
3604 if (optname == IP_ADD_MEMBERSHIP) {
3605 if (!lwip_socket_register_membership(s, &if_addr, &multi_addr)) {
3606 /* cannot track membership (out of memory) */
3607 err = ENOMEM;
3608 igmp_err = ERR_OK;
3609 } else {
3610 igmp_err = igmp_joingroup(&if_addr, &multi_addr);
3611 }
3612 } else {
3613 igmp_err = igmp_leavegroup(&if_addr, &multi_addr);
3614 lwip_socket_unregister_membership(s, &if_addr, &multi_addr);
3615 }
3616 if (igmp_err != ERR_OK) {
3617 err = EADDRNOTAVAIL;
3618 }
3619 }
3620 break;
3621 #endif /* LWIP_IGMP */
3622 default:
3623 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
3624 s, optname));
3625 err = ENOPROTOOPT;
3626 break;
3627 } /* switch (optname) */
3628 break;
3629
3630 #if LWIP_TCP
3631 /* Level: IPPROTO_TCP */
3632 case IPPROTO_TCP:
3633 /* Special case: all IPPROTO_TCP options take an int */
3634 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP);
3635 if (sock->conn->pcb.tcp->state == LISTEN) {
3636 done_socket(sock);
3637 return EINVAL;
3638 }
3639 switch (optname) {
3640 case TCP_NODELAY:
3641 if (*(const int *)optval) {
3642 tcp_nagle_disable(sock->conn->pcb.tcp);
3643 } else {
3644 tcp_nagle_enable(sock->conn->pcb.tcp);
3645 }
3646 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_NODELAY) -> %s\n",
3647 s, (*(const int *)optval) ? "on" : "off") );
3648 break;
3649 case TCP_KEEPALIVE:
3650 sock->conn->pcb.tcp->keep_idle = (u32_t)(*(const int *)optval);
3651 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) -> %"U32_F"\n",
3652 s, sock->conn->pcb.tcp->keep_idle));
3653 break;
3654
3655 #if LWIP_TCP_KEEPALIVE
3656 case TCP_KEEPIDLE:
3657 sock->conn->pcb.tcp->keep_idle = 1000 * (u32_t)(*(const int *)optval);
3658 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) -> %"U32_F"\n",
3659 s, sock->conn->pcb.tcp->keep_idle));
3660 break;
3661 case TCP_KEEPINTVL:
3662 sock->conn->pcb.tcp->keep_intvl = 1000 * (u32_t)(*(const int *)optval);
3663 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) -> %"U32_F"\n",
3664 s, sock->conn->pcb.tcp->keep_intvl));
3665 break;
3666 case TCP_KEEPCNT:
3667 sock->conn->pcb.tcp->keep_cnt = (u32_t)(*(const int *)optval);
3668 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) -> %"U32_F"\n",
3669 s, sock->conn->pcb.tcp->keep_cnt));
3670 break;
3671 #endif /* LWIP_TCP_KEEPALIVE */
3672 default:
3673 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
3674 s, optname));
3675 err = ENOPROTOOPT;
3676 break;
3677 } /* switch (optname) */
3678 break;
3679 #endif /* LWIP_TCP */
3680
3681 #if LWIP_IPV6
3682 /* Level: IPPROTO_IPV6 */
3683 case IPPROTO_IPV6:
3684 switch (optname) {
3685 case IPV6_V6ONLY:
3686 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3687 if (*(const int *)optval) {
3688 netconn_set_ipv6only(sock->conn, 1);
3689 } else {
3690 netconn_set_ipv6only(sock->conn, 0);
3691 }
3692 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY, ..) -> %d\n",
3693 s, (netconn_get_ipv6only(sock->conn) ? 1 : 0)));
3694 break;
3695 #if LWIP_IPV6_MLD
3696 case IPV6_JOIN_GROUP:
3697 case IPV6_LEAVE_GROUP: {
3698 /* If this is a TCP or a RAW socket, ignore these options. */
3699 err_t mld6_err;
3700 struct netif *netif;
3701 ip6_addr_t multi_addr;
3702 const struct ipv6_mreq *imr = (const struct ipv6_mreq *)optval;
3703 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ipv6_mreq, NETCONN_UDP);
3704 inet6_addr_to_ip6addr(&multi_addr, &imr->ipv6mr_multiaddr);
3705 LWIP_ASSERT("Invalid netif index", imr->ipv6mr_interface <= 0xFFu);
3706 #ifdef LOSCFG_NET_CONTAINER
3707 netif = netif_get_by_index((u8_t)imr->ipv6mr_interface, get_net_group_from_ippcb(sock->conn->pcb.ip));
3708 #else
3709 netif = netif_get_by_index((u8_t)imr->ipv6mr_interface);
3710 #endif
3711 if (netif == NULL) {
3712 err = EADDRNOTAVAIL;
3713 break;
3714 }
3715
3716 if (optname == IPV6_JOIN_GROUP) {
3717 if (!lwip_socket_register_mld6_membership(s, imr->ipv6mr_interface, &multi_addr)) {
3718 /* cannot track membership (out of memory) */
3719 err = ENOMEM;
3720 mld6_err = ERR_OK;
3721 } else {
3722 mld6_err = mld6_joingroup_netif(netif, &multi_addr);
3723 }
3724 } else {
3725 mld6_err = mld6_leavegroup_netif(netif, &multi_addr);
3726 lwip_socket_unregister_mld6_membership(s, imr->ipv6mr_interface, &multi_addr);
3727 }
3728 if (mld6_err != ERR_OK) {
3729 err = EADDRNOTAVAIL;
3730 }
3731 }
3732 break;
3733 #endif /* LWIP_IPV6_MLD */
3734 default:
3735 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
3736 s, optname));
3737 err = ENOPROTOOPT;
3738 break;
3739 } /* switch (optname) */
3740 break;
3741 #endif /* LWIP_IPV6 */
3742
3743 #if LWIP_UDP && LWIP_UDPLITE
3744 /* Level: IPPROTO_UDPLITE */
3745 case IPPROTO_UDPLITE:
3746 /* Special case: all IPPROTO_UDPLITE options take an int */
3747 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3748 /* If this is not a UDP lite socket, ignore any options. */
3749 if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
3750 done_socket(sock);
3751 return ENOPROTOOPT;
3752 }
3753 switch (optname) {
3754 case UDPLITE_SEND_CSCOV:
3755 if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
3756 /* don't allow illegal values! */
3757 sock->conn->pcb.udp->chksum_len_tx = 8;
3758 } else {
3759 sock->conn->pcb.udp->chksum_len_tx = (u16_t) * (const int *)optval;
3760 }
3761 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) -> %d\n",
3762 s, (*(const int *)optval)) );
3763 break;
3764 case UDPLITE_RECV_CSCOV:
3765 if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
3766 /* don't allow illegal values! */
3767 sock->conn->pcb.udp->chksum_len_rx = 8;
3768 } else {
3769 sock->conn->pcb.udp->chksum_len_rx = (u16_t) * (const int *)optval;
3770 }
3771 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) -> %d\n",
3772 s, (*(const int *)optval)) );
3773 break;
3774 default:
3775 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
3776 s, optname));
3777 err = ENOPROTOOPT;
3778 break;
3779 } /* switch (optname) */
3780 break;
3781 #endif /* LWIP_UDP && LWIP_UDPLITE */
3782 /* Level: IPPROTO_RAW */
3783 case IPPROTO_RAW:
3784 switch (optname) {
3785 #if LWIP_IPV6 && LWIP_RAW
3786 case IPV6_CHECKSUM:
3787 /* It should not be possible to disable the checksum generation with ICMPv6
3788 * as per RFC 3542 chapter 3.1 */
3789 if (sock->conn->pcb.raw->protocol == IPPROTO_ICMPV6) {
3790 done_socket(sock);
3791 return EINVAL;
3792 }
3793
3794 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_RAW);
3795 if (*(const int *)optval < 0) {
3796 sock->conn->pcb.raw->chksum_reqd = 0;
3797 } else if (*(const int *)optval & 1) {
3798 /* Per RFC3542, odd offsets are not allowed */
3799 done_socket(sock);
3800 return EINVAL;
3801 } else {
3802 sock->conn->pcb.raw->chksum_reqd = 1;
3803 sock->conn->pcb.raw->chksum_offset = (u16_t) * (const int *)optval;
3804 }
3805 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM, ..) -> %d\n",
3806 s, sock->conn->pcb.raw->chksum_reqd));
3807 break;
3808 #endif /* LWIP_IPV6 && LWIP_RAW */
3809 default:
3810 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
3811 s, optname));
3812 err = ENOPROTOOPT;
3813 break;
3814 } /* switch (optname) */
3815 break;
3816 default:
3817 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
3818 s, level, optname));
3819 err = ENOPROTOOPT;
3820 break;
3821 } /* switch (level) */
3822
3823 done_socket(sock);
3824 return err;
3825 }
3826
3827 int
3828 lwip_ioctl(int s, long cmd, void *argp)
3829 {
3830 struct lwip_sock *sock = get_socket(s);
3831 u8_t val;
3832 #if LWIP_SO_RCVBUF
3833 int recv_avail;
3834 #endif /* LWIP_SO_RCVBUF */
3835
3836 if (!sock) {
3837 return -1;
3838 }
3839
3840 switch (cmd) {
3841 #if LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE
3842 case FIONREAD:
3843 if (!argp) {
3844 sock_set_errno(sock, EINVAL);
3845 done_socket(sock);
3846 return -1;
3847 }
3848 #if LWIP_FIONREAD_LINUXMODE
3849 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
3850 struct netbuf *nb;
3851 if (sock->lastdata.netbuf) {
3852 nb = sock->lastdata.netbuf;
3853 *((int *)argp) = nb->p->tot_len;
3854 } else {
3855 struct netbuf *rxbuf;
3856 err_t err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &rxbuf, NETCONN_DONTBLOCK);
3857 if (err != ERR_OK) {
3858 *((int *)argp) = 0;
3859 } else {
3860 sock->lastdata.netbuf = rxbuf;
3861 *((int *)argp) = rxbuf->p->tot_len;
3862 }
3863 }
3864 done_socket(sock);
3865 return 0;
3866 }
3867 #endif /* LWIP_FIONREAD_LINUXMODE */
3868
3869 #if LWIP_SO_RCVBUF
3870 /* we come here if either LWIP_FIONREAD_LINUXMODE==0 or this is a TCP socket */
3871 SYS_ARCH_GET(sock->conn->recv_avail, recv_avail);
3872 if (recv_avail < 0) {
3873 recv_avail = 0;
3874 }
3875
3876 /* Check if there is data left from the last recv operation. /maq 041215 */
3877 if (sock->lastdata.netbuf) {
3878 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
3879 recv_avail += sock->lastdata.pbuf->tot_len;
3880 } else {
3881 recv_avail += sock->lastdata.netbuf->p->tot_len;
3882 }
3883 }
3884 *((int *)argp) = recv_avail;
3885
3886 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONREAD, %p) = %"U16_F"\n", s, argp, *((u16_t *)argp)));
3887 sock_set_errno(sock, 0);
3888 done_socket(sock);
3889 return 0;
3890 #else /* LWIP_SO_RCVBUF */
3891 break;
3892 #endif /* LWIP_SO_RCVBUF */
3893 #endif /* LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE */
3894
3895 case (long)FIONBIO:
3896 val = 0;
3897 if (argp && *(int *)argp) {
3898 val = 1;
3899 }
3900 netconn_set_nonblocking(sock->conn, val);
3901 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONBIO, %d)\n", s, val));
3902 sock_set_errno(sock, 0);
3903 done_socket(sock);
3904 return 0;
3905
3906 default:
3907 IOCTL_CMD_CASE_HANDLER();
3908 break;
3909 } /* switch (cmd) */
3910 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, UNIMPL: 0x%lx, %p)\n", s, cmd, argp));
3911 sock_set_errno(sock, ENOSYS); /* not yet implemented */
3912 done_socket(sock);
3913 return -1;
3914 }
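/* Usage sketch for lwip_ioctl(): illustrative only. FIONBIO is always handled;
 * FIONREAD additionally needs LWIP_SO_RCVBUF (or LWIP_FIONREAD_LINUXMODE), as
 * shown above. 'fd' is a placeholder descriptor.
 *
 *   #include "lwip/sockets.h"
 *
 *   static void example_ioctls(int fd)
 *   {
 *     int on = 1;
 *     int pending = 0;
 *     (void)lwip_ioctl(fd, FIONBIO, &on);        // switch to non-blocking mode
 *     if (lwip_ioctl(fd, FIONREAD, &pending) == 0) {
 *       // 'pending' now holds the number of bytes ready to be received
 *     }
 *   }
 */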
3915
3916 /** A minimal implementation of fcntl.
3917 * Currently only the commands F_GETFL and F_SETFL are implemented.
3918 * F_GETFL reports the O_NONBLOCK flag together with the access mode;
3919 * F_SETFL supports only the O_NONBLOCK flag.
3920 */
3921 int
3922 lwip_fcntl(int s, int cmd, int val)
3923 {
3924 struct lwip_sock *sock = get_socket(s);
3925 int ret = -1;
3926 int op_mode = 0;
3927
3928 if (!sock) {
3929 return -1;
3930 }
3931
3932 switch (cmd) {
3933 case F_GETFL:
3934 ret = netconn_is_nonblocking(sock->conn) ? O_NONBLOCK : 0;
3935 sock_set_errno(sock, 0);
3936
3937 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
3938 #if LWIP_TCPIP_CORE_LOCKING
3939 LOCK_TCPIP_CORE();
3940 #else
3941 SYS_ARCH_DECL_PROTECT(lev);
3942 /* the proper thing to do here would be to get into the tcpip_thread,
3943 but locking should be OK as well since we only *read* some flags */
3944 SYS_ARCH_PROTECT(lev);
3945 #endif
3946 #if LWIP_TCP
3947 if (sock->conn->pcb.tcp) {
3948 if (!(sock->conn->pcb.tcp->flags & TF_RXCLOSED)) {
3949 op_mode |= O_RDONLY;
3950 }
3951 if (!(sock->conn->pcb.tcp->flags & TF_FIN)) {
3952 op_mode |= O_WRONLY;
3953 }
3954 }
3955 #endif
3956 #if LWIP_TCPIP_CORE_LOCKING
3957 UNLOCK_TCPIP_CORE();
3958 #else
3959 SYS_ARCH_UNPROTECT(lev);
3960 #endif
3961 } else {
3962 op_mode |= O_RDWR;
3963 }
3964
3965 /* ensure O_RDWR for (O_RDONLY|O_WRONLY) != O_RDWR cases */
3966 ret |= (op_mode == (O_RDONLY | O_WRONLY)) ? O_RDWR : op_mode;
3967
3968 break;
3969 case F_SETFL:
3970 /* Bits corresponding to the file access mode and the file creation flags [..] that are set in arg shall be ignored */
3971 val &= ~(O_RDONLY | O_WRONLY | O_RDWR);
3972 if ((val & ~O_NONBLOCK) == 0) {
3973 /* only O_NONBLOCK, all other bits are zero */
3974 netconn_set_nonblocking(sock->conn, val & O_NONBLOCK);
3975 ret = 0;
3976 sock_set_errno(sock, 0);
3977 } else {
3978 sock_set_errno(sock, ENOSYS); /* not yet implemented */
3979 }
3980 break;
3981 default:
3982 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_fcntl(%d, UNIMPL: %d, %d)\n", s, cmd, val));
3983 sock_set_errno(sock, ENOSYS); /* not yet implemented */
3984 break;
3985 }
3986 done_socket(sock);
3987 return ret;
3988 }
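/* Usage sketch for lwip_fcntl(): illustrative only, mirroring the two commands
 * implemented above. 'fd' is a placeholder descriptor.
 *
 *   #include "lwip/sockets.h"
 *
 *   static int example_set_nonblocking(int fd)
 *   {
 *     int flags = lwip_fcntl(fd, F_GETFL, 0);
 *     if (flags < 0) {
 *       return -1;
 *     }
 *     // access-mode bits returned by F_GETFL are ignored by F_SETFL (see above)
 *     return lwip_fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 *   }
 */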
3989
3990 #if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
3991 int
3992 fcntl(int s, int cmd, ...)
3993 {
3994 va_list ap;
3995 int val;
3996
3997 va_start(ap, cmd);
3998 val = va_arg(ap, int);
3999 va_end(ap);
4000 return lwip_fcntl(s, cmd, val);
4001 }
4002 #endif
4003
4004 const char *
4005 lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size)
4006 {
4007 const char *ret = NULL;
4008 int size_int = (int)size;
4009 if (size_int < 0) {
4010 set_errno(ENOSPC);
4011 return NULL;
4012 }
4013 switch (af) {
4014 #if LWIP_IPV4
4015 case AF_INET:
4016 ret = ip4addr_ntoa_r((const ip4_addr_t *)src, dst, size_int);
4017 if (ret == NULL) {
4018 set_errno(ENOSPC);
4019 }
4020 break;
4021 #endif
4022 #if LWIP_IPV6
4023 case AF_INET6:
4024 ret = ip6addr_ntoa_r((const ip6_addr_t *)src, dst, size_int);
4025 if (ret == NULL) {
4026 set_errno(ENOSPC);
4027 }
4028 break;
4029 #endif
4030 default:
4031 set_errno(EAFNOSUPPORT);
4032 break;
4033 }
4034 return ret;
4035 }
4036
4037 int
4038 lwip_inet_pton(int af, const char *src, void *dst)
4039 {
4040 int err;
4041 switch (af) {
4042 #if LWIP_IPV4
4043 case AF_INET:
4044 err = ip4addr_aton(src, (ip4_addr_t *)dst);
4045 break;
4046 #endif
4047 #if LWIP_IPV6
4048 case AF_INET6: {
4049 /* convert into temporary variable since ip6_addr_t might be larger
4050 than in6_addr when scopes are enabled */
4051 ip6_addr_t addr;
4052 err = ip6addr_aton(src, &addr);
4053 if (err) {
4054 memcpy(dst, &addr.addr, sizeof(addr.addr));
4055 }
4056 break;
4057 }
4058 #endif
4059 default:
4060 err = -1;
4061 set_errno(EAFNOSUPPORT);
4062 break;
4063 }
4064 return err;
4065 }
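/* Usage sketch for the two converters above: illustrative only. Return values
 * follow POSIX: lwip_inet_pton() yields 1 on success, 0 for an unparsable
 * string and -1 for an unsupported address family; the buffer size below is
 * chosen by hand for the example address.
 *
 *   #include "lwip/sockets.h"
 *
 *   static void example_addr_conversion(void)
 *   {
 *     struct in_addr ia;
 *     char buf[16];
 *     if (lwip_inet_pton(AF_INET, "192.0.2.1", &ia) == 1) {
 *       // round-trip back to text; returns NULL and sets ENOSPC if 'buf' is too small
 *       (void)lwip_inet_ntop(AF_INET, &ia, buf, sizeof(buf));
 *     }
 *   }
 */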
4066
4067 #if LWIP_IGMP
4068 /** Register a new IGMP membership. On socket close, the membership is dropped automatically.
4069 *
4070 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4071 *
4072 * @return 1 on success, 0 on failure
4073 */
4074 static int
4075 lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
4076 {
4077 struct lwip_sock *sock = get_socket(s);
4078 int i;
4079
4080 if (!sock) {
4081 return 0;
4082 }
4083
4084 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4085 if (socket_ipv4_multicast_memberships[i].sock == NULL) {
4086 socket_ipv4_multicast_memberships[i].sock = sock;
4087 ip4_addr_copy(socket_ipv4_multicast_memberships[i].if_addr, *if_addr);
4088 ip4_addr_copy(socket_ipv4_multicast_memberships[i].multi_addr, *multi_addr);
4089 done_socket(sock);
4090 return 1;
4091 }
4092 }
4093 done_socket(sock);
4094 return 0;
4095 }
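/* How the tracking above is reached from application code: an illustrative
 * sketch only. It assumes LWIP_IGMP and LWIP_UDP are enabled; the group
 * address and interface choice are placeholders.
 *
 *   #include "lwip/sockets.h"
 *
 *   static int example_join_ipv4_group(int udp_fd)
 *   {
 *     struct ip_mreq mreq;
 *     mreq.imr_multiaddr.s_addr = inet_addr("239.0.0.1"); // group to join
 *     mreq.imr_interface.s_addr = INADDR_ANY;             // let the stack pick the netif
 *     // setsockopt(IP_ADD_MEMBERSHIP) calls igmp_joingroup() and registers the
 *     // membership here so it is dropped automatically when the socket closes
 *     return lwip_setsockopt(udp_fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *   }
 */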
4096
4097 /** Unregister a previously registered membership. This prevents dropping the membership
4098 * on socket close.
4099 *
4100 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4101 */
4102 static void
4103 lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
4104 {
4105 struct lwip_sock *sock = get_socket(s);
4106 int i;
4107
4108 if (!sock) {
4109 return;
4110 }
4111
4112 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4113 if ((socket_ipv4_multicast_memberships[i].sock == sock) &&
4114 ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].if_addr, if_addr) &&
4115 ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].multi_addr, multi_addr)) {
4116 socket_ipv4_multicast_memberships[i].sock = NULL;
4117 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
4118 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
4119 break;
4120 }
4121 }
4122 done_socket(sock);
4123 }
4124
4125 /** Drop all memberships of a socket that were not dropped explicitly via setsockopt.
4126 *
4127 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
4128 */
4129 static void
4130 lwip_socket_drop_registered_memberships(int s)
4131 {
4132 struct lwip_sock *sock = get_socket(s);
4133 int i;
4134
4135 if (!sock) {
4136 return;
4137 }
4138
4139 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4140 if (socket_ipv4_multicast_memberships[i].sock == sock) {
4141 ip_addr_t multi_addr, if_addr;
4142 ip_addr_copy_from_ip4(multi_addr, socket_ipv4_multicast_memberships[i].multi_addr);
4143 ip_addr_copy_from_ip4(if_addr, socket_ipv4_multicast_memberships[i].if_addr);
4144 socket_ipv4_multicast_memberships[i].sock = NULL;
4145 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
4146 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
4147
4148 netconn_join_leave_group(sock->conn, &multi_addr, &if_addr, NETCONN_LEAVE);
4149 }
4150 }
4151 done_socket(sock);
4152 }
4153 #endif /* LWIP_IGMP */
4154
4155 #if LWIP_IPV6_MLD
4156 /** Register a new MLD6 membership. On socket close, the membership is dropped automatically.
4157 *
4158 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4159 *
4160 * @return 1 on success, 0 on failure
4161 */
4162 static int
4163 lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
4164 {
4165 struct lwip_sock *sock = get_socket(s);
4166 int i;
4167
4168 if (!sock) {
4169 return 0;
4170 }
4171
4172 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4173 if (socket_ipv6_multicast_memberships[i].sock == NULL) {
4174 socket_ipv6_multicast_memberships[i].sock = sock;
4175 socket_ipv6_multicast_memberships[i].if_idx = (u8_t)if_idx;
4176 ip6_addr_copy(socket_ipv6_multicast_memberships[i].multi_addr, *multi_addr);
4177 done_socket(sock);
4178 return 1;
4179 }
4180 }
4181 done_socket(sock);
4182 return 0;
4183 }
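/* How the MLD6 tracking above is reached from application code: an
 * illustrative sketch only. It assumes LWIP_IPV6_MLD and LWIP_UDP are enabled;
 * the group address and the netif index (1) are placeholders.
 *
 *   #include "lwip/sockets.h"
 *
 *   static int example_join_ipv6_group(int udp6_fd)
 *   {
 *     struct ipv6_mreq mreq;
 *     lwip_inet_pton(AF_INET6, "ff02::fb", &mreq.ipv6mr_multiaddr);
 *     mreq.ipv6mr_interface = 1; // index of an existing netif
 *     // setsockopt(IPV6_JOIN_GROUP) calls mld6_joingroup_netif() and registers
 *     // the membership here so it is dropped when the socket closes
 *     return lwip_setsockopt(udp6_fd, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq, sizeof(mreq));
 *   }
 */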
4184
4185 /** Unregister a previously registered MLD6 membership. This prevents dropping the membership
4186 * on socket close.
4187 *
4188 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4189 */
4190 static void
4191 lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
4192 {
4193 struct lwip_sock *sock = get_socket(s);
4194 int i;
4195
4196 if (!sock) {
4197 return;
4198 }
4199
4200 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4201 if ((socket_ipv6_multicast_memberships[i].sock == sock) &&
4202 (socket_ipv6_multicast_memberships[i].if_idx == if_idx) &&
4203 ip6_addr_cmp(&socket_ipv6_multicast_memberships[i].multi_addr, multi_addr)) {
4204 socket_ipv6_multicast_memberships[i].sock = NULL;
4205 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4206 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4207 break;
4208 }
4209 }
4210 done_socket(sock);
4211 }
4212
4213 /** Drop all MLD6 memberships of a socket that were not dropped explicitly via setsockopt.
4214 *
4215 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
4216 */
4217 static void
4218 lwip_socket_drop_registered_mld6_memberships(int s)
4219 {
4220 struct lwip_sock *sock = get_socket(s);
4221 int i;
4222
4223 if (!sock) {
4224 return;
4225 }
4226
4227 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4228 if (socket_ipv6_multicast_memberships[i].sock == sock) {
4229 ip_addr_t multi_addr;
4230 u8_t if_idx;
4231
4232 ip_addr_copy_from_ip6(multi_addr, socket_ipv6_multicast_memberships[i].multi_addr);
4233 if_idx = socket_ipv6_multicast_memberships[i].if_idx;
4234
4235 socket_ipv6_multicast_memberships[i].sock = NULL;
4236 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4237 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4238
4239 netconn_join_leave_group_netif(sock->conn, &multi_addr, if_idx, NETCONN_LEAVE);
4240 }
4241 }
4242 done_socket(sock);
4243 }
4244 #endif /* LWIP_IPV6_MLD */
4245
4246 #endif /* LWIP_SOCKET */
4247