1 /**
2 * @file
3 * Sockets BSD-Like API module
4 */
5
6 /*
7 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without modification,
11 * are permitted provided that the following conditions are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 * 3. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
24 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
26 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
29 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
30 * OF SUCH DAMAGE.
31 *
32 * This file is part of the lwIP TCP/IP stack.
33 *
34 * Author: Adam Dunkels <adam@sics.se>
35 *
36 * Improved by Marc Boucher <marc@mbsi.ca> and David Haas <dhaas@alum.rpi.edu>
37 *
38 */
39
40 #include "lwip/opt.h"
41
42 #if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
43
44 #include "lwip/sockets.h"
45 #include "lwip/priv/sockets_priv.h"
46 #include "lwip/api.h"
47 #include "lwip/igmp.h"
48 #include "lwip/inet.h"
49 #include "lwip/tcp.h"
50 #include "lwip/raw.h"
51 #include "lwip/udp.h"
52 #include "lwip/memp.h"
53 #include "lwip/pbuf.h"
54 #include "lwip/netif.h"
55 #include "lwip/priv/tcpip_priv.h"
56 #include "lwip/mld6.h"
57 #if LWIP_CHECKSUM_ON_COPY
58 #include "lwip/inet_chksum.h"
59 #endif
60
61 #if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
62 #include <stdarg.h>
63 #endif
64
65 #include <string.h>
66
67 #ifdef LWIP_HOOK_FILENAME
68 #include LWIP_HOOK_FILENAME
69 #endif
70
71 /* If the netconn API is not required publicly, then we include the necessary
72 files here to get the implementation */
73 #if !LWIP_NETCONN
74 #undef LWIP_NETCONN
75 #define LWIP_NETCONN 1
76 #include "api_msg.c"
77 #include "api_lib.c"
78 #include "netbuf.c"
79 #undef LWIP_NETCONN
80 #define LWIP_NETCONN 0
81 #endif
82
83 #define API_SELECT_CB_VAR_REF(name) API_VAR_REF(name)
84 #define API_SELECT_CB_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_select_cb, name)
85 #define API_SELECT_CB_VAR_ALLOC(name, retblock) API_VAR_ALLOC_EXT(struct lwip_select_cb, MEMP_SELECT_CB, name, retblock)
86 #define API_SELECT_CB_VAR_FREE(name) API_VAR_FREE(MEMP_SELECT_CB, name)
87
88 #if LWIP_IPV4
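/* Helper macros to convert between an lwIP ip4_addr_t/port pair and a
   struct sockaddr_in (and back) */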
89 #define IP4ADDR_PORT_TO_SOCKADDR(sin, ipaddr, port) do { \
90 (sin)->sin_len = sizeof(struct sockaddr_in); \
91 (sin)->sin_family = AF_INET; \
92 (sin)->sin_port = lwip_htons((port)); \
93 inet_addr_from_ip4addr(&(sin)->sin_addr, ipaddr); \
94 memset((sin)->sin_zero, 0, SIN_ZERO_LEN); }while(0)
95 #define SOCKADDR4_TO_IP4ADDR_PORT(sin, ipaddr, port) do { \
96 inet_addr_to_ip4addr(ip_2_ip4(ipaddr), &((sin)->sin_addr)); \
97 (port) = lwip_ntohs((sin)->sin_port); }while(0)
98 #endif /* LWIP_IPV4 */
99
100 #if LWIP_IPV6
101 #define IP6ADDR_PORT_TO_SOCKADDR(sin6, ipaddr, port) do { \
102 (sin6)->sin6_len = sizeof(struct sockaddr_in6); \
103 (sin6)->sin6_family = AF_INET6; \
104 (sin6)->sin6_port = lwip_htons((port)); \
105 (sin6)->sin6_flowinfo = 0; \
106 inet6_addr_from_ip6addr(&(sin6)->sin6_addr, ipaddr); \
107 (sin6)->sin6_scope_id = ip6_addr_zone(ipaddr); }while(0)
108 #define SOCKADDR6_TO_IP6ADDR_PORT(sin6, ipaddr, port) do { \
109 inet6_addr_to_ip6addr(ip_2_ip6(ipaddr), &((sin6)->sin6_addr)); \
110 if (ip6_addr_has_scope(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { \
111 ip6_addr_set_zone(ip_2_ip6(ipaddr), (u8_t)((sin6)->sin6_scope_id)); \
112 } \
113 (port) = lwip_ntohs((sin6)->sin6_port); }while(0)
114 #endif /* LWIP_IPV6 */
115
116 #if LWIP_IPV4 && LWIP_IPV6
117 static void sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port);
118
119 #define IS_SOCK_ADDR_LEN_VALID(namelen) (((namelen) == sizeof(struct sockaddr_in)) || \
120 ((namelen) == sizeof(struct sockaddr_in6)))
121 #define IS_SOCK_ADDR_TYPE_VALID(name) (((name)->sa_family == AF_INET) || \
122 ((name)->sa_family == AF_INET6))
123 #define SOCK_ADDR_TYPE_MATCH(name, sock) \
124 ((((name)->sa_family == AF_INET) && !(NETCONNTYPE_ISIPV6((sock)->conn->type))) || \
125 (((name)->sa_family == AF_INET6) && (NETCONNTYPE_ISIPV6((sock)->conn->type))))
126 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) do { \
127 if (IP_IS_ANY_TYPE_VAL(*ipaddr) || IP_IS_V6_VAL(*ipaddr)) { \
128 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port); \
129 } else { \
130 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port); \
131 } } while(0)
132 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) sockaddr_to_ipaddr_port(sockaddr, ipaddr, &(port))
133 #define DOMAIN_TO_NETCONN_TYPE(domain, type) (((domain) == AF_INET) ? \
134 (type) : (enum netconn_type)((type) | NETCONN_TYPE_IPV6))
135 #elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */
136 #define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in6))
137 #define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET6)
138 #define SOCK_ADDR_TYPE_MATCH(name, sock) 1
139 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
140 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port)
141 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
142 SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, port)
143 #define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
144 #else /*-> LWIP_IPV4: LWIP_IPV4 && LWIP_IPV6 */
145 #define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in))
146 #define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET)
147 #define SOCK_ADDR_TYPE_MATCH(name, sock) 1
148 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
149 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port)
150 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
151 SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, port)
152 #define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
153 #endif /* LWIP_IPV6 */
154
155 #define IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) (((name)->sa_family == AF_UNSPEC) || \
156 IS_SOCK_ADDR_TYPE_VALID(name))
157 #define SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock) (((name)->sa_family == AF_UNSPEC) || \
158 SOCK_ADDR_TYPE_MATCH(name, sock))
159 #define IS_SOCK_ADDR_ALIGNED(name) ((((mem_ptr_t)(name)) % 4) == 0)
160
161
162 #define LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype) do { if ((optlen) < sizeof(opttype)) { done_socket(sock); return EINVAL; }}while(0)
163 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, opttype) do { \
164 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
165 if ((sock)->conn == NULL) { done_socket(sock); return EINVAL; } }while(0)
166 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype) do { \
167 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
168 if (((sock)->conn == NULL) || ((sock)->conn->pcb.tcp == NULL)) { done_socket(sock); return EINVAL; } }while(0)
169 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, opttype, netconntype) do { \
170 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype); \
171 if (NETCONNTYPE_GROUP(netconn_type((sock)->conn)) != netconntype) { done_socket(sock); return ENOPROTOOPT; } }while(0)
172
173
174 #define LWIP_SETGETSOCKOPT_DATA_VAR_REF(name) API_VAR_REF(name)
175 #define LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_setgetsockopt_data, name)
176 #define LWIP_SETGETSOCKOPT_DATA_VAR_FREE(name) API_VAR_FREE(MEMP_SOCKET_SETGETSOCKOPT_DATA, name)
177 #if LWIP_MPU_COMPATIBLE
178 #define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) do { \
179 name = (struct lwip_setgetsockopt_data *)memp_malloc(MEMP_SOCKET_SETGETSOCKOPT_DATA); \
180 if (name == NULL) { \
181 sock_set_errno(sock, ENOMEM); \
182 done_socket(sock); \
183 return -1; \
184 } }while(0)
185 #else /* LWIP_MPU_COMPATIBLE */
186 #define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock)
187 #endif /* LWIP_MPU_COMPATIBLE */
188
189 #if LWIP_SO_SNDRCVTIMEO_NONSTANDARD
190 #define LWIP_SO_SNDRCVTIMEO_OPTTYPE int
191 #define LWIP_SO_SNDRCVTIMEO_SET(optval, val) (*(int *)(optval) = (val))
192 #define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((long)*(const int*)(optval))
193 #else
194 #define LWIP_SO_SNDRCVTIMEO_OPTTYPE struct timeval
195 #define LWIP_SO_SNDRCVTIMEO_SET(optval, val) do { \
196 u32_t loc = (val); \
197 ((struct timeval *)(optval))->tv_sec = (long)((loc) / 1000U); \
198 ((struct timeval *)(optval))->tv_usec = (long)(((loc) % 1000U) * 1000U); }while(0)
199 #define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((((const struct timeval *)(optval))->tv_sec * 1000) + (((const struct timeval *)(optval))->tv_usec / 1000))
200 #endif
201
202
203 /** A struct sockaddr replacement that has the same alignment as sockaddr_in/
204 * sockaddr_in6 if instantiated.
205 */
206 union sockaddr_aligned {
207 struct sockaddr sa;
208 #if LWIP_IPV6
209 struct sockaddr_in6 sin6;
210 #endif /* LWIP_IPV6 */
211 #if LWIP_IPV4
212 struct sockaddr_in sin;
213 #endif /* LWIP_IPV4 */
214 };
215
216 /* Define the number of IPv4 multicast memberships, default is one per socket */
217 #ifndef LWIP_SOCKET_MAX_MEMBERSHIPS
218 #define LWIP_SOCKET_MAX_MEMBERSHIPS NUM_SOCKETS
219 #endif
220
221 #if LWIP_IGMP
222 /* This is to keep track of IP_ADD_MEMBERSHIP calls to drop the membership when
223 a socket is closed */
224 struct lwip_socket_multicast_pair {
225 /** the socket */
226 struct lwip_sock *sock;
227 /** the interface address */
228 ip4_addr_t if_addr;
229 /** the group address */
230 ip4_addr_t multi_addr;
231 };
232
233 static struct lwip_socket_multicast_pair socket_ipv4_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
234
235 static int lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
236 static void lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
237 static void lwip_socket_drop_registered_memberships(int s);
238 #endif /* LWIP_IGMP */
239
240 #if LWIP_IPV6_MLD
241 /* This is to keep track of IP_JOIN_GROUP calls to drop the membership when
242 a socket is closed */
243 struct lwip_socket_multicast_mld6_pair {
244 /** the socket */
245 struct lwip_sock *sock;
246 /** the interface index */
247 u8_t if_idx;
248 /** the group address */
249 ip6_addr_t multi_addr;
250 };
251
252 static struct lwip_socket_multicast_mld6_pair socket_ipv6_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
253
254 static int lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
255 static void lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
256 static void lwip_socket_drop_registered_mld6_memberships(int s);
257 #endif /* LWIP_IPV6_MLD */
258
259 /** The global array of available sockets */
260 static struct lwip_sock sockets[NUM_SOCKETS];
261
262 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
263 #if LWIP_TCPIP_CORE_LOCKING
264 /* protect the select_cb_list using core lock */
265 #define LWIP_SOCKET_SELECT_DECL_PROTECT(lev)
266 #define LWIP_SOCKET_SELECT_PROTECT(lev) LOCK_TCPIP_CORE()
267 #define LWIP_SOCKET_SELECT_UNPROTECT(lev) UNLOCK_TCPIP_CORE()
268 #else /* LWIP_TCPIP_CORE_LOCKING */
269 /* protect the select_cb_list using SYS_LIGHTWEIGHT_PROT */
270 #define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev)
271 #define LWIP_SOCKET_SELECT_PROTECT(lev) SYS_ARCH_PROTECT(lev)
272 #define LWIP_SOCKET_SELECT_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev)
273 /** This counter is increased from lwip_select when the list is changed
274 and checked in select_check_waiters to see if it has changed. */
275 static volatile int select_cb_ctr;
276 #endif /* LWIP_TCPIP_CORE_LOCKING */
277 /** The global list of tasks waiting for select */
278 static struct lwip_select_cb *select_cb_list;
279 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
280
281 #define sock_set_errno(sk, e) do { \
282 const int sockerr = (e); \
283 set_errno(sockerr); \
284 } while (0)
285
286 /* Forward declaration of some functions */
287 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
288 static void event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len);
289 #define DEFAULT_SOCKET_EVENTCB event_callback
290 static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent);
291 #else
292 #define DEFAULT_SOCKET_EVENTCB NULL
293 #endif
294 #if !LWIP_TCPIP_CORE_LOCKING
295 static void lwip_getsockopt_callback(void *arg);
296 static void lwip_setsockopt_callback(void *arg);
297 #endif
298 static int lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen);
299 static int lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen);
300 static int free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn,
301 union lwip_sock_lastdata *lastdata);
302 static void free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata);
303
304 #if LWIP_IPV4 && LWIP_IPV6
305 static void
306 sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port)
307 {
308 if ((sockaddr->sa_family) == AF_INET6) {
309 SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6 *)(const void *)(sockaddr), ipaddr, *port);
310 ipaddr->type = IPADDR_TYPE_V6;
311 } else {
312 SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in *)(const void *)(sockaddr), ipaddr, *port);
313 ipaddr->type = IPADDR_TYPE_V4;
314 }
315 }
316 #endif /* LWIP_IPV4 && LWIP_IPV6 */
317
318 /** LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */
319 void
320 lwip_socket_thread_init(void)
321 {
322 netconn_thread_init();
323 }
324
325 /** LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */
326 void
327 lwip_socket_thread_cleanup(void)
328 {
329 netconn_thread_cleanup();
330 }
331
332 #if LWIP_NETCONN_FULLDUPLEX
333 /* Thread-safe increment of sock->fd_used, with overflow check */
334 static int
335 sock_inc_used(struct lwip_sock *sock)
336 {
337 int ret;
338 SYS_ARCH_DECL_PROTECT(lev);
339
340 LWIP_ASSERT("sock != NULL", sock != NULL);
341
342 SYS_ARCH_PROTECT(lev);
343 if (sock->fd_free_pending) {
344 /* prevent new usage of this socket if free is pending */
345 ret = 0;
346 } else {
347 ++sock->fd_used;
348 ret = 1;
349 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
350 }
351 SYS_ARCH_UNPROTECT(lev);
352 return ret;
353 }
354
355 /* Like sock_inc_used(), but called under SYS_ARCH_PROTECT lock. */
356 static int
357 sock_inc_used_locked(struct lwip_sock *sock)
358 {
359 LWIP_ASSERT("sock != NULL", sock != NULL);
360
361 if (sock->fd_free_pending) {
362 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
363 return 0;
364 }
365
366 ++sock->fd_used;
367 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
368 return 1;
369 }
370
371 /* In full-duplex mode, sock->fd_used != 0 prevents a socket descriptor from being
372 * released (and possibly reused) when used from more than one thread
373 * (e.g. read-while-write or close-while-write, etc)
374 * This function is called at the end of functions using (try)get_socket*().
375 */
376 static void
377 done_socket(struct lwip_sock *sock)
378 {
379 int freed = 0;
380 int is_tcp = 0;
381 struct netconn *conn = NULL;
382 union lwip_sock_lastdata lastdata;
383 SYS_ARCH_DECL_PROTECT(lev);
384 LWIP_ASSERT("sock != NULL", sock != NULL);
385
386 SYS_ARCH_PROTECT(lev);
387 LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
388 if (--sock->fd_used == 0) {
389 if (sock->fd_free_pending) {
390 /* free the socket */
391 sock->fd_used = 1;
392 is_tcp = sock->fd_free_pending & LWIP_SOCK_FD_FREE_TCP;
393 freed = free_socket_locked(sock, is_tcp, &conn, &lastdata);
394 }
395 }
396 SYS_ARCH_UNPROTECT(lev);
397
398 if (freed) {
399 free_socket_free_elements(is_tcp, conn, &lastdata);
400 }
401 }
402
403 #else /* LWIP_NETCONN_FULLDUPLEX */
404 #define sock_inc_used(sock) 1
405 #define sock_inc_used_locked(sock) 1
406 #define done_socket(sock)
407 #endif /* LWIP_NETCONN_FULLDUPLEX */
408
409 /* Translate a socket 'int' into a pointer (only fails if the index is invalid) */
410 static struct lwip_sock *
411 tryget_socket_unconn_nouse(int fd)
412 {
413 int s = fd - LWIP_SOCKET_OFFSET;
414 if ((s < 0) || (s >= NUM_SOCKETS)) {
415 LWIP_DEBUGF(SOCKETS_DEBUG, ("tryget_socket_unconn(%d): invalid\n", fd));
416 return NULL;
417 }
418 return &sockets[s];
419 }
420
421 struct lwip_sock *
422 lwip_socket_dbg_get_socket(int fd)
423 {
424 return tryget_socket_unconn_nouse(fd);
425 }
426
427 /* Translate a socket 'int' into a pointer (only fails if the index is invalid) */
428 static struct lwip_sock *
429 tryget_socket_unconn(int fd)
430 {
431 struct lwip_sock *ret = tryget_socket_unconn_nouse(fd);
432 if (ret != NULL) {
433 if (!sock_inc_used(ret)) {
434 return NULL;
435 }
436 }
437 return ret;
438 }
439
440 /* Like tryget_socket_unconn(), but called under SYS_ARCH_PROTECT lock. */
441 static struct lwip_sock *
442 tryget_socket_unconn_locked(int fd)
443 {
444 struct lwip_sock *ret = tryget_socket_unconn_nouse(fd);
445 if (ret != NULL) {
446 if (!sock_inc_used_locked(ret)) {
447 return NULL;
448 }
449 }
450 return ret;
451 }
452
453 /**
454 * Same as get_socket but doesn't set errno
455 *
456 * @param fd externally used socket index
457 * @return struct lwip_sock for the socket or NULL if not found
458 */
459 static struct lwip_sock *
460 tryget_socket(int fd)
461 {
462 struct lwip_sock *sock = tryget_socket_unconn(fd);
463 if (sock != NULL) {
464 if (sock->conn) {
465 return sock;
466 }
467 done_socket(sock);
468 }
469 return NULL;
470 }
471
472 /**
473  * Map an externally used socket index to the internal socket representation.
474 *
475 * @param fd externally used socket index
476 * @return struct lwip_sock for the socket or NULL if not found
477 */
478 static struct lwip_sock *
479 get_socket(int fd)
480 {
481 struct lwip_sock *sock = tryget_socket(fd);
482 if (!sock) {
483 if ((fd < LWIP_SOCKET_OFFSET) || (fd >= (LWIP_SOCKET_OFFSET + NUM_SOCKETS))) {
484 LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): invalid\n", fd));
485 }
486 set_errno(EBADF);
487 return NULL;
488 }
489 return sock;
490 }
491
492 /**
493 * Allocate a new socket for a given netconn.
494 *
495 * @param newconn the netconn for which to allocate a socket
496 * @param accepted 1 if socket has been created by accept(),
497 * 0 if socket has been created by socket()
498 * @return the index of the new socket; -1 on error
499 */
500 static int
501 alloc_socket(struct netconn *newconn, int accepted)
502 {
503 int i;
504 SYS_ARCH_DECL_PROTECT(lev);
505 LWIP_UNUSED_ARG(accepted);
506
507 /* allocate a new socket identifier */
508 for (i = 0; i < NUM_SOCKETS; ++i) {
509 /* Protect socket array */
510 SYS_ARCH_PROTECT(lev);
511 if (!sockets[i].conn) {
512 #if LWIP_NETCONN_FULLDUPLEX
513 if (sockets[i].fd_used) {
514 SYS_ARCH_UNPROTECT(lev);
515 continue;
516 }
517 sockets[i].fd_used = 1;
518 sockets[i].fd_free_pending = 0;
519 #endif
520 sockets[i].conn = newconn;
521 /* The socket is not yet known to anyone, so no need to protect
522 after having marked it as used. */
523 SYS_ARCH_UNPROTECT(lev);
524 sockets[i].lastdata.pbuf = NULL;
525 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
526 LWIP_ASSERT("sockets[i].select_waiting == 0", sockets[i].select_waiting == 0);
527 sockets[i].rcvevent = 0;
528 /* TCP sendbuf is empty, but the socket is not yet writable until connected
529 * (unless it has been created by accept()). */
530 sockets[i].sendevent = (NETCONNTYPE_GROUP(newconn->type) == NETCONN_TCP ? (accepted != 0) : 1);
531 sockets[i].errevent = 0;
532 init_waitqueue_head(&sockets[i].wq);
533 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
534 return i + LWIP_SOCKET_OFFSET;
535 }
536 SYS_ARCH_UNPROTECT(lev);
537 }
538 return -1;
539 }
540
541 /** Free a socket (under lock)
542 *
543 * @param sock the socket to free
544 * @param is_tcp != 0 for TCP sockets, used to free lastdata
545  * @param conn the socket's netconn is stored here, must be freed externally
546 * @param lastdata lastdata is stored here, must be freed externally
547 */
548 static int
549 free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn,
550 union lwip_sock_lastdata *lastdata)
551 {
552 #if LWIP_NETCONN_FULLDUPLEX
553 LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
554 sock->fd_used--;
555 if (sock->fd_used > 0) {
556 sock->fd_free_pending = LWIP_SOCK_FD_FREE_FREE | (is_tcp ? LWIP_SOCK_FD_FREE_TCP : 0);
557 return 0;
558 }
559 #else /* LWIP_NETCONN_FULLDUPLEX */
560 LWIP_UNUSED_ARG(is_tcp);
561 #endif /* LWIP_NETCONN_FULLDUPLEX */
562
563 *lastdata = sock->lastdata;
564 sock->lastdata.pbuf = NULL;
565 *conn = sock->conn;
566 sock->conn = NULL;
567 return 1;
568 }
569
570 /** Free a socket's leftover members.
571 */
572 static void
573 free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata)
574 {
575 if (lastdata->pbuf != NULL) {
576 if (is_tcp) {
577 pbuf_free(lastdata->pbuf);
578 } else {
579 netbuf_delete(lastdata->netbuf);
580 }
581 }
582 if (conn != NULL) {
583 /* netconn_prepare_delete() has already been called, here we only free the conn */
584 netconn_delete(conn);
585 }
586 }
587
588 /** Free a socket. The socket's netconn must have been
589  * deleted before!
590 *
591 * @param sock the socket to free
592 * @param is_tcp != 0 for TCP sockets, used to free lastdata
593 */
594 static void
595 free_socket(struct lwip_sock *sock, int is_tcp)
596 {
597 int freed;
598 struct netconn *conn;
599 union lwip_sock_lastdata lastdata;
600 SYS_ARCH_DECL_PROTECT(lev);
601
602 /* Protect socket array */
603 SYS_ARCH_PROTECT(lev);
604
605 freed = free_socket_locked(sock, is_tcp, &conn, &lastdata);
606 SYS_ARCH_UNPROTECT(lev);
607 /* don't use 'sock' after this line, as another task might have allocated it */
608
609 if (freed) {
610 free_socket_free_elements(is_tcp, conn, &lastdata);
611 }
612 }
613
614 /* Below this, the well-known socket functions are implemented.
615 * Use google.com or opengroup.org to get a good description :-)
616 *
617 * Exceptions are documented!
618 */
619
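/** Wait for a new connection on listening socket 's', allocate a socket
 * descriptor for the accepted netconn and optionally return the peer
 * address. Returns the new socket descriptor or -1 on error (errno is set).
 */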
620 int
621 lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
622 {
623 struct lwip_sock *sock, *nsock;
624 struct netconn *newconn;
625 ip_addr_t naddr;
626 u16_t port = 0;
627 int newsock;
628 err_t err;
629 int recvevent;
630 SYS_ARCH_DECL_PROTECT(lev);
631
632 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d)...\n", s));
633 sock = get_socket(s);
634 if (!sock) {
635 return -1;
636 }
637
638 /* wait for a new connection */
639 err = netconn_accept(sock->conn, &newconn);
640 if (err != ERR_OK) {
641     LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_accept failed, err=%d\n", s, err));
642 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
643 sock_set_errno(sock, EOPNOTSUPP);
644 } else if (err == ERR_CLSD) {
645 sock_set_errno(sock, EINVAL);
646 } else {
647 sock_set_errno(sock, err_to_errno(err));
648 }
649 done_socket(sock);
650 return -1;
651 }
652 LWIP_ASSERT("newconn != NULL", newconn != NULL);
653
654 newsock = alloc_socket(newconn, 1);
655 if (newsock == -1) {
656 netconn_delete(newconn);
657 sock_set_errno(sock, ENFILE);
658 done_socket(sock);
659 return -1;
660 }
661 LWIP_ASSERT("invalid socket index", (newsock >= LWIP_SOCKET_OFFSET) && (newsock < NUM_SOCKETS + LWIP_SOCKET_OFFSET));
662 nsock = &sockets[newsock - LWIP_SOCKET_OFFSET];
663
664   /* See event_callback: data may arrive right after an accept, even before
665    * the server task has created the new socket.
666    * In that case, newconn->socket has been counted down (newconn->socket--),
667    * so nsock->rcvevent is >= 1 here!
668    */
669 SYS_ARCH_PROTECT(lev);
670 recvevent = (s16_t)(-1 - newconn->socket);
671 newconn->socket = newsock;
672 SYS_ARCH_UNPROTECT(lev);
673
674 if (newconn->callback) {
675 LOCK_TCPIP_CORE();
676 while (recvevent > 0) {
677 recvevent--;
678 newconn->callback(newconn, NETCONN_EVT_RCVPLUS, 0);
679 }
680 UNLOCK_TCPIP_CORE();
681 }
682
683 /* Note that POSIX only requires us to check addr is non-NULL. addrlen must
684 * not be NULL if addr is valid.
685 */
686 if ((addr != NULL) && (addrlen != NULL)) {
687 union sockaddr_aligned tempaddr;
688 /* get the IP address and port of the remote host */
689 err = netconn_peer(newconn, &naddr, &port);
690 if (err != ERR_OK) {
691 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_peer failed, err=%d\n", s, err));
692 netconn_delete(newconn);
693 free_socket(nsock, 1);
694 sock_set_errno(sock, err_to_errno(err));
695 done_socket(sock);
696 return -1;
697 }
698
699 IPADDR_PORT_TO_SOCKADDR(&tempaddr, &naddr, port);
700 if (*addrlen > tempaddr.sa.sa_len) {
701 *addrlen = tempaddr.sa.sa_len;
702 }
703 MEMCPY(addr, &tempaddr, *addrlen);
704
705 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d addr=", s, newsock));
706 ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
707 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", port));
708 } else {
709 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d", s, newsock));
710 }
711
712 sock_set_errno(sock, 0);
713 done_socket(sock);
714 done_socket(nsock);
715 return newsock;
716 }
717
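/** Bind the socket's netconn to the local address and port given in 'name'.
 * Returns 0 on success, -1 on error (errno is set).
 */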
718 int
719 lwip_bind(int s, const struct sockaddr *name, socklen_t namelen)
720 {
721 struct lwip_sock *sock;
722 ip_addr_t local_addr;
723 u16_t local_port;
724 err_t err;
725
726 sock = get_socket(s);
727 if (!sock) {
728 return -1;
729 }
730
731 if (!SOCK_ADDR_TYPE_MATCH(name, sock)) {
732 /* sockaddr does not match socket type (IPv4/IPv6) */
733 sock_set_errno(sock, err_to_errno(ERR_VAL));
734 done_socket(sock);
735 return -1;
736 }
737
738 /* check size, family and alignment of 'name' */
739 LWIP_ERROR("lwip_bind: invalid address", (IS_SOCK_ADDR_LEN_VALID(namelen) &&
740 IS_SOCK_ADDR_TYPE_VALID(name) && IS_SOCK_ADDR_ALIGNED(name)),
741 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
742 LWIP_UNUSED_ARG(namelen);
743
744 SOCKADDR_TO_IPADDR_PORT(name, &local_addr, local_port);
745 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d, addr=", s));
746 ip_addr_debug_print_val(SOCKETS_DEBUG, local_addr);
747 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", local_port));
748
749 #if LWIP_IPV4 && LWIP_IPV6
750 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
751 if (IP_IS_V6_VAL(local_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&local_addr))) {
752 unmap_ipv4_mapped_ipv6(ip_2_ip4(&local_addr), ip_2_ip6(&local_addr));
753 IP_SET_TYPE_VAL(local_addr, IPADDR_TYPE_V4);
754 }
755 #endif /* LWIP_IPV4 && LWIP_IPV6 */
756
757 err = netconn_bind(sock->conn, &local_addr, local_port);
758
759 if (err != ERR_OK) {
760 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) failed, err=%d\n", s, err));
761 sock_set_errno(sock, err_to_errno(err));
762 done_socket(sock);
763 return -1;
764 }
765
766 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) succeeded\n", s));
767 sock_set_errno(sock, 0);
768 done_socket(sock);
769 return 0;
770 }
771
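/** Close a socket: drop any registered multicast memberships, prepare the
 * netconn for deletion and free the socket descriptor.
 * Returns 0 on success, -1 on error (errno is set).
 */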
772 int
773 lwip_close(int s)
774 {
775 struct lwip_sock *sock;
776 int is_tcp = 0;
777 err_t err;
778
779 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_close(%d)\n", s));
780
781 sock = get_socket(s);
782 if (!sock) {
783 return -1;
784 }
785
786 if (sock->conn != NULL) {
787 is_tcp = NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP;
788 } else {
789 LWIP_ASSERT("sock->lastdata == NULL", sock->lastdata.pbuf == NULL);
790 }
791
792 #if LWIP_IGMP
793 /* drop all possibly joined IGMP memberships */
794 lwip_socket_drop_registered_memberships(s);
795 #endif /* LWIP_IGMP */
796 #if LWIP_IPV6_MLD
797 /* drop all possibly joined MLD6 memberships */
798 lwip_socket_drop_registered_mld6_memberships(s);
799 #endif /* LWIP_IPV6_MLD */
800
801 err = netconn_prepare_delete(sock->conn);
802 if (err != ERR_OK) {
803 sock_set_errno(sock, err_to_errno(err));
804 done_socket(sock);
805 return -1;
806 }
807
808 free_socket(sock, is_tcp);
809 set_errno(0);
810 return 0;
811 }
812
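/** Connect the socket's netconn to the remote address given in 'name', or
 * disconnect it if name->sa_family is AF_UNSPEC.
 * Returns 0 on success, -1 on error (errno is set).
 */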
813 int
814 lwip_connect(int s, const struct sockaddr *name, socklen_t namelen)
815 {
816 struct lwip_sock *sock;
817 err_t err;
818
819 sock = get_socket(s);
820 if (!sock) {
821 return -1;
822 }
823
824 if (!SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock)) {
825 /* sockaddr does not match socket type (IPv4/IPv6) */
826 sock_set_errno(sock, err_to_errno(ERR_VAL));
827 done_socket(sock);
828 return -1;
829 }
830
831 LWIP_UNUSED_ARG(namelen);
832 if (name->sa_family == AF_UNSPEC) {
833 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, AF_UNSPEC)\n", s));
834 err = netconn_disconnect(sock->conn);
835 } else {
836 ip_addr_t remote_addr;
837 u16_t remote_port;
838
839 /* check size, family and alignment of 'name' */
840 LWIP_ERROR("lwip_connect: invalid address", IS_SOCK_ADDR_LEN_VALID(namelen) &&
841 IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) && IS_SOCK_ADDR_ALIGNED(name),
842 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
843
844 SOCKADDR_TO_IPADDR_PORT(name, &remote_addr, remote_port);
845 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, addr=", s));
846 ip_addr_debug_print_val(SOCKETS_DEBUG, remote_addr);
847 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", remote_port));
848
849 #if LWIP_IPV4 && LWIP_IPV6
850 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
851 if (IP_IS_V6_VAL(remote_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&remote_addr))) {
852 unmap_ipv4_mapped_ipv6(ip_2_ip4(&remote_addr), ip_2_ip6(&remote_addr));
853 IP_SET_TYPE_VAL(remote_addr, IPADDR_TYPE_V4);
854 }
855 #endif /* LWIP_IPV4 && LWIP_IPV6 */
856
857 err = netconn_connect(sock->conn, &remote_addr, remote_port);
858 }
859
860 if (err != ERR_OK) {
861 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) failed, err=%d\n", s, err));
862 sock_set_errno(sock, err_to_errno(err));
863 done_socket(sock);
864 return -1;
865 }
866
867 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) succeeded\n", s));
868 sock_set_errno(sock, 0);
869 done_socket(sock);
870 return 0;
871 }
872
873 /**
874 * Set a socket into listen mode.
875 * The socket may not have been used for another connection previously.
876 *
877 * @param s the socket to set to listening mode
878 * @param backlog (ATTENTION: needs TCP_LISTEN_BACKLOG=1)
879 * @return 0 on success, non-zero on failure
880 */
881 int
882 lwip_listen(int s, int backlog)
883 {
884 struct lwip_sock *sock;
885 err_t err;
886
887 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d, backlog=%d)\n", s, backlog));
888
889 sock = get_socket(s);
890 if (!sock) {
891 return -1;
892 }
893
894 /* limit the "backlog" parameter to fit in an u8_t */
895 backlog = LWIP_MIN(LWIP_MAX(backlog, 0), 0xff);
896
897 err = netconn_listen_with_backlog(sock->conn, (u8_t)backlog);
898
899 if (err != ERR_OK) {
900 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d) failed, err=%d\n", s, err));
901 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
902 sock_set_errno(sock, EOPNOTSUPP);
903 } else {
904 sock_set_errno(sock, err_to_errno(err));
905 }
906 done_socket(sock);
907 return -1;
908 }
909
910 sock_set_errno(sock, 0);
911 done_socket(sock);
912 return 0;
913 }
914
915 #if LWIP_TCP
916 /* Helper function to loop over receiving pbufs from netconn
917 * until "len" bytes are received or we're otherwise done.
918 * Keeps sock->lastdata for peeking or partly copying.
919 */
920 static ssize_t
921 lwip_recv_tcp(struct lwip_sock *sock, void *mem, size_t len, int flags)
922 {
923 u8_t apiflags = NETCONN_NOAUTORCVD;
924 ssize_t recvd = 0;
925 ssize_t recv_left = (len <= SSIZE_MAX) ? (ssize_t)len : SSIZE_MAX;
926
927 LWIP_ASSERT("no socket given", sock != NULL);
928 LWIP_ASSERT("this should be checked internally", NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP);
929
930 if (flags & MSG_DONTWAIT) {
931 apiflags |= NETCONN_DONTBLOCK;
932 }
933
934 do {
935 struct pbuf *p;
936 err_t err;
937 u16_t copylen;
938
939 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: top while sock->lastdata=%p\n", (void *)sock->lastdata.pbuf));
940 /* Check if there is data left from the last recv operation. */
941 if (sock->lastdata.pbuf) {
942 p = sock->lastdata.pbuf;
943 } else {
944 /* No data was left from the previous operation, so we try to get
945 some from the network. */
946 err = netconn_recv_tcp_pbuf_flags(sock->conn, &p, apiflags);
947 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: netconn_recv err=%d, pbuf=%p\n",
948 err, (void *)p));
949
950 if (err != ERR_OK) {
951 if (recvd > 0) {
952 /* already received data, return that (this trusts in getting the same error from
953 netconn layer again next time netconn_recv is called) */
954 goto lwip_recv_tcp_done;
955 }
956 /* We should really do some error checking here. */
957 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: p == NULL, error is \"%s\"!\n",
958 lwip_strerr(err)));
959 sock_set_errno(sock, err_to_errno(err));
960 if (err == ERR_CLSD) {
961 return 0;
962 } else {
963 return -1;
964 }
965 }
966 LWIP_ASSERT("p != NULL", p != NULL);
967 sock->lastdata.pbuf = p;
968 }
969
970 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: buflen=%"U16_F" recv_left=%d off=%d\n",
971 p->tot_len, (int)recv_left, (int)recvd));
972
973 if (recv_left > p->tot_len) {
974 copylen = p->tot_len;
975 } else {
976 copylen = (u16_t)recv_left;
977 }
978 if (recvd + copylen < recvd) {
979 /* overflow */
980 copylen = (u16_t)(SSIZE_MAX - recvd);
981 }
982
983 /* copy the contents of the received buffer into
984 the supplied memory pointer mem */
985 pbuf_copy_partial(p, (u8_t *)mem + recvd, copylen, 0);
986
987 recvd += copylen;
988
989 /* TCP combines multiple pbufs for one recv */
990 LWIP_ASSERT("invalid copylen, len would underflow", recv_left >= copylen);
991 recv_left -= copylen;
992
993 /* Unless we peek the incoming message... */
994 if ((flags & MSG_PEEK) == 0) {
995 /* ... check if there is data left in the pbuf */
996 LWIP_ASSERT("invalid copylen", p->tot_len >= copylen);
997 if (p->tot_len - copylen > 0) {
998 /* If so, it should be saved in the sock structure for the next recv call.
999 We store the pbuf but hide/free the consumed data: */
1000 sock->lastdata.pbuf = pbuf_free_header(p, copylen);
1001 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: lastdata now pbuf=%p\n", (void *)sock->lastdata.pbuf));
1002 } else {
1003 sock->lastdata.pbuf = NULL;
1004 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: deleting pbuf=%p\n", (void *)p));
1005 pbuf_free(p);
1006 }
1007 }
1008 /* once we have some data to return, only add more if we don't need to wait */
1009 apiflags |= NETCONN_DONTBLOCK | NETCONN_NOFIN;
1010 /* @todo: do we need to support peeking more than one pbuf? */
1011 } while ((recv_left > 0) && !(flags & MSG_PEEK));
1012 lwip_recv_tcp_done:
1013 if ((recvd > 0) && !(flags & MSG_PEEK)) {
1014 /* ensure window update after copying all data */
1015 netconn_tcp_recvd(sock->conn, (size_t)recvd);
1016 }
1017 sock_set_errno(sock, 0);
1018 return recvd;
1019 }
1020 #endif
1021
1022 /* Convert a netbuf's address data to struct sockaddr */
1023 static int
1024 lwip_sock_make_addr(struct netconn *conn, ip_addr_t *fromaddr, u16_t port,
1025 struct sockaddr *from, socklen_t *fromlen)
1026 {
1027 int truncated = 0;
1028 union sockaddr_aligned saddr;
1029
1030 LWIP_UNUSED_ARG(conn);
1031
1032 LWIP_ASSERT("fromaddr != NULL", fromaddr != NULL);
1033 LWIP_ASSERT("from != NULL", from != NULL);
1034 LWIP_ASSERT("fromlen != NULL", fromlen != NULL);
1035
1036 #if LWIP_IPV4 && LWIP_IPV6
1037 /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
1038 if (NETCONNTYPE_ISIPV6(netconn_type(conn)) && IP_IS_V4(fromaddr)) {
1039 ip4_2_ipv4_mapped_ipv6(ip_2_ip6(fromaddr), ip_2_ip4(fromaddr));
1040 IP_SET_TYPE(fromaddr, IPADDR_TYPE_V6);
1041 }
1042 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1043
1044 IPADDR_PORT_TO_SOCKADDR(&saddr, fromaddr, port);
1045 DF_NADDR(*fromaddr);
1046 if (*fromlen < saddr.sa.sa_len) {
1047 truncated = 1;
1048 } else if (*fromlen > saddr.sa.sa_len) {
1049 *fromlen = saddr.sa.sa_len;
1050 }
1051 MEMCPY(from, &saddr, *fromlen);
1052 return truncated;
1053 }
1054
1055 #if LWIP_TCP
1056 /* Helper function to get a tcp socket's remote address info */
1057 static int
1058 lwip_recv_tcp_from(struct lwip_sock *sock, struct sockaddr *from, socklen_t *fromlen, const char *dbg_fn, int dbg_s, ssize_t dbg_ret)
1059 {
1060 if (sock == NULL) {
1061 return 0;
1062 }
1063 LWIP_UNUSED_ARG(dbg_fn);
1064 LWIP_UNUSED_ARG(dbg_s);
1065 LWIP_UNUSED_ARG(dbg_ret);
1066
1067 #if !SOCKETS_DEBUG
1068 if (from && fromlen)
1069 #endif /* !SOCKETS_DEBUG */
1070 {
1071 /* get remote addr/port from tcp_pcb */
1072 u16_t port;
1073 ip_addr_t tmpaddr;
1074 err_t err = netconn_getaddr(sock->conn, &tmpaddr, &port, 0);
1075 LWIP_DEBUGF(SOCKETS_DEBUG, ("%s(%d): addr=", dbg_fn, dbg_s));
1076 ip_addr_debug_print_val(SOCKETS_DEBUG, tmpaddr);
1077 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", port, (int)dbg_ret));
1078 if (!err && from && fromlen) {
1079 return lwip_sock_make_addr(sock->conn, &tmpaddr, port, from, fromlen);
1080 }
1081 }
1082 return 0;
1083 }
1084 #endif
1085
1086 /* Helper function to receive a netbuf from a udp or raw netconn.
1087 * Keeps sock->lastdata for peeking.
1088 */
1089 static err_t
1090 lwip_recvfrom_udp_raw(struct lwip_sock *sock, int flags, struct msghdr *msg, u16_t *datagram_len, int dbg_s)
1091 {
1092 struct netbuf *buf;
1093 u8_t apiflags;
1094 err_t err;
1095 u16_t buflen, copylen, copied;
1096 int i;
1097
1098 LWIP_UNUSED_ARG(dbg_s);
1099 LWIP_ERROR("lwip_recvfrom_udp_raw: invalid arguments", (msg->msg_iov != NULL) || (msg->msg_iovlen <= 0), return ERR_ARG;);
1100
1101 if (flags & MSG_DONTWAIT) {
1102 apiflags = NETCONN_DONTBLOCK;
1103 } else {
1104 apiflags = 0;
1105 }
1106
1107 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: top sock->lastdata=%p\n", (void *)sock->lastdata.netbuf));
1108 /* Check if there is data left from the last recv operation. */
1109 buf = sock->lastdata.netbuf;
1110 if (buf == NULL) {
1111 /* No data was left from the previous operation, so we try to get
1112 some from the network. */
1113 err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &buf, apiflags);
1114 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: netconn_recv err=%d, netbuf=%p\n",
1115 err, (void *)buf));
1116
1117 if (err != ERR_OK) {
1118 return err;
1119 }
1120 LWIP_ASSERT("buf != NULL", buf != NULL);
1121 sock->lastdata.netbuf = buf;
1122 }
1123 buflen = buf->p->tot_len;
1124 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw: buflen=%"U16_F"\n", buflen));
1125
1126 copied = 0;
1127 /* copy the pbuf payload into the iovs */
1128 for (i = 0; (i < msg->msg_iovlen) && (copied < buflen); i++) {
1129 u16_t len_left = (u16_t)(buflen - copied);
1130 if (msg->msg_iov[i].iov_len > len_left) {
1131 copylen = len_left;
1132 } else {
1133 copylen = (u16_t)msg->msg_iov[i].iov_len;
1134 }
1135
1136 /* copy the contents of the received buffer into
1137 the supplied memory buffer */
1138 pbuf_copy_partial(buf->p, (u8_t *)msg->msg_iov[i].iov_base, copylen, copied);
1139 copied = (u16_t)(copied + copylen);
1140 }
1141
1142   /* Check to see where the data came from. */
1143 #if !SOCKETS_DEBUG
1144 if (msg->msg_name && msg->msg_namelen)
1145 #endif /* !SOCKETS_DEBUG */
1146 {
1147 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw(%d): addr=", dbg_s));
1148 ip_addr_debug_print_val(SOCKETS_DEBUG, *netbuf_fromaddr(buf));
1149 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", netbuf_fromport(buf), copied));
1150 if (msg->msg_name && msg->msg_namelen) {
1151 lwip_sock_make_addr(sock->conn, netbuf_fromaddr(buf), netbuf_fromport(buf),
1152 (struct sockaddr *)msg->msg_name, &msg->msg_namelen);
1153 }
1154 }
1155
1156 /* Initialize flag output */
1157 msg->msg_flags = 0;
1158
1159 if (msg->msg_control) {
1160 u8_t wrote_msg = 0;
1161 #if LWIP_NETBUF_RECVINFO
1162 /* Check if packet info was recorded */
1163 if (buf->flags & NETBUF_FLAG_DESTADDR) {
1164 if (IP_IS_V4(&buf->toaddr)) {
1165 #if LWIP_IPV4
1166 if (msg->msg_controllen >= CMSG_SPACE(sizeof(struct in_pktinfo))) {
1167 struct cmsghdr *chdr = CMSG_FIRSTHDR(msg); /* This will always return a header!! */
1168 struct in_pktinfo *pkti = (struct in_pktinfo *)CMSG_DATA(chdr);
1169 chdr->cmsg_level = IPPROTO_IP;
1170 chdr->cmsg_type = IP_PKTINFO;
1171 chdr->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
1172 pkti->ipi_ifindex = buf->p->if_idx;
1173 inet_addr_from_ip4addr(&pkti->ipi_addr, ip_2_ip4(netbuf_destaddr(buf)));
1174 msg->msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo));
1175 wrote_msg = 1;
1176 } else {
1177 msg->msg_flags |= MSG_CTRUNC;
1178 }
1179 #endif /* LWIP_IPV4 */
1180 }
1181 }
1182 #endif /* LWIP_NETBUF_RECVINFO */
1183
1184 if (!wrote_msg) {
1185 msg->msg_controllen = 0;
1186 }
1187 }
1188
1189 /* If we don't peek the incoming message: zero lastdata pointer and free the netbuf */
1190 if ((flags & MSG_PEEK) == 0) {
1191 sock->lastdata.netbuf = NULL;
1192 netbuf_delete(buf);
1193 }
1194 if (datagram_len) {
1195 *datagram_len = buflen;
1196 }
1197 return ERR_OK;
1198 }
1199
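/** Receive data from a socket, optionally returning the sender's address in
 * 'from'. Dispatches to lwip_recv_tcp() for TCP connections and to
 * lwip_recvfrom_udp_raw() for UDP/RAW connections.
 */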
1200 ssize_t
1201 lwip_recvfrom(int s, void *mem, size_t len, int flags,
1202 struct sockaddr *from, socklen_t *fromlen)
1203 {
1204 struct lwip_sock *sock;
1205 ssize_t ret;
1206
1207 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d, %p, %"SZT_F", 0x%x, ..)\n", s, mem, len, flags));
1208 sock = get_socket(s);
1209 if (!sock) {
1210 return -1;
1211 }
1212 #if LWIP_TCP
1213 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1214 ret = lwip_recv_tcp(sock, mem, len, flags);
1215 lwip_recv_tcp_from(sock, from, fromlen, "lwip_recvfrom", s, ret);
1216 done_socket(sock);
1217 return ret;
1218 } else
1219 #endif
1220 {
1221 u16_t datagram_len = 0;
1222 struct iovec vec;
1223 struct msghdr msg;
1224 err_t err;
1225 vec.iov_base = mem;
1226 vec.iov_len = len;
1227 msg.msg_control = NULL;
1228 msg.msg_controllen = 0;
1229 msg.msg_flags = 0;
1230 msg.msg_iov = &vec;
1231 msg.msg_iovlen = 1;
1232 msg.msg_name = from;
1233 msg.msg_namelen = (fromlen ? *fromlen : 0);
1234 err = lwip_recvfrom_udp_raw(sock, flags, &msg, &datagram_len, s);
1235 if (err != ERR_OK) {
1236 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
1237 s, lwip_strerr(err)));
1238 sock_set_errno(sock, err_to_errno(err));
1239 done_socket(sock);
1240 return -1;
1241 }
1242 ret = (ssize_t)LWIP_MIN(LWIP_MIN(len, datagram_len), SSIZE_MAX);
1243 if (fromlen) {
1244 *fromlen = msg.msg_namelen;
1245 }
1246 }
1247
1248 sock_set_errno(sock, 0);
1249 done_socket(sock);
1250 return ret;
1251 }
1252
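/** read(): equivalent to recvfrom() with no flags and no address output */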
1253 ssize_t
1254 lwip_read(int s, void *mem, size_t len)
1255 {
1256 return lwip_recvfrom(s, mem, len, 0, NULL, NULL);
1257 }
1258
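/** readv(): scatter read, implemented on top of lwip_recvmsg() */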
1259 ssize_t
1260 lwip_readv(int s, const struct iovec *iov, int iovcnt)
1261 {
1262 struct msghdr msg;
1263
1264 msg.msg_name = NULL;
1265 msg.msg_namelen = 0;
1266 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1267 Blame the opengroup standard for this inconsistency. */
1268 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1269 msg.msg_iovlen = iovcnt;
1270 msg.msg_control = NULL;
1271 msg.msg_controllen = 0;
1272 msg.msg_flags = 0;
1273 return lwip_recvmsg(s, &msg, 0);
1274 }
1275
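/** recv(): equivalent to recvfrom() with no address output */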
1276 ssize_t
1277 lwip_recv(int s, void *mem, size_t len, int flags)
1278 {
1279 return lwip_recvfrom(s, mem, len, flags, NULL, NULL);
1280 }
1281
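/** recvmsg(): receive into multiple buffers; only the MSG_PEEK and
 * MSG_DONTWAIT flags are supported.
 */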
1282 ssize_t
1283 lwip_recvmsg(int s, struct msghdr *message, int flags)
1284 {
1285 struct lwip_sock *sock;
1286 int i;
1287 ssize_t buflen;
1288
1289 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg(%d, message=%p, flags=0x%x)\n", s, (void *)message, flags));
1290 LWIP_ERROR("lwip_recvmsg: invalid message pointer", message != NULL, return ERR_ARG;);
1291 LWIP_ERROR("lwip_recvmsg: unsupported flags", (flags & ~(MSG_PEEK|MSG_DONTWAIT)) == 0,
1292 set_errno(EOPNOTSUPP); return -1;);
1293
1294 if ((message->msg_iovlen <= 0) || (message->msg_iovlen > IOV_MAX)) {
1295 set_errno(EMSGSIZE);
1296 return -1;
1297 }
1298
1299 sock = get_socket(s);
1300 if (!sock) {
1301 return -1;
1302 }
1303
1304 /* check for valid vectors */
1305 buflen = 0;
1306 for (i = 0; i < message->msg_iovlen; i++) {
1307 if ((message->msg_iov[i].iov_base == NULL) || ((ssize_t)message->msg_iov[i].iov_len <= 0) ||
1308 ((size_t)(ssize_t)message->msg_iov[i].iov_len != message->msg_iov[i].iov_len) ||
1309 ((ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len) <= 0)) {
1310 sock_set_errno(sock, err_to_errno(ERR_VAL));
1311 done_socket(sock);
1312 return -1;
1313 }
1314 buflen = (ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len);
1315 }
1316
1317 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1318 #if LWIP_TCP
1319 int recv_flags = flags;
1320 message->msg_flags = 0;
1321 /* recv the data */
1322 buflen = 0;
1323 for (i = 0; i < message->msg_iovlen; i++) {
1324 /* try to receive into this vector's buffer */
1325 ssize_t recvd_local = lwip_recv_tcp(sock, message->msg_iov[i].iov_base, message->msg_iov[i].iov_len, recv_flags);
1326 if (recvd_local > 0) {
1327 /* sum up received bytes */
1328 buflen += recvd_local;
1329 }
1330 if ((recvd_local < 0) || (recvd_local < (int)message->msg_iov[i].iov_len) ||
1331 (flags & MSG_PEEK)) {
1332         /* returned prematurely (or peeking, which might actually be limited to the first iov) */
1333 if (buflen <= 0) {
1334 /* nothing received at all, propagate the error */
1335 buflen = recvd_local;
1336 }
1337 break;
1338 }
1339 /* pass MSG_DONTWAIT to lwip_recv_tcp() to prevent waiting for more data */
1340 recv_flags |= MSG_DONTWAIT;
1341 }
1342 if (buflen > 0) {
1343 /* reset socket error since we have received something */
1344 sock_set_errno(sock, 0);
1345 }
1346 /* " If the socket is connected, the msg_name and msg_namelen members shall be ignored." */
1347 done_socket(sock);
1348 return buflen;
1349 #else /* LWIP_TCP */
1350 sock_set_errno(sock, err_to_errno(ERR_ARG));
1351 done_socket(sock);
1352 return -1;
1353 #endif /* LWIP_TCP */
1354 }
1355 /* else, UDP and RAW NETCONNs */
1356 #if LWIP_UDP || LWIP_RAW
1357 {
1358 u16_t datagram_len = 0;
1359 err_t err;
1360 err = lwip_recvfrom_udp_raw(sock, flags, message, &datagram_len, s);
1361 if (err != ERR_OK) {
1362 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
1363 s, lwip_strerr(err)));
1364 sock_set_errno(sock, err_to_errno(err));
1365 done_socket(sock);
1366 return -1;
1367 }
1368 if (datagram_len > buflen) {
1369 message->msg_flags |= MSG_TRUNC;
1370 }
1371
1372 sock_set_errno(sock, 0);
1373 done_socket(sock);
1374 return (int)datagram_len;
1375 }
1376 #else /* LWIP_UDP || LWIP_RAW */
1377 sock_set_errno(sock, err_to_errno(ERR_ARG));
1378 done_socket(sock);
1379 return -1;
1380 #endif /* LWIP_UDP || LWIP_RAW */
1381 }
1382
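/** Send data on a connected socket. For non-TCP connections, this falls
 * through to lwip_sendto() with a NULL destination address.
 */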
1383 ssize_t
1384 lwip_send(int s, const void *data, size_t size, int flags)
1385 {
1386 struct lwip_sock *sock;
1387 err_t err;
1388 u8_t write_flags;
1389 size_t written;
1390
1391 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d, data=%p, size=%"SZT_F", flags=0x%x)\n",
1392 s, data, size, flags));
1393
1394 sock = get_socket(s);
1395 if (!sock) {
1396 return -1;
1397 }
1398
1399 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
1400 #if (LWIP_UDP || LWIP_RAW)
1401 done_socket(sock);
1402 return lwip_sendto(s, data, size, flags, NULL, 0);
1403 #else /* (LWIP_UDP || LWIP_RAW) */
1404 sock_set_errno(sock, err_to_errno(ERR_ARG));
1405 done_socket(sock);
1406 return -1;
1407 #endif /* (LWIP_UDP || LWIP_RAW) */
1408 }
1409
1410 write_flags = (u8_t)(NETCONN_COPY |
1411 ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
1412 ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));
1413 written = 0;
1414 err = netconn_write_partly(sock->conn, data, size, write_flags, &written);
1415
1416 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d) err=%d written=%"SZT_F"\n", s, err, written));
1417 sock_set_errno(sock, err_to_errno(err));
1418 done_socket(sock);
1419 /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
1420 return (err == ERR_OK ? (ssize_t)written : -1);
1421 }
1422
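/** sendmsg(): gather send from multiple buffers. For UDP/RAW connections,
 * the IO vectors are assembled into a single netbuf before sending;
 * msg_control is ignored.
 */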
1423 ssize_t
1424 lwip_sendmsg(int s, const struct msghdr *msg, int flags)
1425 {
1426 struct lwip_sock *sock;
1427 #if LWIP_TCP
1428 u8_t write_flags;
1429 size_t written;
1430 #endif
1431 err_t err = ERR_OK;
1432
1433 sock = get_socket(s);
1434 if (!sock) {
1435 return -1;
1436 }
1437
1438 LWIP_ERROR("lwip_sendmsg: invalid msghdr", msg != NULL,
1439 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1440 LWIP_ERROR("lwip_sendmsg: invalid msghdr iov", msg->msg_iov != NULL,
1441 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1442 LWIP_ERROR("lwip_sendmsg: maximum iovs exceeded", (msg->msg_iovlen > 0) && (msg->msg_iovlen <= IOV_MAX),
1443 sock_set_errno(sock, EMSGSIZE); done_socket(sock); return -1;);
1444 LWIP_ERROR("lwip_sendmsg: unsupported flags", (flags & ~(MSG_DONTWAIT | MSG_MORE)) == 0,
1445 sock_set_errno(sock, EOPNOTSUPP); done_socket(sock); return -1;);
1446
1447 LWIP_UNUSED_ARG(msg->msg_control);
1448 LWIP_UNUSED_ARG(msg->msg_controllen);
1449 LWIP_UNUSED_ARG(msg->msg_flags);
1450
1451 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1452 #if LWIP_TCP
1453 write_flags = (u8_t)(NETCONN_COPY |
1454 ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
1455 ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));
1456
1457 written = 0;
1458 err = netconn_write_vectors_partly(sock->conn, (struct netvector *)msg->msg_iov, (u16_t)msg->msg_iovlen, write_flags, &written);
1459 sock_set_errno(sock, err_to_errno(err));
1460 done_socket(sock);
1461 /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
1462 return (err == ERR_OK ? (ssize_t)written : -1);
1463 #else /* LWIP_TCP */
1464 sock_set_errno(sock, err_to_errno(ERR_ARG));
1465 done_socket(sock);
1466 return -1;
1467 #endif /* LWIP_TCP */
1468 }
1469 /* else, UDP and RAW NETCONNs */
1470 #if LWIP_UDP || LWIP_RAW
1471 {
1472 struct netbuf chain_buf;
1473 int i;
1474 ssize_t size = 0;
1475
1476 LWIP_UNUSED_ARG(flags);
1477 LWIP_ERROR("lwip_sendmsg: invalid msghdr name", (((msg->msg_name == NULL) && (msg->msg_namelen == 0)) ||
1478 IS_SOCK_ADDR_LEN_VALID(msg->msg_namelen)),
1479 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1480
1481 /* initialize chain buffer with destination */
1482 memset(&chain_buf, 0, sizeof(struct netbuf));
1483 if (msg->msg_name) {
1484 u16_t remote_port;
1485 SOCKADDR_TO_IPADDR_PORT((const struct sockaddr *)msg->msg_name, &chain_buf.addr, remote_port);
1486 netbuf_fromport(&chain_buf) = remote_port;
1487 }
1488 #if LWIP_NETIF_TX_SINGLE_PBUF
1489 for (i = 0; i < msg->msg_iovlen; i++) {
1490 size += msg->msg_iov[i].iov_len;
1491 if ((msg->msg_iov[i].iov_len > INT_MAX) || (size < (int)msg->msg_iov[i].iov_len)) {
1492 /* overflow */
1493 goto sendmsg_emsgsize;
1494 }
1495 }
1496 if (size > 0xFFFF) {
1497 /* overflow */
1498 goto sendmsg_emsgsize;
1499 }
1500 /* Allocate a new netbuf and copy the data into it. */
1501 if (netbuf_alloc(&chain_buf, (u16_t)size) == NULL) {
1502 err = ERR_MEM;
1503 } else {
1504 /* flatten the IO vectors */
1505 size_t offset = 0;
1506 for (i = 0; i < msg->msg_iovlen; i++) {
1507 MEMCPY(&((u8_t *)chain_buf.p->payload)[offset], msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len);
1508 offset += msg->msg_iov[i].iov_len;
1509 }
1510 #if LWIP_CHECKSUM_ON_COPY
1511 {
1512 /* This can be improved by using LWIP_CHKSUM_COPY() and aggregating the checksum for each IO vector */
1513 u16_t chksum = ~inet_chksum_pbuf(chain_buf.p);
1514 netbuf_set_chksum(&chain_buf, chksum);
1515 }
1516 #endif /* LWIP_CHECKSUM_ON_COPY */
1517 err = ERR_OK;
1518 }
1519 #else /* LWIP_NETIF_TX_SINGLE_PBUF */
1520 /* create a chained netbuf from the IO vectors. NOTE: we assemble a pbuf chain
1521 manually to avoid having to allocate, chain, and delete a netbuf for each iov */
1522 for (i = 0; i < msg->msg_iovlen; i++) {
1523 struct pbuf *p;
1524 if (msg->msg_iov[i].iov_len > 0xFFFF) {
1525 /* overflow */
1526 goto sendmsg_emsgsize;
1527 }
1528 p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF);
1529 if (p == NULL) {
1530 err = ERR_MEM; /* let netbuf_delete() cleanup chain_buf */
1531 break;
1532 }
1533 p->payload = msg->msg_iov[i].iov_base;
1534 p->len = p->tot_len = (u16_t)msg->msg_iov[i].iov_len;
1535 /* netbuf empty, add new pbuf */
1536 if (chain_buf.p == NULL) {
1537 chain_buf.p = chain_buf.ptr = p;
1538 /* add pbuf to existing pbuf chain */
1539 } else {
1540 if (chain_buf.p->tot_len + p->len > 0xffff) {
1541 /* overflow */
1542 pbuf_free(p);
1543 goto sendmsg_emsgsize;
1544 }
1545 pbuf_cat(chain_buf.p, p);
1546 }
1547 }
1548 /* save size of total chain */
1549 if (err == ERR_OK) {
1550 size = netbuf_len(&chain_buf);
1551 }
1552 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
1553
1554 if (err == ERR_OK) {
1555 #if LWIP_IPV4 && LWIP_IPV6
1556 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
1557 if (IP_IS_V6_VAL(chain_buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&chain_buf.addr))) {
1558 unmap_ipv4_mapped_ipv6(ip_2_ip4(&chain_buf.addr), ip_2_ip6(&chain_buf.addr));
1559 IP_SET_TYPE_VAL(chain_buf.addr, IPADDR_TYPE_V4);
1560 }
1561 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1562
1563 /* send the data */
1564 err = netconn_send(sock->conn, &chain_buf);
1565 }
1566
1567     /* deallocate the buffer */
1568 netbuf_free(&chain_buf);
1569
1570 sock_set_errno(sock, err_to_errno(err));
1571 done_socket(sock);
1572 return (err == ERR_OK ? size : -1);
1573 sendmsg_emsgsize:
1574 sock_set_errno(sock, EMSGSIZE);
1575 netbuf_free(&chain_buf);
1576 done_socket(sock);
1577 return -1;
1578 }
1579 #else /* LWIP_UDP || LWIP_RAW */
1580 sock_set_errno(sock, err_to_errno(ERR_ARG));
1581 done_socket(sock);
1582 return -1;
1583 #endif /* LWIP_UDP || LWIP_RAW */
1584 }
1585
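/** Send a datagram to the address given in 'to'. For TCP connections, this
 * falls through to lwip_send(); the size is limited to one datagram
 * (at most 0xFFFF bytes).
 */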
1586 ssize_t
1587 lwip_sendto(int s, const void *data, size_t size, int flags,
1588 const struct sockaddr *to, socklen_t tolen)
1589 {
1590 struct lwip_sock *sock;
1591 err_t err;
1592 u16_t short_size;
1593 u16_t remote_port;
1594 struct netbuf buf;
1595
1596 sock = get_socket(s);
1597 if (!sock) {
1598 return -1;
1599 }
1600
1601 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1602 #if LWIP_TCP
1603 done_socket(sock);
1604 return lwip_send(s, data, size, flags);
1605 #else /* LWIP_TCP */
1606 LWIP_UNUSED_ARG(flags);
1607 sock_set_errno(sock, err_to_errno(ERR_ARG));
1608 done_socket(sock);
1609 return -1;
1610 #endif /* LWIP_TCP */
1611 }
1612
1613 if (size > LWIP_MIN(0xFFFF, SSIZE_MAX)) {
1614 /* cannot fit into one datagram (at least for us) */
1615 sock_set_errno(sock, EMSGSIZE);
1616 done_socket(sock);
1617 return -1;
1618 }
1619 short_size = (u16_t)size;
1620 LWIP_ERROR("lwip_sendto: invalid address", (((to == NULL) && (tolen == 0)) ||
1621 (IS_SOCK_ADDR_LEN_VALID(tolen) &&
1622 ((to != NULL) && (IS_SOCK_ADDR_TYPE_VALID(to) && IS_SOCK_ADDR_ALIGNED(to))))),
1623 sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1624 LWIP_UNUSED_ARG(tolen);
1625
1626 /* initialize a buffer */
1627 buf.p = buf.ptr = NULL;
1628 #if LWIP_CHECKSUM_ON_COPY
1629 buf.flags = 0;
1630 #endif /* LWIP_CHECKSUM_ON_COPY */
1631 if (to) {
1632 SOCKADDR_TO_IPADDR_PORT(to, &buf.addr, remote_port);
1633 } else {
1634 remote_port = 0;
1635 ip_addr_set_any(NETCONNTYPE_ISIPV6(netconn_type(sock->conn)), &buf.addr);
1636 }
1637 netbuf_fromport(&buf) = remote_port;
1638
1639
1640 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_sendto(%d, data=%p, short_size=%"U16_F", flags=0x%x to=",
1641 s, data, short_size, flags));
1642 ip_addr_debug_print_val(SOCKETS_DEBUG, buf.addr);
1643 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", remote_port));
1644
1645 /* make the buffer point to the data that should be sent */
1646 #if LWIP_NETIF_TX_SINGLE_PBUF
1647 /* Allocate a new netbuf and copy the data into it. */
1648 if (netbuf_alloc(&buf, short_size) == NULL) {
1649 err = ERR_MEM;
1650 } else {
1651 #if LWIP_CHECKSUM_ON_COPY
1652 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_RAW) {
1653 u16_t chksum = LWIP_CHKSUM_COPY(buf.p->payload, data, short_size);
1654 netbuf_set_chksum(&buf, chksum);
1655 } else
1656 #endif /* LWIP_CHECKSUM_ON_COPY */
1657 {
1658 MEMCPY(buf.p->payload, data, short_size);
1659 }
1660 err = ERR_OK;
1661 }
1662 #else /* LWIP_NETIF_TX_SINGLE_PBUF */
1663 err = netbuf_ref(&buf, data, short_size);
1664 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
1665 if (err == ERR_OK) {
1666 #if LWIP_IPV4 && LWIP_IPV6
1667 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
1668 if (IP_IS_V6_VAL(buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&buf.addr))) {
1669 unmap_ipv4_mapped_ipv6(ip_2_ip4(&buf.addr), ip_2_ip6(&buf.addr));
1670 IP_SET_TYPE_VAL(buf.addr, IPADDR_TYPE_V4);
1671 }
1672 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1673
1674 /* send the data */
1675 err = netconn_send(sock->conn, &buf);
1676 }
1677
1678   /* deallocate the buffer */
1679 netbuf_free(&buf);
1680
1681 sock_set_errno(sock, err_to_errno(err));
1682 done_socket(sock);
1683 return (err == ERR_OK ? short_size : -1);
1684 }
1685
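/**
 * Create a new socket of the given type (SOCK_RAW, SOCK_DGRAM or SOCK_STREAM),
 * backed by a freshly allocated netconn. Returns the new descriptor or -1 with
 * errno set to EINVAL (unknown type), ENOBUFS (netconn allocation failed) or
 * ENFILE (no free socket slot).
 *
 * Illustrative calls:
 *
 *   int tcp_sock = lwip_socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
 *   int udp_sock = lwip_socket(AF_INET, SOCK_DGRAM, 0);
 */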
1686 int
1687 lwip_socket(int domain, int type, int protocol)
1688 {
1689 struct netconn *conn;
1690 int i;
1691
1692 LWIP_UNUSED_ARG(domain); /* @todo: check this */
1693
1694 /* create a netconn */
1695 switch (type) {
1696 case SOCK_RAW:
1697 conn = netconn_new_with_proto_and_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_RAW),
1698 (u8_t)protocol, DEFAULT_SOCKET_EVENTCB);
1699 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_RAW, %d) = ",
1700 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1701 break;
1702 case SOCK_DGRAM:
1703 conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain,
1704 ((protocol == IPPROTO_UDPLITE) ? NETCONN_UDPLITE : NETCONN_UDP)),
1705 DEFAULT_SOCKET_EVENTCB);
1706 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_DGRAM, %d) = ",
1707 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1708 #if LWIP_NETBUF_RECVINFO
1709 if (conn) {
1710 /* netconn layer enables pktinfo by default, sockets default to off */
1711 conn->flags &= ~NETCONN_FLAG_PKTINFO;
1712 }
1713 #endif /* LWIP_NETBUF_RECVINFO */
1714 break;
1715 case SOCK_STREAM:
1716 conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_TCP), DEFAULT_SOCKET_EVENTCB);
1717 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_STREAM, %d) = ",
1718 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1719 break;
1720 default:
1721 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%d, %d/UNKNOWN, %d) = -1\n",
1722 domain, type, protocol));
1723 set_errno(EINVAL);
1724 return -1;
1725 }
1726
1727 if (!conn) {
1728 LWIP_DEBUGF(SOCKETS_DEBUG, ("-1 / ENOBUFS (could not create netconn)\n"));
1729 set_errno(ENOBUFS);
1730 return -1;
1731 }
1732
1733 i = alloc_socket(conn, 0);
1734
1735 if (i == -1) {
1736 netconn_delete(conn);
1737 set_errno(ENFILE);
1738 return -1;
1739 }
1740 conn->socket = i;
1741 done_socket(&sockets[i - LWIP_SOCKET_OFFSET]);
1742 LWIP_DEBUGF(SOCKETS_DEBUG, ("%d\n", i));
1743 set_errno(0);
1744 return i;
1745 }
1746
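/** lwip_write() is the plain write()-style entry point: it simply behaves
 * like lwip_send() called with flags == 0. */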
1747 ssize_t
1748 lwip_write(int s, const void *data, size_t size)
1749 {
1750 return lwip_send(s, data, size, 0);
1751 }
1752
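/**
 * lwip_writev() gathers the iovec array into a struct msghdr (no address,
 * no control data) and hands it to lwip_sendmsg().
 *
 * Illustrative sketch, assuming 's' is an already connected TCP socket:
 *
 *   struct iovec iov[2];
 *   iov[0].iov_base = "GET /";
 *   iov[0].iov_len  = 5;
 *   iov[1].iov_base = " HTTP/1.0\r\n\r\n";
 *   iov[1].iov_len  = 13;
 *   lwip_writev(s, iov, 2);
 */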
1753 ssize_t
1754 lwip_writev(int s, const struct iovec *iov, int iovcnt)
1755 {
1756 struct msghdr msg;
1757
1758 msg.msg_name = NULL;
1759 msg.msg_namelen = 0;
1760 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1761 Blame the opengroup standard for this inconsistency. */
1762 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1763 msg.msg_iovlen = iovcnt;
1764 msg.msg_control = NULL;
1765 msg.msg_controllen = 0;
1766 msg.msg_flags = 0;
1767 return lwip_sendmsg(s, &msg, 0);
1768 }
1769
1770 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
1771 /* Add select_cb to select_cb_list. */
1772 static void
1773 lwip_link_select_cb(struct lwip_select_cb *select_cb)
1774 {
1775 LWIP_SOCKET_SELECT_DECL_PROTECT(lev);
1776
1777 /* Protect the select_cb_list */
1778 LWIP_SOCKET_SELECT_PROTECT(lev);
1779
1780 /* Put this select_cb on top of list */
1781 select_cb->next = select_cb_list;
1782 if (select_cb_list != NULL) {
1783 select_cb_list->prev = select_cb;
1784 }
1785 select_cb_list = select_cb;
1786 #if !LWIP_TCPIP_CORE_LOCKING
1787 /* Increasing this counter tells select_check_waiters that the list has changed. */
1788 select_cb_ctr++;
1789 #endif
1790
1791 /* Now we can safely unprotect */
1792 LWIP_SOCKET_SELECT_UNPROTECT(lev);
1793 }
1794
1795 /* Remove select_cb from select_cb_list. */
1796 static void
1797 lwip_unlink_select_cb(struct lwip_select_cb *select_cb)
1798 {
1799 LWIP_SOCKET_SELECT_DECL_PROTECT(lev);
1800
1801 /* Take us off the list */
1802 LWIP_SOCKET_SELECT_PROTECT(lev);
1803 if (select_cb->next != NULL) {
1804 select_cb->next->prev = select_cb->prev;
1805 }
1806 if (select_cb_list == select_cb) {
1807 LWIP_ASSERT("select_cb->prev == NULL", select_cb->prev == NULL);
1808 select_cb_list = select_cb->next;
1809 } else {
1810 LWIP_ASSERT("select_cb->prev != NULL", select_cb->prev != NULL);
1811 select_cb->prev->next = select_cb->next;
1812 }
1813 #if !LWIP_TCPIP_CORE_LOCKING
1814 /* Increasing this counter tells select_check_waiters that the list has changed. */
1815 select_cb_ctr++;
1816 #endif
1817 LWIP_SOCKET_SELECT_UNPROTECT(lev);
1818 }
1819 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
1820
1821 #if LWIP_SOCKET_SELECT
1822 /**
1823 * Go through the readset and writeset lists and see which socket of the sockets
1824 * set in the sets has events. On return, readset, writeset and exceptset have
1825 * the sockets enabled that had events.
1826 *
1827 * @param maxfdp1 the highest socket index in the sets
1828 * @param readset_in set of sockets to check for read events
1829 * @param writeset_in set of sockets to check for write events
1830 * @param exceptset_in set of sockets to check for error events
1831 * @param readset_out set of sockets that had read events
1832 * @param writeset_out set of sockets that had write events
1833  * @param exceptset_out set of sockets that had error events
1834 * @return number of sockets that had events (read/write/exception) (>= 0)
1835 */
1836 static int
1837 lwip_selscan(int maxfdp1, fd_set *readset_in, fd_set *writeset_in, fd_set *exceptset_in,
1838 fd_set *readset_out, fd_set *writeset_out, fd_set *exceptset_out)
1839 {
1840 int i, nready = 0;
1841 fd_set lreadset, lwriteset, lexceptset;
1842 struct lwip_sock *sock;
1843 SYS_ARCH_DECL_PROTECT(lev);
1844
1845 FD_ZERO(&lreadset);
1846 FD_ZERO(&lwriteset);
1847 FD_ZERO(&lexceptset);
1848
1849 /* Go through each socket in each list to count number of sockets which
1850 currently match */
1851 for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
1852 /* if this FD is not in the set, continue */
1853 if (!(readset_in && FD_ISSET(i, readset_in)) &&
1854 !(writeset_in && FD_ISSET(i, writeset_in)) &&
1855 !(exceptset_in && FD_ISSET(i, exceptset_in))) {
1856 continue;
1857 }
1858 /* First get the socket's status (protected)... */
1859 SYS_ARCH_PROTECT(lev);
1860 sock = tryget_socket_unconn_locked(i);
1861 if (sock != NULL) {
1862 void *lastdata = sock->lastdata.pbuf;
1863 s16_t rcvevent = sock->rcvevent;
1864 u16_t sendevent = sock->sendevent;
1865 u16_t errevent = sock->errevent;
1866 SYS_ARCH_UNPROTECT(lev);
1867
1868 /* ... then examine it: */
1869 /* See if netconn of this socket is ready for read */
1870 if (readset_in && FD_ISSET(i, readset_in) && ((lastdata != NULL) || (rcvevent > 0))) {
1871 FD_SET(i, &lreadset);
1872 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for reading\n", i));
1873 nready++;
1874 }
1875 /* See if netconn of this socket is ready for write */
1876 if (writeset_in && FD_ISSET(i, writeset_in) && (sendevent != 0)) {
1877 FD_SET(i, &lwriteset);
1878 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for writing\n", i));
1879 nready++;
1880 }
1881 /* See if netconn of this socket had an error */
1882 if (exceptset_in && FD_ISSET(i, exceptset_in) && (errevent != 0)) {
1883 FD_SET(i, &lexceptset);
1884 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for exception\n", i));
1885 nready++;
1886 }
1887 done_socket(sock);
1888 } else {
1889 SYS_ARCH_UNPROTECT(lev);
1890       /* not a valid open socket */
1891 return -1;
1892 }
1893 }
1894 /* copy local sets to the ones provided as arguments */
1895 *readset_out = lreadset;
1896 *writeset_out = lwriteset;
1897 *exceptset_out = lexceptset;
1898
1899 LWIP_ASSERT("nready >= 0", nready >= 0);
1900 return nready;
1901 }
1902
1903 #if LWIP_NETCONN_FULLDUPLEX
1904 /* Mark all of the set sockets in one of the three fdsets passed to select as used.
1905 * All sockets are marked (and later unmarked), whether they are open or not.
1906 * This is OK as lwip_selscan aborts select when non-open sockets are found.
1907 */
1908 static void
1909 lwip_select_inc_sockets_used_set(int maxfdp, fd_set *fdset, fd_set *used_sockets)
1910 {
1911 SYS_ARCH_DECL_PROTECT(lev);
1912 if (fdset) {
1913 int i;
1914 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
1915 /* if this FD is in the set, lock it (unless already done) */
1916 if (FD_ISSET(i, fdset) && !FD_ISSET(i, used_sockets)) {
1917 struct lwip_sock *sock;
1918 SYS_ARCH_PROTECT(lev);
1919 sock = tryget_socket_unconn_locked(i);
1920 if (sock != NULL) {
1921 /* leave the socket used until released by lwip_select_dec_sockets_used */
1922 FD_SET(i, used_sockets);
1923 }
1924 SYS_ARCH_UNPROTECT(lev);
1925 }
1926 }
1927 }
1928 }
1929
1930 /* Mark all sockets passed to select as used to prevent them from being freed
1931 * from other threads while select is running.
1932  * Marked sockets are added to 'used_sockets' to mark them only once and be able
1933 * to unmark them correctly.
1934 */
1935 static void
1936 lwip_select_inc_sockets_used(int maxfdp, fd_set *fdset1, fd_set *fdset2, fd_set *fdset3, fd_set *used_sockets)
1937 {
1938 FD_ZERO(used_sockets);
1939 lwip_select_inc_sockets_used_set(maxfdp, fdset1, used_sockets);
1940 lwip_select_inc_sockets_used_set(maxfdp, fdset2, used_sockets);
1941 lwip_select_inc_sockets_used_set(maxfdp, fdset3, used_sockets);
1942 }
1943
1944 /* Let go all sockets that were marked as used when starting select */
1945 static void
1946 lwip_select_dec_sockets_used(int maxfdp, fd_set *used_sockets)
1947 {
1948 int i;
1949 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
1950 /* if this FD is not in the set, continue */
1951 if (FD_ISSET(i, used_sockets)) {
1952 struct lwip_sock *sock = tryget_socket_unconn_nouse(i);
1953 LWIP_ASSERT("socket gone at the end of select", sock != NULL);
1954 if (sock != NULL) {
1955 done_socket(sock);
1956 }
1957 }
1958 }
1959 }
1960 #else /* LWIP_NETCONN_FULLDUPLEX */
1961 #define lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, used_sockets)
1962 #define lwip_select_dec_sockets_used(maxfdp1, used_sockets)
1963 #endif /* LWIP_NETCONN_FULLDUPLEX */
1964
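/**
 * lwip_select(): BSD-style select(). Waits until one of the descriptors in the
 * given sets becomes readable/writable/has an error, or until the timeout
 * expires. A zero timeval only polls, a NULL timeout waits forever. On return,
 * the sets are overwritten with the descriptors that are actually ready.
 *
 * Illustrative wait for incoming data with a 5 second timeout ('sock' is an
 * example descriptor, error handling omitted):
 *
 *   char buf[128];
 *   fd_set rfds;
 *   struct timeval tv = { 5, 0 };
 *   FD_ZERO(&rfds);
 *   FD_SET(sock, &rfds);
 *   if (lwip_select(sock + 1, &rfds, NULL, NULL, &tv) > 0 && FD_ISSET(sock, &rfds)) {
 *     (void)lwip_recv(sock, buf, sizeof(buf), 0);
 *   }
 */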
1965 int
1966 lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset,
1967 struct timeval *timeout)
1968 {
1969 u32_t waitres = 0;
1970 int nready;
1971 fd_set lreadset, lwriteset, lexceptset;
1972 u32_t msectimeout;
1973 int i;
1974 int maxfdp2;
1975 #if LWIP_NETCONN_SEM_PER_THREAD
1976 int waited = 0;
1977 #endif
1978 #if LWIP_NETCONN_FULLDUPLEX
1979 fd_set used_sockets;
1980 #endif
1981 SYS_ARCH_DECL_PROTECT(lev);
1982
1983 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%"S32_F" tvusec=%"S32_F")\n",
1984 maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset,
1985 timeout ? (s32_t)timeout->tv_sec : (s32_t) - 1,
1986 timeout ? (s32_t)timeout->tv_usec : (s32_t) - 1));
1987
1988 if ((maxfdp1 < 0) || (maxfdp1 > LWIP_SELECT_MAXNFDS)) {
1989 set_errno(EINVAL);
1990 return -1;
1991 }
1992
1993 lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, &used_sockets);
1994
1995 /* Go through each socket in each list to count number of sockets which
1996 currently match */
1997 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
1998
1999 if (nready < 0) {
2000 /* one of the sockets in one of the fd_sets was invalid */
2001 set_errno(EBADF);
2002 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2003 return -1;
2004 } else if (nready > 0) {
2005 /* one or more sockets are set, no need to wait */
2006 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
2007 } else {
2008 /* If we don't have any current events, then suspend if we are supposed to */
2009 if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) {
2010 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n"));
2011 /* This is OK as the local fdsets are empty and nready is zero,
2012 or we would have returned earlier. */
2013 } else {
2014 /* None ready: add our semaphore to list:
2015 We don't actually need any dynamic memory. Our entry on the
2016 list is only valid while we are in this function, so it's ok
2017 to use local variables (unless we're running in MPU compatible
2018 mode). */
2019 API_SELECT_CB_VAR_DECLARE(select_cb);
2020 API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(ENOMEM); lwip_select_dec_sockets_used(maxfdp1, &used_sockets); return -1);
2021 memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));
2022
2023 API_SELECT_CB_VAR_REF(select_cb).readset = readset;
2024 API_SELECT_CB_VAR_REF(select_cb).writeset = writeset;
2025 API_SELECT_CB_VAR_REF(select_cb).exceptset = exceptset;
2026 #if LWIP_NETCONN_SEM_PER_THREAD
2027 API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
2028 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2029 if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
2030 /* failed to create semaphore */
2031 set_errno(ENOMEM);
2032 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2033 API_SELECT_CB_VAR_FREE(select_cb);
2034 return -1;
2035 }
2036 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2037
2038 lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2039
2040 /* Increase select_waiting for each socket we are interested in */
2041 maxfdp2 = maxfdp1;
2042 for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
2043 if ((readset && FD_ISSET(i, readset)) ||
2044 (writeset && FD_ISSET(i, writeset)) ||
2045 (exceptset && FD_ISSET(i, exceptset))) {
2046 struct lwip_sock *sock;
2047 SYS_ARCH_PROTECT(lev);
2048 sock = tryget_socket_unconn_locked(i);
2049 if (sock != NULL) {
2050 sock->select_waiting++;
2051 if (sock->select_waiting == 0) {
2052 /* overflow - too many threads waiting */
2053 sock->select_waiting--;
2054 nready = -1;
2055 maxfdp2 = i;
2056 SYS_ARCH_UNPROTECT(lev);
2057 done_socket(sock);
2058 set_errno(EBUSY);
2059 break;
2060 }
2061 SYS_ARCH_UNPROTECT(lev);
2062 done_socket(sock);
2063 } else {
2064 /* Not a valid socket */
2065 nready = -1;
2066 maxfdp2 = i;
2067 SYS_ARCH_UNPROTECT(lev);
2068 set_errno(EBADF);
2069 break;
2070 }
2071 }
2072 }
2073
2074 if (nready >= 0) {
2075 /* Call lwip_selscan again: there could have been events between
2076 the last scan (without us on the list) and putting us on the list! */
2077 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2078 if (!nready) {
2079 /* Still none ready, just wait to be woken */
2080 if (timeout == 0) {
2081 /* Wait forever */
2082 msectimeout = 0;
2083 } else {
2084 long msecs_long = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500) / 1000));
2085 if (msecs_long <= 0) {
2086 /* Wait 1ms at least (0 means wait forever) */
2087 msectimeout = 1;
2088 } else {
2089 msectimeout = (u32_t)msecs_long;
2090 }
2091 }
2092
2093 waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
2094 #if LWIP_NETCONN_SEM_PER_THREAD
2095 waited = 1;
2096 #endif
2097 }
2098 }
2099
2100 /* Decrease select_waiting for each socket we are interested in */
2101 for (i = LWIP_SOCKET_OFFSET; i < maxfdp2; i++) {
2102 if ((readset && FD_ISSET(i, readset)) ||
2103 (writeset && FD_ISSET(i, writeset)) ||
2104 (exceptset && FD_ISSET(i, exceptset))) {
2105 struct lwip_sock *sock;
2106 SYS_ARCH_PROTECT(lev);
2107 sock = tryget_socket_unconn_nouse(i);
2108 LWIP_ASSERT("socket gone at the end of select", sock != NULL);
2109 if (sock != NULL) {
2110 /* for now, handle select_waiting==0... */
2111 LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
2112 if (sock->select_waiting > 0) {
2113 sock->select_waiting--;
2114 }
2115 SYS_ARCH_UNPROTECT(lev);
2116 } else {
2117 SYS_ARCH_UNPROTECT(lev);
2118 /* Not a valid socket */
2119 nready = -1;
2120 set_errno(EBADF);
2121 }
2122 }
2123 }
2124
2125 lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2126
2127 #if LWIP_NETCONN_SEM_PER_THREAD
2128 if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
2129 /* don't leave the thread-local semaphore signalled */
2130 sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
2131 }
2132 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2133 sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
2134 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2135 API_SELECT_CB_VAR_FREE(select_cb);
2136
2137 if (nready < 0) {
2138 /* This happens when a socket got closed while waiting */
2139 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2140 return -1;
2141 }
2142
2143 if (waitres == SYS_ARCH_TIMEOUT) {
2144 /* Timeout */
2145 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n"));
2146 /* This is OK as the local fdsets are empty and nready is zero,
2147 or we would have returned earlier. */
2148 } else {
2149 /* See what's set now after waiting */
2150 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2151 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
2152 }
2153 }
2154 }
2155
2156 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2157 set_errno(0);
2158 if (readset) {
2159 *readset = lreadset;
2160 }
2161 if (writeset) {
2162 *writeset = lwriteset;
2163 }
2164 if (exceptset) {
2165 *exceptset = lexceptset;
2166 }
2167 return nready;
2168 }
2169 #endif /* LWIP_SOCKET_SELECT */
2170
2171 #if LWIP_SOCKET_POLL
2172 /** Options for the lwip_pollscan function. */
2173 enum lwip_pollscan_opts
2174 {
2175 /** Clear revents in each struct pollfd. */
2176 LWIP_POLLSCAN_CLEAR = 1,
2177
2178 /** Increment select_waiting in each struct lwip_sock. */
2179 LWIP_POLLSCAN_INC_WAIT = 2,
2180
2181 /** Decrement select_waiting in each struct lwip_sock. */
2182 LWIP_POLLSCAN_DEC_WAIT = 4
2183 };
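/* lwip_poll() runs lwip_pollscan() up to three times with these options:
   first with LWIP_POLLSCAN_CLEAR to reset revents and take an initial snapshot,
   then with LWIP_POLLSCAN_INC_WAIT while registering itself as a waiter, and
   finally with LWIP_POLLSCAN_DEC_WAIT after waking up to collect the results. */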
2184
2185 /**
2186 * Update revents in each struct pollfd.
2187 * Optionally update select_waiting in struct lwip_sock.
2188 *
2189 * @param fds array of structures to update
2190 * @param nfds number of structures in fds
2191 * @param opts what to update and how
2192 * @return number of structures that have revents != 0
2193 */
2194 static int
2195 lwip_pollscan(struct pollfd *fds, nfds_t nfds, enum lwip_pollscan_opts opts)
2196 {
2197 int nready = 0;
2198 nfds_t fdi;
2199 struct lwip_sock *sock;
2200 SYS_ARCH_DECL_PROTECT(lev);
2201
2202 /* Go through each struct pollfd in the array. */
2203 for (fdi = 0; fdi < nfds; fdi++) {
2204 if ((opts & LWIP_POLLSCAN_CLEAR) != 0) {
2205 fds[fdi].revents = 0;
2206 }
2207
2208 /* Negative fd means the caller wants us to ignore this struct.
2209 POLLNVAL means we already detected that the fd is invalid;
2210 if another thread has since opened a new socket with that fd,
2211 we must not use that socket. */
2212 if (fds[fdi].fd >= 0 && (fds[fdi].revents & POLLNVAL) == 0) {
2213 /* First get the socket's status (protected)... */
2214 SYS_ARCH_PROTECT(lev);
2215 sock = tryget_socket_unconn_locked(fds[fdi].fd);
2216 if (sock != NULL) {
2217 void* lastdata = sock->lastdata.pbuf;
2218 s16_t rcvevent = sock->rcvevent;
2219 u16_t sendevent = sock->sendevent;
2220 u16_t errevent = sock->errevent;
2221
2222 if ((opts & LWIP_POLLSCAN_INC_WAIT) != 0) {
2223 sock->select_waiting++;
2224 if (sock->select_waiting == 0) {
2225 /* overflow - too many threads waiting */
2226 sock->select_waiting--;
2227 nready = -1;
2228 SYS_ARCH_UNPROTECT(lev);
2229 done_socket(sock);
2230 break;
2231 }
2232 } else if ((opts & LWIP_POLLSCAN_DEC_WAIT) != 0) {
2233 /* for now, handle select_waiting==0... */
2234 LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
2235 if (sock->select_waiting > 0) {
2236 sock->select_waiting--;
2237 }
2238 }
2239 SYS_ARCH_UNPROTECT(lev);
2240 done_socket(sock);
2241
2242 /* ... then examine it: */
2243 /* See if netconn of this socket is ready for read */
2244 if ((fds[fdi].events & POLLIN) != 0 && ((lastdata != NULL) || (rcvevent > 0))) {
2245 fds[fdi].revents |= POLLIN;
2246 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for reading\n", fds[fdi].fd));
2247 }
2248 /* See if netconn of this socket is ready for write */
2249 if ((fds[fdi].events & POLLOUT) != 0 && (sendevent != 0)) {
2250 fds[fdi].revents |= POLLOUT;
2251 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for writing\n", fds[fdi].fd));
2252 }
2253 /* See if netconn of this socket had an error */
2254 if (errevent != 0) {
2255 /* POLLERR is output only. */
2256 fds[fdi].revents |= POLLERR;
2257 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for exception\n", fds[fdi].fd));
2258 }
2259 } else {
2260 /* Not a valid socket */
2261 SYS_ARCH_UNPROTECT(lev);
2262 /* POLLNVAL is output only. */
2263 fds[fdi].revents |= POLLNVAL;
2264 return -1;
2265 }
2266 }
2267
2268 /* Will return the number of structures that have events,
2269 not the number of events. */
2270 if (fds[fdi].revents != 0) {
2271 nready++;
2272 }
2273 }
2274
2275 LWIP_ASSERT("nready >= 0", nready >= 0);
2276 return nready;
2277 }
2278
2279 #if LWIP_NETCONN_FULLDUPLEX
2280 /* Mark all sockets as used.
2281 *
2282 * All sockets are marked (and later unmarked), whether they are open or not.
2283  * This is OK as lwip_pollscan aborts the poll when non-open sockets are found.
2284 */
2285 static void
2286 lwip_poll_inc_sockets_used(struct pollfd *fds, nfds_t nfds)
2287 {
2288 nfds_t fdi;
2289
2290 if(fds) {
2291 /* Go through each struct pollfd in the array. */
2292 for (fdi = 0; fdi < nfds; fdi++) {
2293 /* Increase the reference counter */
2294 tryget_socket_unconn(fds[fdi].fd);
2295 }
2296 }
2297 }
2298
2299 /* Let go all sockets that were marked as used when starting poll */
2300 static void
2301 lwip_poll_dec_sockets_used(struct pollfd *fds, nfds_t nfds)
2302 {
2303 nfds_t fdi;
2304
2305 if(fds) {
2306 /* Go through each struct pollfd in the array. */
2307 for (fdi = 0; fdi < nfds; fdi++) {
2308 struct lwip_sock *sock = tryget_socket_unconn_nouse(fds[fdi].fd);
2309 if (sock != NULL) {
2310 done_socket(sock);
2311 }
2312 }
2313 }
2314 }
2315 #else /* LWIP_NETCONN_FULLDUPLEX */
2316 #define lwip_poll_inc_sockets_used(fds, nfds)
2317 #define lwip_poll_dec_sockets_used(fds, nfds)
2318 #endif /* LWIP_NETCONN_FULLDUPLEX */
2319
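/**
 * lwip_poll(): BSD-style poll(). 'timeout' is in milliseconds; 0 returns
 * immediately and a negative value waits forever.
 *
 * Illustrative wait for readability on two sockets (descriptors are examples,
 * error handling omitted):
 *
 *   char buf[64];
 *   struct pollfd pfds[2];
 *   pfds[0].fd = tcp_sock;  pfds[0].events = POLLIN;
 *   pfds[1].fd = udp_sock;  pfds[1].events = POLLIN;
 *   if (lwip_poll(pfds, 2, 1000) > 0 && (pfds[0].revents & POLLIN)) {
 *     (void)lwip_recv(tcp_sock, buf, sizeof(buf), 0);
 *   }
 */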
2320 int
2321 lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout)
2322 {
2323 u32_t waitres = 0;
2324 int nready;
2325 u32_t msectimeout;
2326 #if LWIP_NETCONN_SEM_PER_THREAD
2327 int waited = 0;
2328 #endif
2329
2330 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll(%p, %d, %d)\n",
2331 (void*)fds, (int)nfds, timeout));
2332 LWIP_ERROR("lwip_poll: invalid fds", ((fds != NULL && nfds > 0) || (fds == NULL && nfds == 0)),
2333 set_errno(EINVAL); return -1;);
2334
2335 lwip_poll_inc_sockets_used(fds, nfds);
2336
2337 /* Go through each struct pollfd to count number of structures
2338 which currently match */
2339 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_CLEAR);
2340
2341 if (nready < 0) {
2342 lwip_poll_dec_sockets_used(fds, nfds);
2343 return -1;
2344 }
2345
2346 /* If we don't have any current events, then suspend if we are supposed to */
2347 if (!nready) {
2348 API_SELECT_CB_VAR_DECLARE(select_cb);
2349
2350 if (timeout == 0) {
2351 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: no timeout, returning 0\n"));
2352 goto return_success;
2353 }
2354 API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(EAGAIN); lwip_poll_dec_sockets_used(fds, nfds); return -1);
2355 memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));
2356
2357 /* None ready: add our semaphore to list:
2358 We don't actually need any dynamic memory. Our entry on the
2359 list is only valid while we are in this function, so it's ok
2360 to use local variables. */
2361
2362 API_SELECT_CB_VAR_REF(select_cb).poll_fds = fds;
2363 API_SELECT_CB_VAR_REF(select_cb).poll_nfds = nfds;
2364 #if LWIP_NETCONN_SEM_PER_THREAD
2365 API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
2366 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2367 if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
2368 /* failed to create semaphore */
2369 set_errno(EAGAIN);
2370 lwip_poll_dec_sockets_used(fds, nfds);
2371 API_SELECT_CB_VAR_FREE(select_cb);
2372 return -1;
2373 }
2374 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2375
2376 lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2377
2378 /* Increase select_waiting for each socket we are interested in.
2379 Also, check for events again: there could have been events between
2380 the last scan (without us on the list) and putting us on the list! */
2381 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_INC_WAIT);
2382
2383 if (!nready) {
2384 /* Still none ready, just wait to be woken */
2385 if (timeout < 0) {
2386 /* Wait forever */
2387 msectimeout = 0;
2388 } else {
2389 /* timeout == 0 would have been handled earlier. */
2390 LWIP_ASSERT("timeout > 0", timeout > 0);
2391 msectimeout = timeout;
2392 }
2393 waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
2394 #if LWIP_NETCONN_SEM_PER_THREAD
2395 waited = 1;
2396 #endif
2397 }
2398
2399 /* Decrease select_waiting for each socket we are interested in,
2400 and check which events occurred while we waited. */
2401 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_DEC_WAIT);
2402
2403 lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2404
2405 #if LWIP_NETCONN_SEM_PER_THREAD
2406     if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
2407 /* don't leave the thread-local semaphore signalled */
2408 sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
2409 }
2410 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2411 sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
2412 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2413 API_SELECT_CB_VAR_FREE(select_cb);
2414
2415 if (nready < 0) {
2416 /* This happens when a socket got closed while waiting */
2417 lwip_poll_dec_sockets_used(fds, nfds);
2418 return -1;
2419 }
2420
2421 if (waitres == SYS_ARCH_TIMEOUT) {
2422 /* Timeout */
2423 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: timeout expired\n"));
2424 goto return_success;
2425 }
2426 }
2427
2428 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: nready=%d\n", nready));
2429 return_success:
2430 lwip_poll_dec_sockets_used(fds, nfds);
2431 set_errno(0);
2432 return nready;
2433 }
2434
2435 /**
2436 * Check whether event_callback should wake up a thread waiting in
2437 * lwip_poll.
2438 */
2439 static int
2440 lwip_poll_should_wake(const struct lwip_select_cb *scb, int fd, int has_recvevent, int has_sendevent, int has_errevent)
2441 {
2442 nfds_t fdi;
2443 for (fdi = 0; fdi < scb->poll_nfds; fdi++) {
2444 const struct pollfd *pollfd = &scb->poll_fds[fdi];
2445 if (pollfd->fd == fd) {
2446 /* Do not update pollfd->revents right here;
2447 that would be a data race because lwip_pollscan
2448 accesses revents without protecting. */
2449 if (has_recvevent && (pollfd->events & POLLIN) != 0) {
2450 return 1;
2451 }
2452 if (has_sendevent && (pollfd->events & POLLOUT) != 0) {
2453 return 1;
2454 }
2455 if (has_errevent) {
2456 /* POLLERR is output only. */
2457 return 1;
2458 }
2459 }
2460 }
2461 return 0;
2462 }
2463 #endif /* LWIP_SOCKET_POLL */
2464
2465 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
2466 /**
2467 * Callback registered in the netconn layer for each socket-netconn.
2468 * Processes recvevent (data available) and wakes up tasks waiting for select.
2469 *
2470 * @note for LWIP_TCPIP_CORE_LOCKING any caller of this function
2471 * must have the core lock held when signaling the following events
2472 * as they might cause select_list_cb to be checked:
2473 * NETCONN_EVT_RCVPLUS
2474 * NETCONN_EVT_SENDPLUS
2475 * NETCONN_EVT_ERROR
2476 * This requirement will be asserted in select_check_waiters()
2477 */
2478 static void
2479 event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len)
2480 {
2481 int s, check_waiters;
2482 struct lwip_sock *sock;
2483 SYS_ARCH_DECL_PROTECT(lev);
2484
2485 LWIP_UNUSED_ARG(len);
2486
2487 /* Get socket */
2488 if (conn) {
2489 s = conn->socket;
2490 if (s < 0) {
2491 /* Data comes in right away after an accept, even though
2492 * the server task might not have created a new socket yet.
2493 * Just count down (or up) if that's the case and we
2494 * will use the data later. Note that only receive events
2495 * can happen before the new socket is set up. */
2496 SYS_ARCH_PROTECT(lev);
2497 if (conn->socket < 0) {
2498 if (evt == NETCONN_EVT_RCVPLUS) {
2499 /* conn->socket is -1 on initialization
2500 lwip_accept adjusts sock->recvevent if conn->socket < -1 */
2501 conn->socket--;
2502 }
2503 SYS_ARCH_UNPROTECT(lev);
2504 return;
2505 }
2506 s = conn->socket;
2507 SYS_ARCH_UNPROTECT(lev);
2508 }
2509
2510 sock = get_socket(s);
2511 if (!sock) {
2512 return;
2513 }
2514 } else {
2515 return;
2516 }
2517
2518 check_waiters = 1;
2519 SYS_ARCH_PROTECT(lev);
2520 /* Set event as required */
2521 switch (evt) {
2522 case NETCONN_EVT_RCVPLUS:
2523 sock->rcvevent++;
2524 if (sock->rcvevent > 1) {
2525 check_waiters = 0;
2526 }
2527 break;
2528 case NETCONN_EVT_RCVMINUS:
2529 sock->rcvevent--;
2530 check_waiters = 0;
2531 break;
2532 case NETCONN_EVT_SENDPLUS:
2533 if (sock->sendevent) {
2534 check_waiters = 0;
2535 }
2536 sock->sendevent = 1;
2537 break;
2538 case NETCONN_EVT_SENDMINUS:
2539 sock->sendevent = 0;
2540 check_waiters = 0;
2541 break;
2542 case NETCONN_EVT_ERROR:
2543 sock->errevent = 1;
2544 break;
2545 default:
2546 LWIP_ASSERT("unknown event", 0);
2547 break;
2548 }
2549
2550 if (sock->select_waiting && check_waiters) {
2551 /* Save which events are active */
2552 int has_recvevent, has_sendevent, has_errevent;
2553 has_recvevent = sock->rcvevent > 0;
2554 has_sendevent = sock->sendevent != 0;
2555 has_errevent = sock->errevent != 0;
2556 SYS_ARCH_UNPROTECT(lev);
2557 /* Check any select calls waiting on this socket */
2558 select_check_waiters(s, has_recvevent, has_sendevent, has_errevent);
2559 } else {
2560 SYS_ARCH_UNPROTECT(lev);
2561 }
2562 poll_check_waiters(s, check_waiters);
2563 done_socket(sock);
2564 }
2565
2566 /**
2567 * Check if any select waiters are waiting on this socket and its events
2568 *
2569 * @note on synchronization of select_cb_list:
2570 * LWIP_TCPIP_CORE_LOCKING: the select_cb_list must only be accessed while holding
2571 * the core lock. We do a single pass through the list and signal any waiters.
2572 * Core lock should already be held when calling here!!!!
2573  *
2574 * !LWIP_TCPIP_CORE_LOCKING: we use SYS_ARCH_PROTECT but unlock on each iteration
2575 * of the loop, thus creating a possibility where a thread could modify the
2576 * select_cb_list during our UNPROTECT/PROTECT. We use a generational counter to
2577 * detect this change and restart the list walk. The list is expected to be small
2578 */
2579 static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent)
2580 {
2581 struct lwip_select_cb *scb;
2582 #if !LWIP_TCPIP_CORE_LOCKING
2583 int last_select_cb_ctr;
2584 SYS_ARCH_DECL_PROTECT(lev);
2585 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2586
2587 LWIP_ASSERT_CORE_LOCKED();
2588
2589 #if !LWIP_TCPIP_CORE_LOCKING
2590 SYS_ARCH_PROTECT(lev);
2591 again:
2592 /* remember the state of select_cb_list to detect changes */
2593 last_select_cb_ctr = select_cb_ctr;
2594 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2595 for (scb = select_cb_list; scb != NULL; scb = scb->next) {
2596 if (scb->sem_signalled == 0) {
2597 /* semaphore not signalled yet */
2598 int do_signal = 0;
2599 #if LWIP_SOCKET_POLL
2600 if (scb->poll_fds != NULL) {
2601 do_signal = lwip_poll_should_wake(scb, s, has_recvevent, has_sendevent, has_errevent);
2602 }
2603 #endif /* LWIP_SOCKET_POLL */
2604 #if LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL
2605 else
2606 #endif /* LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL */
2607 #if LWIP_SOCKET_SELECT
2608 {
2609 /* Test this select call for our socket */
2610 if (has_recvevent) {
2611 if (scb->readset && FD_ISSET(s, scb->readset)) {
2612 do_signal = 1;
2613 }
2614 }
2615 if (has_sendevent) {
2616 if (!do_signal && scb->writeset && FD_ISSET(s, scb->writeset)) {
2617 do_signal = 1;
2618 }
2619 }
2620 if (has_errevent) {
2621 if (!do_signal && scb->exceptset && FD_ISSET(s, scb->exceptset)) {
2622 do_signal = 1;
2623 }
2624 }
2625 }
2626 #endif /* LWIP_SOCKET_SELECT */
2627 if (do_signal) {
2628 scb->sem_signalled = 1;
2629 /* For !LWIP_TCPIP_CORE_LOCKING, we don't call SYS_ARCH_UNPROTECT() before signaling
2630 the semaphore, as this might lead to the select thread taking itself off the list,
2631 invalidating the semaphore. */
2632 sys_sem_signal(SELECT_SEM_PTR(scb->sem));
2633 }
2634 }
2635 #if LWIP_TCPIP_CORE_LOCKING
2636 }
2637 #else
2638 /* unlock interrupts with each step */
2639 SYS_ARCH_UNPROTECT(lev);
2640 /* this makes sure interrupt protection time is short */
2641 SYS_ARCH_PROTECT(lev);
2642 if (last_select_cb_ctr != select_cb_ctr) {
2643 /* someone has changed select_cb_list, restart at the beginning */
2644 goto again;
2645 }
2646 /* remember the state of select_cb_list to detect changes */
2647 last_select_cb_ctr = select_cb_ctr;
2648 }
2649 SYS_ARCH_UNPROTECT(lev);
2650 #endif
2651 }
2652 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
2653
2654 /**
2655 * Close one end of a full-duplex connection.
2656 */
2657 int
2658 lwip_shutdown(int s, int how)
2659 {
2660 struct lwip_sock *sock;
2661 err_t err;
2662 u8_t shut_rx = 0, shut_tx = 0;
2663
2664 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_shutdown(%d, how=%d)\n", s, how));
2665
2666 sock = get_socket(s);
2667 if (!sock) {
2668 return -1;
2669 }
2670
2671 if (sock->conn != NULL) {
2672 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
2673 sock_set_errno(sock, EOPNOTSUPP);
2674 done_socket(sock);
2675 return -1;
2676 }
2677 } else {
2678 sock_set_errno(sock, ENOTCONN);
2679 done_socket(sock);
2680 return -1;
2681 }
2682
2683 if (how == SHUT_RD) {
2684 shut_rx = 1;
2685 } else if (how == SHUT_WR) {
2686 shut_tx = 1;
2687 } else if (how == SHUT_RDWR) {
2688 shut_rx = 1;
2689 shut_tx = 1;
2690 } else {
2691 sock_set_errno(sock, EINVAL);
2692 done_socket(sock);
2693 return -1;
2694 }
2695 err = netconn_shutdown(sock->conn, shut_rx, shut_tx);
2696
2697 sock_set_errno(sock, err_to_errno(err));
2698 done_socket(sock);
2699 return (err == ERR_OK ? 0 : -1);
2700 }
2701
2702 static int
2703 lwip_getaddrname(int s, struct sockaddr *name, socklen_t *namelen, u8_t local)
2704 {
2705 struct lwip_sock *sock;
2706 union sockaddr_aligned saddr;
2707 ip_addr_t naddr;
2708 u16_t port;
2709 err_t err;
2710
2711 sock = get_socket(s);
2712 if (!sock) {
2713 return -1;
2714 }
2715
2716 /* get the IP address and port */
2717 err = netconn_getaddr(sock->conn, &naddr, &port, local);
2718 if (err != ERR_OK) {
2719 sock_set_errno(sock, err_to_errno(err));
2720 done_socket(sock);
2721 return -1;
2722 }
2723
2724 #if LWIP_IPV4 && LWIP_IPV6
2725 /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
2726 if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) &&
2727 IP_IS_V4_VAL(naddr)) {
2728 ip4_2_ipv4_mapped_ipv6(ip_2_ip6(&naddr), ip_2_ip4(&naddr));
2729 IP_SET_TYPE_VAL(naddr, IPADDR_TYPE_V6);
2730 }
2731 #endif /* LWIP_IPV4 && LWIP_IPV6 */
2732
2733 IPADDR_PORT_TO_SOCKADDR(&saddr, &naddr, port);
2734
2735 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getaddrname(%d, addr=", s));
2736 ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
2737 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", port));
2738
2739 if (*namelen > saddr.sa.sa_len) {
2740 *namelen = saddr.sa.sa_len;
2741 }
2742 MEMCPY(name, &saddr, *namelen);
2743
2744 sock_set_errno(sock, 0);
2745 done_socket(sock);
2746 return 0;
2747 }
2748
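/**
 * lwip_getpeername()/lwip_getsockname() return the remote resp. local address
 * of a socket via the common lwip_getaddrname() helper above.
 *
 * Illustrative query of the locally bound IPv4 port (error handling omitted):
 *
 *   struct sockaddr_in local;
 *   socklen_t len = sizeof(local);
 *   u16_t port = 0;
 *   if (lwip_getsockname(s, (struct sockaddr *)&local, &len) == 0) {
 *     port = lwip_ntohs(local.sin_port);
 *   }
 */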
2749 int
2750 lwip_getpeername(int s, struct sockaddr *name, socklen_t *namelen)
2751 {
2752 return lwip_getaddrname(s, name, namelen, 0);
2753 }
2754
2755 int
2756 lwip_getsockname(int s, struct sockaddr *name, socklen_t *namelen)
2757 {
2758 return lwip_getaddrname(s, name, namelen, 1);
2759 }
2760
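/**
 * lwip_getsockopt(): BSD-style getsockopt(). With core locking enabled the
 * -impl function is called directly; otherwise the request is marshalled to
 * the tcpip_thread via lwip_getsockopt_callback() and the caller blocks on a
 * completion semaphore.
 *
 * Illustrative check of the pending error on a socket, e.g. after a
 * non-blocking connect (error handling omitted):
 *
 *   int so_err = 0;
 *   socklen_t optlen = sizeof(so_err);
 *   lwip_getsockopt(s, SOL_SOCKET, SO_ERROR, &so_err, &optlen);
 */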
2761 int
2762 lwip_getsockopt(int s, int level, int optname, void *optval, socklen_t *optlen)
2763 {
2764 int err;
2765 struct lwip_sock *sock = get_socket(s);
2766 #if !LWIP_TCPIP_CORE_LOCKING
2767 err_t cberr;
2768 LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
2769 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2770
2771 if (!sock) {
2772 return -1;
2773 }
2774
2775 if ((NULL == optval) || (NULL == optlen)) {
2776 sock_set_errno(sock, EFAULT);
2777 done_socket(sock);
2778 return -1;
2779 }
2780
2781 #if LWIP_TCPIP_CORE_LOCKING
2782 /* core-locking can just call the -impl function */
2783 LOCK_TCPIP_CORE();
2784 err = lwip_getsockopt_impl(s, level, optname, optval, optlen);
2785 UNLOCK_TCPIP_CORE();
2786
2787 #else /* LWIP_TCPIP_CORE_LOCKING */
2788
2789 #if LWIP_MPU_COMPATIBLE
2790 /* MPU_COMPATIBLE copies the optval data, so check for max size here */
2791 if (*optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
2792 sock_set_errno(sock, ENOBUFS);
2793 done_socket(sock);
2794 return -1;
2795 }
2796 #endif /* LWIP_MPU_COMPATIBLE */
2797
2798 LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
2799 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
2800 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
2801 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
2802 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = *optlen;
2803 #if !LWIP_MPU_COMPATIBLE
2804 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.p = optval;
2805 #endif /* !LWIP_MPU_COMPATIBLE */
2806 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
2807 #if LWIP_NETCONN_SEM_PER_THREAD
2808 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
2809 #else
2810 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
2811 #endif
2812 cberr = tcpip_callback(lwip_getsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
2813 if (cberr != ERR_OK) {
2814 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
2815 sock_set_errno(sock, err_to_errno(cberr));
2816 done_socket(sock);
2817 return -1;
2818 }
2819 sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);
2820
2821 /* write back optlen and optval */
2822 *optlen = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen;
2823 #if LWIP_MPU_COMPATIBLE
2824 MEMCPY(optval, LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval,
2825 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen);
2826 #endif /* LWIP_MPU_COMPATIBLE */
2827
2828   /* maybe lwip_getsockopt_impl has changed err */
2829 err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
2830 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
2831 #endif /* LWIP_TCPIP_CORE_LOCKING */
2832
2833 sock_set_errno(sock, err);
2834 done_socket(sock);
2835 return err ? -1 : 0;
2836 }
2837
2838 #if !LWIP_TCPIP_CORE_LOCKING
2839 /** lwip_getsockopt_callback: only used without CORE_LOCKING
2840 * to get into the tcpip_thread
2841 */
2842 static void
2843 lwip_getsockopt_callback(void *arg)
2844 {
2845 struct lwip_setgetsockopt_data *data;
2846 LWIP_ASSERT("arg != NULL", arg != NULL);
2847 data = (struct lwip_setgetsockopt_data *)arg;
2848
2849 data->err = lwip_getsockopt_impl(data->s, data->level, data->optname,
2850 #if LWIP_MPU_COMPATIBLE
2851 data->optval,
2852 #else /* LWIP_MPU_COMPATIBLE */
2853 data->optval.p,
2854 #endif /* LWIP_MPU_COMPATIBLE */
2855 &data->optlen);
2856
2857 sys_sem_signal((sys_sem_t *)(data->completed_sem));
2858 }
2859 #endif /* LWIP_TCPIP_CORE_LOCKING */
2860
2861 static int
2862 lwip_sockopt_to_ipopt(int optname)
2863 {
2864 /* Map SO_* values to our internal SOF_* values
2865 * We should not rely on #defines in socket.h
2866 * being in sync with ip.h.
2867 */
2868 switch (optname) {
2869 case SO_BROADCAST:
2870 return SOF_BROADCAST;
2871 case SO_KEEPALIVE:
2872 return SOF_KEEPALIVE;
2873 case SO_REUSEADDR:
2874 return SOF_REUSEADDR;
2875 default:
2876 LWIP_ASSERT("Unknown socket option", 0);
2877 return 0;
2878 }
2879 }
2880
2881 /** lwip_getsockopt_impl: the actual implementation of getsockopt:
2882  * same arguments as lwip_getsockopt, either called directly or through callback
2883 */
2884 static int
2885 lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen)
2886 {
2887 int err = 0;
2888 struct lwip_sock *sock = tryget_socket(s);
2889 if (!sock) {
2890 return EBADF;
2891 }
2892
2893 #ifdef LWIP_HOOK_SOCKETS_GETSOCKOPT
2894 if (LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) {
2895 return err;
2896 }
2897 #endif
2898
2899 switch (level) {
2900
2901 /* Level: SOL_SOCKET */
2902 case SOL_SOCKET:
2903 switch (optname) {
2904
2905 #if LWIP_TCP
2906 case SO_ACCEPTCONN:
2907 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
2908 if (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_TCP) {
2909 done_socket(sock);
2910 return ENOPROTOOPT;
2911 }
2912 if ((sock->conn->pcb.tcp != NULL) && (sock->conn->pcb.tcp->state == LISTEN)) {
2913 *(int *)optval = 1;
2914 } else {
2915 *(int *)optval = 0;
2916 }
2917 break;
2918 #endif /* LWIP_TCP */
2919
2920 /* The option flags */
2921 case SO_BROADCAST:
2922 case SO_KEEPALIVE:
2923 #if SO_REUSE
2924 case SO_REUSEADDR:
2925 #endif /* SO_REUSE */
2926 if ((optname == SO_BROADCAST) &&
2927 (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
2928 done_socket(sock);
2929 return ENOPROTOOPT;
2930 }
2931
2932 optname = lwip_sockopt_to_ipopt(optname);
2933
2934 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
2935 *(int *)optval = ip_get_option(sock->conn->pcb.ip, optname);
2936 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, optname=0x%x, ..) = %s\n",
2937 s, optname, (*(int *)optval ? "on" : "off")));
2938 break;
2939
2940 case SO_TYPE:
2941 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
2942 switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
2943 case NETCONN_RAW:
2944 *(int *)optval = SOCK_RAW;
2945 break;
2946 case NETCONN_TCP:
2947 *(int *)optval = SOCK_STREAM;
2948 break;
2949 case NETCONN_UDP:
2950 *(int *)optval = SOCK_DGRAM;
2951 break;
2952 default: /* unrecognized socket type */
2953 *(int *)optval = netconn_type(sock->conn);
2954 LWIP_DEBUGF(SOCKETS_DEBUG,
2955 ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE): unrecognized socket type %d\n",
2956 s, *(int *)optval));
2957 } /* switch (netconn_type(sock->conn)) */
2958 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE) = %d\n",
2959 s, *(int *)optval));
2960 break;
2961
2962 case SO_ERROR:
2963 LWIP_SOCKOPT_CHECK_OPTLEN(sock, *optlen, int);
2964 *(int *)optval = err_to_errno(netconn_err(sock->conn));
2965 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_ERROR) = %d\n",
2966 s, *(int *)optval));
2967 break;
2968
2969 #if LWIP_SO_SNDTIMEO
2970 case SO_SNDTIMEO:
2971 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
2972 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_sendtimeout(sock->conn));
2973 break;
2974 #endif /* LWIP_SO_SNDTIMEO */
2975 #if LWIP_SO_RCVTIMEO
2976 case SO_RCVTIMEO:
2977 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
2978 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_recvtimeout(sock->conn));
2979 break;
2980 #endif /* LWIP_SO_RCVTIMEO */
2981 #if LWIP_SO_RCVBUF
2982 case SO_RCVBUF:
2983 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
2984 *(int *)optval = netconn_get_recvbufsize(sock->conn);
2985 break;
2986 #endif /* LWIP_SO_RCVBUF */
2987 #if LWIP_SO_LINGER
2988 case SO_LINGER: {
2989 s16_t conn_linger;
2990 struct linger *linger = (struct linger *)optval;
2991 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, struct linger);
2992 conn_linger = sock->conn->linger;
2993 if (conn_linger >= 0) {
2994 linger->l_onoff = 1;
2995 linger->l_linger = (int)conn_linger;
2996 } else {
2997 linger->l_onoff = 0;
2998 linger->l_linger = 0;
2999 }
3000 }
3001 break;
3002 #endif /* LWIP_SO_LINGER */
3003 #if LWIP_UDP
3004 case SO_NO_CHECK:
3005 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_UDP);
3006 #if LWIP_UDPLITE
3007 if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) {
3008 /* this flag is only available for UDP, not for UDP lite */
3009 done_socket(sock);
3010 return EAFNOSUPPORT;
3011 }
3012 #endif /* LWIP_UDPLITE */
3013 *(int *)optval = udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM) ? 1 : 0;
3014 break;
3015 #endif /* LWIP_UDP*/
3016 default:
3017 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
3018 s, optname));
3019 err = ENOPROTOOPT;
3020 break;
3021 } /* switch (optname) */
3022 break;
3023
3024 /* Level: IPPROTO_IP */
3025 case IPPROTO_IP:
3026 switch (optname) {
3027 case IP_TTL:
3028 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3029 *(int *)optval = sock->conn->pcb.ip->ttl;
3030 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TTL) = %d\n",
3031 s, *(int *)optval));
3032 break;
3033 case IP_TOS:
3034 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3035 *(int *)optval = sock->conn->pcb.ip->tos;
3036 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TOS) = %d\n",
3037 s, *(int *)optval));
3038 break;
3039 #if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
3040 case IP_MULTICAST_TTL:
3041 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3042 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3043 done_socket(sock);
3044 return ENOPROTOOPT;
3045 }
3046 *(u8_t *)optval = udp_get_multicast_ttl(sock->conn->pcb.udp);
3047 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_TTL) = %d\n",
3048 s, *(int *)optval));
3049 break;
3050 case IP_MULTICAST_IF:
3051 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, struct in_addr);
3052 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3053 done_socket(sock);
3054 return ENOPROTOOPT;
3055 }
3056 inet_addr_from_ip4addr((struct in_addr *)optval, udp_get_multicast_netif_addr(sock->conn->pcb.udp));
3057 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_IF) = 0x%"X32_F"\n",
3058 s, *(u32_t *)optval));
3059 break;
3060 case IP_MULTICAST_LOOP:
3061 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3062 if ((sock->conn->pcb.udp->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) {
3063 *(u8_t *)optval = 1;
3064 } else {
3065 *(u8_t *)optval = 0;
3066 }
3067 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_LOOP) = %d\n",
3068 s, *(int *)optval));
3069 break;
3070 #endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
3071 default:
3072 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
3073 s, optname));
3074 err = ENOPROTOOPT;
3075 break;
3076 } /* switch (optname) */
3077 break;
3078
3079 #if LWIP_TCP
3080 /* Level: IPPROTO_TCP */
3081 case IPPROTO_TCP:
3082       /* Special case: all IPPROTO_TCP options take an int */
3083 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_TCP);
3084 if (sock->conn->pcb.tcp->state == LISTEN) {
3085 done_socket(sock);
3086 return EINVAL;
3087 }
3088 switch (optname) {
3089 case TCP_NODELAY:
3090 *(int *)optval = tcp_nagle_disabled(sock->conn->pcb.tcp);
3091 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_NODELAY) = %s\n",
3092 s, (*(int *)optval) ? "on" : "off") );
3093 break;
3094 case TCP_KEEPALIVE:
3095 *(int *)optval = (int)sock->conn->pcb.tcp->keep_idle;
3096 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) = %d\n",
3097 s, *(int *)optval));
3098 break;
3099
3100 #if LWIP_TCP_KEEPALIVE
3101 case TCP_KEEPIDLE:
3102 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_idle / 1000);
3103 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) = %d\n",
3104 s, *(int *)optval));
3105 break;
3106 case TCP_KEEPINTVL:
3107 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_intvl / 1000);
3108 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) = %d\n",
3109 s, *(int *)optval));
3110 break;
3111 case TCP_KEEPCNT:
3112 *(int *)optval = (int)sock->conn->pcb.tcp->keep_cnt;
3113 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) = %d\n",
3114 s, *(int *)optval));
3115 break;
3116 #endif /* LWIP_TCP_KEEPALIVE */
3117 default:
3118 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
3119 s, optname));
3120 err = ENOPROTOOPT;
3121 break;
3122 } /* switch (optname) */
3123 break;
3124 #endif /* LWIP_TCP */
3125
3126 #if LWIP_IPV6
3127 /* Level: IPPROTO_IPV6 */
3128 case IPPROTO_IPV6:
3129 switch (optname) {
3130 case IPV6_V6ONLY:
3131 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3132 *(int *)optval = (netconn_get_ipv6only(sock->conn) ? 1 : 0);
3133 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY) = %d\n",
3134 s, *(int *)optval));
3135 break;
3136 default:
3137 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
3138 s, optname));
3139 err = ENOPROTOOPT;
3140 break;
3141 } /* switch (optname) */
3142 break;
3143 #endif /* LWIP_IPV6 */
3144
3145 #if LWIP_UDP && LWIP_UDPLITE
3146 /* Level: IPPROTO_UDPLITE */
3147 case IPPROTO_UDPLITE:
3148       /* Special case: all IPPROTO_UDPLITE options take an int */
3149 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3150       /* If this is not a UDP lite socket, ignore any options. */
3151 if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
3152 done_socket(sock);
3153 return ENOPROTOOPT;
3154 }
3155 switch (optname) {
3156 case UDPLITE_SEND_CSCOV:
3157 *(int *)optval = sock->conn->pcb.udp->chksum_len_tx;
3158 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) = %d\n",
3159 s, (*(int *)optval)) );
3160 break;
3161 case UDPLITE_RECV_CSCOV:
3162 *(int *)optval = sock->conn->pcb.udp->chksum_len_rx;
3163 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) = %d\n",
3164 s, (*(int *)optval)) );
3165 break;
3166 default:
3167 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
3168 s, optname));
3169 err = ENOPROTOOPT;
3170 break;
3171 } /* switch (optname) */
3172 break;
3173 #endif /* LWIP_UDP */
3174 /* Level: IPPROTO_RAW */
3175 case IPPROTO_RAW:
3176 switch (optname) {
3177 #if LWIP_IPV6 && LWIP_RAW
3178 case IPV6_CHECKSUM:
3179 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_RAW);
3180 if (sock->conn->pcb.raw->chksum_reqd == 0) {
3181 *(int *)optval = -1;
3182 } else {
3183 *(int *)optval = sock->conn->pcb.raw->chksum_offset;
3184 }
3185 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM) = %d\n",
3186 s, (*(int *)optval)) );
3187 break;
3188 #endif /* LWIP_IPV6 && LWIP_RAW */
3189 default:
3190 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
3191 s, optname));
3192 err = ENOPROTOOPT;
3193 break;
3194 } /* switch (optname) */
3195 break;
3196 default:
3197 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
3198 s, level, optname));
3199 err = ENOPROTOOPT;
3200 break;
3201 } /* switch (level) */
3202
3203 done_socket(sock);
3204 return err;
3205 }
3206
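/**
 * lwip_setsockopt(): BSD-style setsockopt(). Follows the same direct call vs.
 * tcpip_thread-callback split as lwip_getsockopt() above.
 *
 * Illustrative enabling of address reuse before bind (assumes SO_REUSE is
 * enabled in lwipopts.h; error handling omitted):
 *
 *   int on = 1;
 *   lwip_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
 */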
3207 int
3208 lwip_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen)
3209 {
3210 int err = 0;
3211 struct lwip_sock *sock = get_socket(s);
3212 #if !LWIP_TCPIP_CORE_LOCKING
3213 err_t cberr;
3214 LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
3215 #endif /* !LWIP_TCPIP_CORE_LOCKING */
3216
3217 if (!sock) {
3218 return -1;
3219 }
3220
3221 if (NULL == optval) {
3222 sock_set_errno(sock, EFAULT);
3223 done_socket(sock);
3224 return -1;
3225 }
3226
3227 #if LWIP_TCPIP_CORE_LOCKING
3228 /* core-locking can just call the -impl function */
3229 LOCK_TCPIP_CORE();
3230 err = lwip_setsockopt_impl(s, level, optname, optval, optlen);
3231 UNLOCK_TCPIP_CORE();
3232
3233 #else /* LWIP_TCPIP_CORE_LOCKING */
3234
3235 #if LWIP_MPU_COMPATIBLE
3236 /* MPU_COMPATIBLE copies the optval data, so check for max size here */
3237 if (optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
3238 sock_set_errno(sock, ENOBUFS);
3239 done_socket(sock);
3240 return -1;
3241 }
3242 #endif /* LWIP_MPU_COMPATIBLE */
3243
3244 LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
3245 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
3246 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
3247 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
3248 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = optlen;
3249 #if LWIP_MPU_COMPATIBLE
3250 MEMCPY(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, optval, optlen);
3251 #else /* LWIP_MPU_COMPATIBLE */
3252 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.pc = (const void *)optval;
3253 #endif /* LWIP_MPU_COMPATIBLE */
3254 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
3255 #if LWIP_NETCONN_SEM_PER_THREAD
3256 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
3257 #else
3258 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
3259 #endif
3260 cberr = tcpip_callback(lwip_setsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
3261 if (cberr != ERR_OK) {
3262 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
3263 sock_set_errno(sock, err_to_errno(cberr));
3264 done_socket(sock);
3265 return -1;
3266 }
3267 sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);
3268
3269   /* maybe lwip_setsockopt_callback has changed err */
3270 err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
3271 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
3272 #endif /* LWIP_TCPIP_CORE_LOCKING */
3273
3274 sock_set_errno(sock, err);
3275 done_socket(sock);
3276 return err ? -1 : 0;
3277 }
3278
3279 #if !LWIP_TCPIP_CORE_LOCKING
3280 /** lwip_setsockopt_callback: only used without CORE_LOCKING
3281 * to get into the tcpip_thread
3282 */
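/* Rough flow, for reference: lwip_setsockopt() fills a struct
 * lwip_setgetsockopt_data, posts this callback to tcpip_thread via
 * tcpip_callback() and then blocks on 'completed_sem'; the callback runs
 * lwip_setsockopt_impl() in the stack's context and signals the semaphore
 * when done. */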
3283 static void
3284 lwip_setsockopt_callback(void *arg)
3285 {
3286 struct lwip_setgetsockopt_data *data;
3287 LWIP_ASSERT("arg != NULL", arg != NULL);
3288 data = (struct lwip_setgetsockopt_data *)arg;
3289
3290 data->err = lwip_setsockopt_impl(data->s, data->level, data->optname,
3291 #if LWIP_MPU_COMPATIBLE
3292 data->optval,
3293 #else /* LWIP_MPU_COMPATIBLE */
3294 data->optval.pc,
3295 #endif /* LWIP_MPU_COMPATIBLE */
3296 data->optlen);
3297
3298 sys_sem_signal((sys_sem_t *)(data->completed_sem));
3299 }
3300 #endif /* LWIP_TCPIP_CORE_LOCKING */
3301
3302 /** lwip_setsockopt_impl: the actual implementation of setsockopt:
3303  * same arguments as lwip_setsockopt, called either directly or through the callback above
3304 */
3305 static int
3306 lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen)
3307 {
3308 int err = 0;
3309 struct lwip_sock *sock = tryget_socket(s);
3310 if (!sock) {
3311 return EBADF;
3312 }
3313
3314 #ifdef LWIP_HOOK_SOCKETS_SETSOCKOPT
3315 if (LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) {
3316 return err;
3317 }
3318 #endif
3319
3320 switch (level) {
3321
3322 /* Level: SOL_SOCKET */
3323 case SOL_SOCKET:
3324 switch (optname) {
3325
3326 /* SO_ACCEPTCONN is get-only */
3327
3328 /* The option flags */
3329 case SO_BROADCAST:
3330 case SO_KEEPALIVE:
3331 #if SO_REUSE
3332 case SO_REUSEADDR:
3333 #endif /* SO_REUSE */
3334 if ((optname == SO_BROADCAST) &&
3335 (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
3336 done_socket(sock);
3337 return ENOPROTOOPT;
3338 }
3339
3340 optname = lwip_sockopt_to_ipopt(optname);
3341
3342 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3343 if (*(const int *)optval) {
3344 ip_set_option(sock->conn->pcb.ip, optname);
3345 } else {
3346 ip_reset_option(sock->conn->pcb.ip, optname);
3347 }
3348 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, optname=0x%x, ..) -> %s\n",
3349 s, optname, (*(const int *)optval ? "on" : "off")));
3350 break;
3351
3352 /* SO_TYPE is get-only */
3353 /* SO_ERROR is get-only */
3354
3355 #if LWIP_SO_SNDTIMEO
3356 case SO_SNDTIMEO: {
3357 long ms_long;
3358 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3359 ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
3360 if (ms_long < 0) {
3361 done_socket(sock);
3362 return EINVAL;
3363 }
3364 netconn_set_sendtimeout(sock->conn, ms_long);
3365 break;
3366 }
3367 #endif /* LWIP_SO_SNDTIMEO */
3368 #if LWIP_SO_RCVTIMEO
3369 case SO_RCVTIMEO: {
3370 long ms_long;
3371 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3372 ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
3373 if (ms_long < 0) {
3374 done_socket(sock);
3375 return EINVAL;
3376 }
3377 netconn_set_recvtimeout(sock->conn, (u32_t)ms_long);
3378 break;
3379 }
3380 #endif /* LWIP_SO_RCVTIMEO */
3381 #if LWIP_SO_RCVBUF
3382 case SO_RCVBUF:
3383 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, int);
3384 netconn_set_recvbufsize(sock->conn, *(const int *)optval);
3385 break;
3386 #endif /* LWIP_SO_RCVBUF */
3387 #if LWIP_SO_LINGER
3388 case SO_LINGER: {
3389 const struct linger *linger = (const struct linger *)optval;
3390 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct linger);
3391 if (linger->l_onoff) {
3392 int lingersec = linger->l_linger;
3393 if (lingersec < 0) {
3394 done_socket(sock);
3395 return EINVAL;
3396 }
3397 if (lingersec > 0xFFFF) {
3398 lingersec = 0xFFFF;
3399 }
3400 sock->conn->linger = (s16_t)lingersec;
3401 } else {
3402 sock->conn->linger = -1;
3403 }
3404 }
3405 break;
3406 #endif /* LWIP_SO_LINGER */
3407 #if LWIP_UDP
3408 case SO_NO_CHECK:
3409 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
3410 #if LWIP_UDPLITE
3411 if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) {
3412 /* this flag is only available for UDP, not for UDP lite */
3413 done_socket(sock);
3414 return EAFNOSUPPORT;
3415 }
3416 #endif /* LWIP_UDPLITE */
3417 if (*(const int *)optval) {
3418 udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
3419 } else {
3420 udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
3421 }
3422 break;
3423 #endif /* LWIP_UDP */
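    /* Usage sketch (the interface name "st1" is a placeholder and must match
     * an existing netif: 2-character name plus number):
     *
     *   struct ifreq ifr;
     *   memset(&ifr, 0, sizeof(ifr));
     *   strncpy(ifr.ifr_name, "st1", sizeof(ifr.ifr_name) - 1);
     *   lwip_setsockopt(s, SOL_SOCKET, SO_BINDTODEVICE, &ifr, sizeof(ifr));
     *
     * Passing an ifreq with an empty name unbinds the socket again. */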
3424 case SO_BINDTODEVICE: {
3425 const struct ifreq *iface;
3426 struct netif *n = NULL;
3427
3428 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct ifreq);
3429
3430 iface = (const struct ifreq *)optval;
3431 if (iface->ifr_name[0] != 0) {
3432 n = netif_find(iface->ifr_name);
3433 if (n == NULL) {
3434 done_socket(sock);
3435 return ENODEV;
3436 }
3437 }
3438
3439 switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
3440 #if LWIP_TCP
3441 case NETCONN_TCP:
3442 tcp_bind_netif(sock->conn->pcb.tcp, n);
3443 break;
3444 #endif
3445 #if LWIP_UDP
3446 case NETCONN_UDP:
3447 udp_bind_netif(sock->conn->pcb.udp, n);
3448 break;
3449 #endif
3450 #if LWIP_RAW
3451 case NETCONN_RAW:
3452 raw_bind_netif(sock->conn->pcb.raw, n);
3453 break;
3454 #endif
3455 default:
3456 LWIP_ASSERT("Unhandled netconn type in SO_BINDTODEVICE", 0);
3457 break;
3458 }
3459 }
3460 break;
3461 default:
3462 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
3463 s, optname));
3464 err = ENOPROTOOPT;
3465 break;
3466 } /* switch (optname) */
3467 break;
3468
3469 /* Level: IPPROTO_IP */
3470 case IPPROTO_IP:
3471 switch (optname) {
3472 case IP_TTL:
3473 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3474 sock->conn->pcb.ip->ttl = (u8_t)(*(const int *)optval);
3475 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TTL, ..) -> %d\n",
3476 s, sock->conn->pcb.ip->ttl));
3477 break;
3478 case IP_TOS:
3479 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3480 sock->conn->pcb.ip->tos = (u8_t)(*(const int *)optval);
3481 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TOS, ..)-> %d\n",
3482 s, sock->conn->pcb.ip->tos));
3483 break;
3484 #if LWIP_NETBUF_RECVINFO
3485 case IP_PKTINFO:
3486 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
3487 if (*(const int *)optval) {
3488 sock->conn->flags |= NETCONN_FLAG_PKTINFO;
3489 } else {
3490 sock->conn->flags &= ~NETCONN_FLAG_PKTINFO;
3491 }
3492 break;
3493 #endif /* LWIP_NETBUF_RECVINFO */
3494 #if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
3495 case IP_MULTICAST_TTL:
3496 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3497 udp_set_multicast_ttl(sock->conn->pcb.udp, (u8_t)(*(const u8_t *)optval));
3498 break;
3499 case IP_MULTICAST_IF: {
3500 ip4_addr_t if_addr;
3501 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct in_addr, NETCONN_UDP);
3502 inet_addr_to_ip4addr(&if_addr, (const struct in_addr *)optval);
3503 udp_set_multicast_netif_addr(sock->conn->pcb.udp, &if_addr);
3504 }
3505 break;
3506 case IP_MULTICAST_LOOP:
3507 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3508 if (*(const u8_t *)optval) {
3509 udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
3510 } else {
3511 udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
3512 }
3513 break;
3514 #endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
3515 #if LWIP_IGMP
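    /* Usage sketch (both addresses are placeholders): joining an IPv4
     * multicast group on a UDP socket:
     *
     *   struct ip_mreq mreq;
     *   mreq.imr_multiaddr.s_addr = inet_addr("239.0.0.1");
     *   mreq.imr_interface.s_addr = inet_addr("192.168.1.2");
     *   lwip_setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
     */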
3516 case IP_ADD_MEMBERSHIP:
3517 case IP_DROP_MEMBERSHIP: {
3518 /* If this is a TCP or a RAW socket, ignore these options. */
3519 err_t igmp_err;
3520 const struct ip_mreq *imr = (const struct ip_mreq *)optval;
3521 ip4_addr_t if_addr;
3522 ip4_addr_t multi_addr;
3523 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ip_mreq, NETCONN_UDP);
3524 inet_addr_to_ip4addr(&if_addr, &imr->imr_interface);
3525 inet_addr_to_ip4addr(&multi_addr, &imr->imr_multiaddr);
3526 if (optname == IP_ADD_MEMBERSHIP) {
3527 if (!lwip_socket_register_membership(s, &if_addr, &multi_addr)) {
3528 /* cannot track membership (out of memory) */
3529 err = ENOMEM;
3530 igmp_err = ERR_OK;
3531 } else {
3532 igmp_err = igmp_joingroup(&if_addr, &multi_addr);
3533 }
3534 } else {
3535 igmp_err = igmp_leavegroup(&if_addr, &multi_addr);
3536 lwip_socket_unregister_membership(s, &if_addr, &multi_addr);
3537 }
3538 if (igmp_err != ERR_OK) {
3539 err = EADDRNOTAVAIL;
3540 }
3541 }
3542 break;
3543 #endif /* LWIP_IGMP */
3544 default:
3545 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
3546 s, optname));
3547 err = ENOPROTOOPT;
3548 break;
3549 } /* switch (optname) */
3550 break;
3551
3552 #if LWIP_TCP
3553 /* Level: IPPROTO_TCP */
3554 case IPPROTO_TCP:
3555       /* Special case: all IPPROTO_TCP options take an int */
3556 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP);
3557 if (sock->conn->pcb.tcp->state == LISTEN) {
3558 done_socket(sock);
3559 return EINVAL;
3560 }
3561 switch (optname) {
3562 case TCP_NODELAY:
3563 if (*(const int *)optval) {
3564 tcp_nagle_disable(sock->conn->pcb.tcp);
3565 } else {
3566 tcp_nagle_enable(sock->conn->pcb.tcp);
3567 }
3568 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_NODELAY) -> %s\n",
3569 s, (*(const int *)optval) ? "on" : "off") );
3570 break;
3571 case TCP_KEEPALIVE:
3572 sock->conn->pcb.tcp->keep_idle = (u32_t)(*(const int *)optval);
3573 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) -> %"U32_F"\n",
3574 s, sock->conn->pcb.tcp->keep_idle));
3575 break;
3576
3577 #if LWIP_TCP_KEEPALIVE
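    /* Usage sketch (values are arbitrary): SO_KEEPALIVE must be enabled as
     * well for the timers to run; TCP_KEEPIDLE/TCP_KEEPINTVL are given in
     * seconds and converted to milliseconds below:
     *
     *   int on = 1, idle = 60, intvl = 10, cnt = 4;
     *   lwip_setsockopt(s, SOL_SOCKET,  SO_KEEPALIVE,  &on,    sizeof(on));
     *   lwip_setsockopt(s, IPPROTO_TCP, TCP_KEEPIDLE,  &idle,  sizeof(idle));
     *   lwip_setsockopt(s, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
     *   lwip_setsockopt(s, IPPROTO_TCP, TCP_KEEPCNT,   &cnt,   sizeof(cnt));
     */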
3578 case TCP_KEEPIDLE:
3579 sock->conn->pcb.tcp->keep_idle = 1000 * (u32_t)(*(const int *)optval);
3580 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) -> %"U32_F"\n",
3581 s, sock->conn->pcb.tcp->keep_idle));
3582 break;
3583 case TCP_KEEPINTVL:
3584 sock->conn->pcb.tcp->keep_intvl = 1000 * (u32_t)(*(const int *)optval);
3585 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) -> %"U32_F"\n",
3586 s, sock->conn->pcb.tcp->keep_intvl));
3587 break;
3588 case TCP_KEEPCNT:
3589 sock->conn->pcb.tcp->keep_cnt = (u32_t)(*(const int *)optval);
3590 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) -> %"U32_F"\n",
3591 s, sock->conn->pcb.tcp->keep_cnt));
3592 break;
3593 #endif /* LWIP_TCP_KEEPALIVE */
3594 default:
3595 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
3596 s, optname));
3597 err = ENOPROTOOPT;
3598 break;
3599 } /* switch (optname) */
3600 break;
3601 #endif /* LWIP_TCP */
3602
3603 #if LWIP_IPV6
3604 /* Level: IPPROTO_IPV6 */
3605 case IPPROTO_IPV6:
3606 switch (optname) {
3607 case IPV6_V6ONLY:
3608 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3609 if (*(const int *)optval) {
3610 netconn_set_ipv6only(sock->conn, 1);
3611 } else {
3612 netconn_set_ipv6only(sock->conn, 0);
3613 }
3614 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY, ..) -> %d\n",
3615 s, (netconn_get_ipv6only(sock->conn) ? 1 : 0)));
3616 break;
3617 #if LWIP_IPV6_MLD
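    /* Usage sketch (group address and netif index are placeholders): joining
     * a link-local IPv6 multicast group on a UDP socket:
     *
     *   struct ipv6_mreq mreq6;
     *   lwip_inet_pton(AF_INET6, "ff02::1234", &mreq6.ipv6mr_multiaddr);
     *   mreq6.ipv6mr_interface = 1;
     *   lwip_setsockopt(s, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq6, sizeof(mreq6));
     */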
3618 case IPV6_JOIN_GROUP:
3619 case IPV6_LEAVE_GROUP: {
3620 /* If this is a TCP or a RAW socket, ignore these options. */
3621 err_t mld6_err;
3622 struct netif *netif;
3623 ip6_addr_t multi_addr;
3624 const struct ipv6_mreq *imr = (const struct ipv6_mreq *)optval;
3625 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ipv6_mreq, NETCONN_UDP);
3626 inet6_addr_to_ip6addr(&multi_addr, &imr->ipv6mr_multiaddr);
3627 LWIP_ASSERT("Invalid netif index", imr->ipv6mr_interface <= 0xFFu);
3628 netif = netif_get_by_index((u8_t)imr->ipv6mr_interface);
3629 if (netif == NULL) {
3630 err = EADDRNOTAVAIL;
3631 break;
3632 }
3633
3634 if (optname == IPV6_JOIN_GROUP) {
3635 if (!lwip_socket_register_mld6_membership(s, imr->ipv6mr_interface, &multi_addr)) {
3636 /* cannot track membership (out of memory) */
3637 err = ENOMEM;
3638 mld6_err = ERR_OK;
3639 } else {
3640 mld6_err = mld6_joingroup_netif(netif, &multi_addr);
3641 }
3642 } else {
3643 mld6_err = mld6_leavegroup_netif(netif, &multi_addr);
3644 lwip_socket_unregister_mld6_membership(s, imr->ipv6mr_interface, &multi_addr);
3645 }
3646 if (mld6_err != ERR_OK) {
3647 err = EADDRNOTAVAIL;
3648 }
3649 }
3650 break;
3651 #endif /* LWIP_IPV6_MLD */
3652 default:
3653 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
3654 s, optname));
3655 err = ENOPROTOOPT;
3656 break;
3657 } /* switch (optname) */
3658 break;
3659 #endif /* LWIP_IPV6 */
3660
3661 #if LWIP_UDP && LWIP_UDPLITE
3662 /* Level: IPPROTO_UDPLITE */
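  /* Usage sketch: cover only the first 20 bytes of each outgoing UDP-Lite
   * datagram with the checksum (non-zero values below 8 or above 0xffff are
   * clamped to 8 by the handler below; the value 20 is arbitrary):
   *
   *   int cov = 20;
   *   lwip_setsockopt(s, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
   */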
3663 case IPPROTO_UDPLITE:
3664       /* Special case: all IPPROTO_UDPLITE options take an int */
3665 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3666       /* If this is not a UDP Lite socket, ignore any options. */
3667 if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
3668 done_socket(sock);
3669 return ENOPROTOOPT;
3670 }
3671 switch (optname) {
3672 case UDPLITE_SEND_CSCOV:
3673 if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
3674 /* don't allow illegal values! */
3675 sock->conn->pcb.udp->chksum_len_tx = 8;
3676 } else {
3677 sock->conn->pcb.udp->chksum_len_tx = (u16_t) * (const int *)optval;
3678 }
3679 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) -> %d\n",
3680 s, (*(const int *)optval)) );
3681 break;
3682 case UDPLITE_RECV_CSCOV:
3683 if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
3684 /* don't allow illegal values! */
3685 sock->conn->pcb.udp->chksum_len_rx = 8;
3686 } else {
3687 sock->conn->pcb.udp->chksum_len_rx = (u16_t) * (const int *)optval;
3688 }
3689 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) -> %d\n",
3690 s, (*(const int *)optval)) );
3691 break;
3692 default:
3693 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
3694 s, optname));
3695 err = ENOPROTOOPT;
3696 break;
3697 } /* switch (optname) */
3698 break;
3699 #endif /* LWIP_UDP && LWIP_UDPLITE */
3700 /* Level: IPPROTO_RAW */
3701 case IPPROTO_RAW:
3702 switch (optname) {
3703 #if LWIP_IPV6 && LWIP_RAW
3704 case IPV6_CHECKSUM:
3705 /* It should not be possible to disable the checksum generation with ICMPv6
3706 * as per RFC 3542 chapter 3.1 */
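      /* Usage sketch (the offset value is application-defined): on a
       * non-ICMPv6 raw IPv6 socket, let the stack insert/verify a checksum at
       * byte offset 2 of the payload; a negative value disables it again:
       *
       *   int offset = 2;
       *   lwip_setsockopt(s, IPPROTO_RAW, IPV6_CHECKSUM, &offset, sizeof(offset));
       */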
3707 if (sock->conn->pcb.raw->protocol == IPPROTO_ICMPV6) {
3708 done_socket(sock);
3709 return EINVAL;
3710 }
3711
3712 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_RAW);
3713 if (*(const int *)optval < 0) {
3714 sock->conn->pcb.raw->chksum_reqd = 0;
3715 } else if (*(const int *)optval & 1) {
3716 /* Per RFC3542, odd offsets are not allowed */
3717 done_socket(sock);
3718 return EINVAL;
3719 } else {
3720 sock->conn->pcb.raw->chksum_reqd = 1;
3721 sock->conn->pcb.raw->chksum_offset = (u16_t) * (const int *)optval;
3722 }
3723 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM, ..) -> %d\n",
3724 s, sock->conn->pcb.raw->chksum_reqd));
3725 break;
3726 #endif /* LWIP_IPV6 && LWIP_RAW */
3727 default:
3728 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
3729 s, optname));
3730 err = ENOPROTOOPT;
3731 break;
3732 } /* switch (optname) */
3733 break;
3734 default:
3735 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
3736 s, level, optname));
3737 err = ENOPROTOOPT;
3738 break;
3739 } /* switch (level) */
3740
3741 done_socket(sock);
3742 return err;
3743 }
3744
3745 int
3746 lwip_ioctl(int s, long cmd, void *argp)
3747 {
3748 struct lwip_sock *sock = get_socket(s);
3749 u8_t val;
3750 #if LWIP_SO_RCVBUF
3751 int recv_avail;
3752 #endif /* LWIP_SO_RCVBUF */
3753
3754 if (!sock) {
3755 return -1;
3756 }
3757
3758 switch (cmd) {
3759 #if LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE
3760 case FIONREAD:
3761 if (!argp) {
3762 sock_set_errno(sock, EINVAL);
3763 done_socket(sock);
3764 return -1;
3765 }
3766 #if LWIP_FIONREAD_LINUXMODE
3767 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
3768 struct netbuf *nb;
3769 if (sock->lastdata.netbuf) {
3770 nb = sock->lastdata.netbuf;
3771 *((int *)argp) = nb->p->tot_len;
3772 } else {
3773 struct netbuf *rxbuf;
3774 err_t err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &rxbuf, NETCONN_DONTBLOCK);
3775 if (err != ERR_OK) {
3776 *((int *)argp) = 0;
3777 } else {
3778 sock->lastdata.netbuf = rxbuf;
3779 *((int *)argp) = rxbuf->p->tot_len;
3780 }
3781 }
3782 done_socket(sock);
3783 return 0;
3784 }
3785 #endif /* LWIP_FIONREAD_LINUXMODE */
3786
3787 #if LWIP_SO_RCVBUF
3788 /* we come here if either LWIP_FIONREAD_LINUXMODE==0 or this is a TCP socket */
3789 SYS_ARCH_GET(sock->conn->recv_avail, recv_avail);
3790 if (recv_avail < 0) {
3791 recv_avail = 0;
3792 }
3793
3794 /* Check if there is data left from the last recv operation. /maq 041215 */
3795 if (sock->lastdata.netbuf) {
3796 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
3797 recv_avail += sock->lastdata.pbuf->tot_len;
3798 } else {
3799 recv_avail += sock->lastdata.netbuf->p->tot_len;
3800 }
3801 }
3802 *((int *)argp) = recv_avail;
3803
3804 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONREAD, %p) = %"U16_F"\n", s, argp, *((u16_t *)argp)));
3805 sock_set_errno(sock, 0);
3806 done_socket(sock);
3807 return 0;
3808 #else /* LWIP_SO_RCVBUF */
3809 break;
3810 #endif /* LWIP_SO_RCVBUF */
3811 #endif /* LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE */
3812
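  /* Usage sketch: switch a socket to non-blocking mode via ioctl (equivalent
   * to setting O_NONBLOCK with lwip_fcntl()):
   *
   *   int on = 1;
   *   lwip_ioctl(s, FIONBIO, &on);
   */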
3813 case (long)FIONBIO:
3814 val = 0;
3815 if (argp && *(int *)argp) {
3816 val = 1;
3817 }
3818 netconn_set_nonblocking(sock->conn, val);
3819 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONBIO, %d)\n", s, val));
3820 sock_set_errno(sock, 0);
3821 done_socket(sock);
3822 return 0;
3823
3824 default:
3825 IOCTL_CMD_CASE_HANDLER();
3826 break;
3827 } /* switch (cmd) */
3828 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, UNIMPL: 0x%lx, %p)\n", s, cmd, argp));
3829 sock_set_errno(sock, ENOSYS); /* not yet implemented */
3830 done_socket(sock);
3831 return -1;
3832 }
3833
3834 /** A minimal implementation of fcntl.
3835 * Currently only the commands F_GETFL and F_SETFL are implemented.
3836  * The flag O_NONBLOCK and the access modes are reported by F_GETFL; only
3837  * the flag O_NONBLOCK is implemented for F_SETFL.
3838 */
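/* Usage sketch: make a socket non-blocking while keeping the other reported
 * flags (access-mode bits are ignored by F_SETFL below):
 *
 *   int flags = lwip_fcntl(s, F_GETFL, 0);
 *   if (flags >= 0) {
 *     lwip_fcntl(s, F_SETFL, flags | O_NONBLOCK);
 *   }
 */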
3839 int
3840 lwip_fcntl(int s, int cmd, int val)
3841 {
3842 struct lwip_sock *sock = get_socket(s);
3843 int ret = -1;
3844 int op_mode = 0;
3845
3846 if (!sock) {
3847 return -1;
3848 }
3849
3850 switch (cmd) {
3851 case F_GETFL:
3852 ret = netconn_is_nonblocking(sock->conn) ? O_NONBLOCK : 0;
3853 sock_set_errno(sock, 0);
3854
3855 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
3856 #if LWIP_TCPIP_CORE_LOCKING
3857 LOCK_TCPIP_CORE();
3858 #else
3859 SYS_ARCH_DECL_PROTECT(lev);
3860 /* the proper thing to do here would be to get into the tcpip_thread,
3861 but locking should be OK as well since we only *read* some flags */
3862 SYS_ARCH_PROTECT(lev);
3863 #endif
3864 #if LWIP_TCP
3865 if (sock->conn->pcb.tcp) {
3866 if (!(sock->conn->pcb.tcp->flags & TF_RXCLOSED)) {
3867 op_mode |= O_RDONLY;
3868 }
3869 if (!(sock->conn->pcb.tcp->flags & TF_FIN)) {
3870 op_mode |= O_WRONLY;
3871 }
3872 }
3873 #endif
3874 #if LWIP_TCPIP_CORE_LOCKING
3875 UNLOCK_TCPIP_CORE();
3876 #else
3877 SYS_ARCH_UNPROTECT(lev);
3878 #endif
3879 } else {
3880 op_mode |= O_RDWR;
3881 }
3882
3883 /* ensure O_RDWR for (O_RDONLY|O_WRONLY) != O_RDWR cases */
3884 ret |= (op_mode == (O_RDONLY | O_WRONLY)) ? O_RDWR : op_mode;
3885
3886 break;
3887 case F_SETFL:
3888 /* Bits corresponding to the file access mode and the file creation flags [..] that are set in arg shall be ignored */
3889 val &= ~(O_RDONLY | O_WRONLY | O_RDWR);
3890 if ((val & ~O_NONBLOCK) == 0) {
3891 /* only O_NONBLOCK, all other bits are zero */
3892 netconn_set_nonblocking(sock->conn, val & O_NONBLOCK);
3893 ret = 0;
3894 sock_set_errno(sock, 0);
3895 } else {
3896 sock_set_errno(sock, ENOSYS); /* not yet implemented */
3897 }
3898 break;
3899 default:
3900 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_fcntl(%d, UNIMPL: %d, %d)\n", s, cmd, val));
3901 sock_set_errno(sock, ENOSYS); /* not yet implemented */
3902 break;
3903 }
3904 done_socket(sock);
3905 return ret;
3906 }
3907
3908 #if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
3909 int
3910 fcntl(int s, int cmd, ...)
3911 {
3912 va_list ap;
3913 int val;
3914
3915 va_start(ap, cmd);
3916 val = va_arg(ap, int);
3917 va_end(ap);
3918 return lwip_fcntl(s, cmd, val);
3919 }
3920 #endif
3921
3922 const char *
3923 lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size)
3924 {
3925 const char *ret = NULL;
3926 int size_int = (int)size;
3927 if (size_int < 0) {
3928 set_errno(ENOSPC);
3929 return NULL;
3930 }
3931 switch (af) {
3932 #if LWIP_IPV4
3933 case AF_INET:
3934 ret = ip4addr_ntoa_r((const ip4_addr_t *)src, dst, size_int);
3935 if (ret == NULL) {
3936 set_errno(ENOSPC);
3937 }
3938 break;
3939 #endif
3940 #if LWIP_IPV6
3941 case AF_INET6:
3942 ret = ip6addr_ntoa_r((const ip6_addr_t *)src, dst, size_int);
3943 if (ret == NULL) {
3944 set_errno(ENOSPC);
3945 }
3946 break;
3947 #endif
3948 default:
3949 set_errno(EAFNOSUPPORT);
3950 break;
3951 }
3952 return ret;
3953 }
3954
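/* Round-trip sketch for the conversion helpers (the address string is only an
 * example; IP4ADDR_STRLEN_MAX is large enough for any IPv4 address string):
 *
 *   ip4_addr_t a;
 *   char buf[IP4ADDR_STRLEN_MAX];
 *   if (lwip_inet_pton(AF_INET, "192.0.2.1", &a) == 1) {
 *     lwip_inet_ntop(AF_INET, &a, buf, sizeof(buf));
 *   }
 */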
3955 int
3956 lwip_inet_pton(int af, const char *src, void *dst)
3957 {
3958 int err;
3959 switch (af) {
3960 #if LWIP_IPV4
3961 case AF_INET:
3962 err = ip4addr_aton(src, (ip4_addr_t *)dst);
3963 break;
3964 #endif
3965 #if LWIP_IPV6
3966 case AF_INET6: {
3967 /* convert into temporary variable since ip6_addr_t might be larger
3968 than in6_addr when scopes are enabled */
3969 ip6_addr_t addr;
3970 err = ip6addr_aton(src, &addr);
3971 if (err) {
3972 memcpy(dst, &addr.addr, sizeof(addr.addr));
3973 }
3974 break;
3975 }
3976 #endif
3977 default:
3978 err = -1;
3979 set_errno(EAFNOSUPPORT);
3980 break;
3981 }
3982 return err;
3983 }
3984
3985 #if LWIP_IGMP
3986 /** Register a new IGMP membership. On socket close, the membership is dropped automatically.
3987 *
3988 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
3989 *
3990 * @return 1 on success, 0 on failure
3991 */
3992 static int
3993 lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
3994 {
3995 struct lwip_sock *sock = get_socket(s);
3996 int i;
3997
3998 if (!sock) {
3999 return 0;
4000 }
4001
4002 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4003 if (socket_ipv4_multicast_memberships[i].sock == NULL) {
4004 socket_ipv4_multicast_memberships[i].sock = sock;
4005 ip4_addr_copy(socket_ipv4_multicast_memberships[i].if_addr, *if_addr);
4006 ip4_addr_copy(socket_ipv4_multicast_memberships[i].multi_addr, *multi_addr);
4007 done_socket(sock);
4008 return 1;
4009 }
4010 }
4011 done_socket(sock);
4012 return 0;
4013 }
4014
4015 /** Unregister a previously registered membership. This prevents dropping the membership
4016 * on socket close.
4017 *
4018 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4019 */
4020 static void
4021 lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
4022 {
4023 struct lwip_sock *sock = get_socket(s);
4024 int i;
4025
4026 if (!sock) {
4027 return;
4028 }
4029
4030 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4031 if ((socket_ipv4_multicast_memberships[i].sock == sock) &&
4032 ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].if_addr, if_addr) &&
4033 ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].multi_addr, multi_addr)) {
4034 socket_ipv4_multicast_memberships[i].sock = NULL;
4035 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
4036 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
4037 break;
4038 }
4039 }
4040 done_socket(sock);
4041 }
4042
4043 /** Drop all memberships of a socket that were not dropped explicitly via setsockopt.
4044 *
4045 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
4046 */
4047 static void
4048 lwip_socket_drop_registered_memberships(int s)
4049 {
4050 struct lwip_sock *sock = get_socket(s);
4051 int i;
4052
4053 if (!sock) {
4054 return;
4055 }
4056
4057 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4058 if (socket_ipv4_multicast_memberships[i].sock == sock) {
4059 ip_addr_t multi_addr, if_addr;
4060 ip_addr_copy_from_ip4(multi_addr, socket_ipv4_multicast_memberships[i].multi_addr);
4061 ip_addr_copy_from_ip4(if_addr, socket_ipv4_multicast_memberships[i].if_addr);
4062 socket_ipv4_multicast_memberships[i].sock = NULL;
4063 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
4064 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
4065
4066 netconn_join_leave_group(sock->conn, &multi_addr, &if_addr, NETCONN_LEAVE);
4067 }
4068 }
4069 done_socket(sock);
4070 }
4071 #endif /* LWIP_IGMP */
4072
4073 #if LWIP_IPV6_MLD
4074 /** Register a new MLD6 membership. On socket close, the membership is dropped automatically.
4075 *
4076 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4077 *
4078 * @return 1 on success, 0 on failure
4079 */
4080 static int
4081 lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
4082 {
4083 struct lwip_sock *sock = get_socket(s);
4084 int i;
4085
4086 if (!sock) {
4087 return 0;
4088 }
4089
4090 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4091 if (socket_ipv6_multicast_memberships[i].sock == NULL) {
4092 socket_ipv6_multicast_memberships[i].sock = sock;
4093 socket_ipv6_multicast_memberships[i].if_idx = (u8_t)if_idx;
4094 ip6_addr_copy(socket_ipv6_multicast_memberships[i].multi_addr, *multi_addr);
4095 done_socket(sock);
4096 return 1;
4097 }
4098 }
4099 done_socket(sock);
4100 return 0;
4101 }
4102
4103 /** Unregister a previously registered MLD6 membership. This prevents dropping the membership
4104 * on socket close.
4105 *
4106 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4107 */
4108 static void
4109 lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
4110 {
4111 struct lwip_sock *sock = get_socket(s);
4112 int i;
4113
4114 if (!sock) {
4115 return;
4116 }
4117
4118 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4119 if ((socket_ipv6_multicast_memberships[i].sock == sock) &&
4120 (socket_ipv6_multicast_memberships[i].if_idx == if_idx) &&
4121 ip6_addr_cmp(&socket_ipv6_multicast_memberships[i].multi_addr, multi_addr)) {
4122 socket_ipv6_multicast_memberships[i].sock = NULL;
4123 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4124 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4125 break;
4126 }
4127 }
4128 done_socket(sock);
4129 }
4130
4131 /** Drop all MLD6 memberships of a socket that were not dropped explicitly via setsockopt.
4132 *
4133 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
4134 */
4135 static void
4136 lwip_socket_drop_registered_mld6_memberships(int s)
4137 {
4138 struct lwip_sock *sock = get_socket(s);
4139 int i;
4140
4141 if (!sock) {
4142 return;
4143 }
4144
4145 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4146 if (socket_ipv6_multicast_memberships[i].sock == sock) {
4147 ip_addr_t multi_addr;
4148 u8_t if_idx;
4149
4150 ip_addr_copy_from_ip6(multi_addr, socket_ipv6_multicast_memberships[i].multi_addr);
4151 if_idx = socket_ipv6_multicast_memberships[i].if_idx;
4152
4153 socket_ipv6_multicast_memberships[i].sock = NULL;
4154 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4155 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4156
4157 netconn_join_leave_group_netif(sock->conn, &multi_addr, if_idx, NETCONN_LEAVE);
4158 }
4159 }
4160 done_socket(sock);
4161 }
4162 #endif /* LWIP_IPV6_MLD */
4163
4164 #endif /* LWIP_SOCKET */
4165