1 /**
2 * @file
3 * Sockets BSD-Like API module
4 */
5
6 /*
7 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without modification,
11 * are permitted provided that the following conditions are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 * 3. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
24 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
26 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
29 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
30 * OF SUCH DAMAGE.
31 *
32 * This file is part of the lwIP TCP/IP stack.
33 *
34 * Author: Adam Dunkels <adam@sics.se>
35 *
36 * Improved by Marc Boucher <marc@mbsi.ca> and David Haas <dhaas@alum.rpi.edu>
37 *
38 */
39
40 #include "lwip/opt.h"
41
42 #if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
43
44 #include "lwip/sockets.h"
45 #include "lwip/priv/sockets_priv.h"
46 #include "lwip/api.h"
47 #include "lwip/sys.h"
48 #include "lwip/igmp.h"
49 #include "lwip/mld6.h"
50 #include "lwip/inet.h"
51 #include "lwip/tcp.h"
52 #include "lwip/raw.h"
53 #include "lwip/udp.h"
54 #include "lwip/memp.h"
55 #include "lwip/pbuf.h"
56 #include "lwip/netif.h"
57 #include "lwip/priv/tcpip_priv.h"
58 #include "lwip/priv/api_msg.h"
59 #if LWIP_CHECKSUM_ON_COPY
60 #include "lwip/inet_chksum.h"
61 #endif
62
63 #if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
64 #include <stdarg.h>
65 #endif
66
67 #include <string.h>
68
69 #ifdef LWIP_HOOK_FILENAME
70 #include LWIP_HOOK_FILENAME
71 #endif
72
73 /* If the netconn API is not required publicly, then we include the necessary
74 files here to get the implementation */
75 #if !LWIP_NETCONN
76 #undef LWIP_NETCONN
77 #define LWIP_NETCONN 1
78 #include "api_msg.c"
79 #include "api_lib.c"
80 #include "netbuf.c"
81 #undef LWIP_NETCONN
82 #define LWIP_NETCONN 0
83 #endif
84
85 #define API_SELECT_CB_VAR_REF(name) API_VAR_REF(name)
86 #define API_SELECT_CB_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_select_cb, name)
87 #define API_SELECT_CB_VAR_ALLOC(name, retblock) API_VAR_ALLOC_EXT(struct lwip_select_cb, MEMP_SELECT_CB, name, retblock)
88 #define API_SELECT_CB_VAR_FREE(name) API_VAR_FREE(MEMP_SELECT_CB, name)
89
/** NOTE(review): the upstream macro additionally sets
 * (sin)->sin_len = sizeof(struct sockaddr_in); the BSD sa_len field is
 * apparently not used in this port — confirm sockets.h defines
 * struct sockaddr_in without sin_len. */
91 #if LWIP_IPV4
92 #define IP4ADDR_PORT_TO_SOCKADDR(sin, ipaddr, port) do { \
93 (sin)->sin_family = AF_INET; \
94 (sin)->sin_port = lwip_htons((port)); \
95 inet_addr_from_ip4addr(&(sin)->sin_addr, ipaddr); \
96 memset((sin)->sin_zero, 0, SIN_ZERO_LEN); }while(0)
97 #define SOCKADDR4_TO_IP4ADDR_PORT(sin, ipaddr, port) do { \
98 inet_addr_to_ip4addr(ip_2_ip4(ipaddr), &((sin)->sin_addr)); \
99 (port) = lwip_ntohs((sin)->sin_port); }while(0)
100 #endif /* LWIP_IPV4 */
101
102 #if LWIP_IPV6
103 #define IP6ADDR_PORT_TO_SOCKADDR(sin6, ipaddr, port) do { \
104 (sin6)->sin6_len = sizeof(struct sockaddr_in6); \
105 (sin6)->sin6_family = AF_INET6; \
106 (sin6)->sin6_port = lwip_htons((port)); \
107 (sin6)->sin6_flowinfo = 0; \
108 inet6_addr_from_ip6addr(&(sin6)->sin6_addr, ipaddr); \
109 (sin6)->sin6_scope_id = ip6_addr_zone(ipaddr); }while(0)
110 #define SOCKADDR6_TO_IP6ADDR_PORT(sin6, ipaddr, port) do { \
111 inet6_addr_to_ip6addr(ip_2_ip6(ipaddr), &((sin6)->sin6_addr)); \
112 if (ip6_addr_has_scope(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { \
113 ip6_addr_set_zone(ip_2_ip6(ipaddr), (u8_t)((sin6)->sin6_scope_id)); \
114 } \
115 (port) = lwip_ntohs((sin6)->sin6_port); }while(0)
116 #endif /* LWIP_IPV6 */
117
118 #if LWIP_IPV4 && LWIP_IPV6
119 static void sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port);
120
121 #define IS_SOCK_ADDR_LEN_VALID(namelen) (((namelen) == sizeof(struct sockaddr_in)) || \
122 ((namelen) == sizeof(struct sockaddr_in6)))
123 #define IS_SOCK_ADDR_TYPE_VALID(name) (((name)->sa_family == AF_INET) || \
124 ((name)->sa_family == AF_INET6))
125 #define SOCK_ADDR_TYPE_MATCH(name, sock) \
126 ((((name)->sa_family == AF_INET) && !(NETCONNTYPE_ISIPV6((sock)->conn->type))) || \
127 (((name)->sa_family == AF_INET6) && (NETCONNTYPE_ISIPV6((sock)->conn->type))))
128 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) do { \
129 if (IP_IS_ANY_TYPE_VAL(*ipaddr) || IP_IS_V6_VAL(*ipaddr)) { \
130 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port); \
131 } else { \
132 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port); \
133 } } while(0)
134 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) sockaddr_to_ipaddr_port(sockaddr, ipaddr, &(port))
135 #define DOMAIN_TO_NETCONN_TYPE(domain, type) (((domain) == AF_INET) ? \
136 (type) : (enum netconn_type)((type) | NETCONN_TYPE_IPV6))
137 #elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */
138 #define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in6))
139 #define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET6)
140 #define SOCK_ADDR_TYPE_MATCH(name, sock) 1
141 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
142 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port)
143 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
144 SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, port)
145 #define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
146 #else /*-> LWIP_IPV4: LWIP_IPV4 && LWIP_IPV6 */
147 #define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in))
148 #define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET)
149 #define SOCK_ADDR_TYPE_MATCH(name, sock) 1
150 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
151 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port)
152 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
153 SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, port)
154 #define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
155 #endif /* LWIP_IPV6 */
156
157 #define IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) (((name)->sa_family == AF_UNSPEC) || \
158 IS_SOCK_ADDR_TYPE_VALID(name))
159 #define SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock) (((name)->sa_family == AF_UNSPEC) || \
160 SOCK_ADDR_TYPE_MATCH(name, sock))
161 #define IS_SOCK_ADDR_ALIGNED(name) ((((mem_ptr_t)(name)) % 4) == 0)
162
163
164 #define LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype) do { if ((optlen) < sizeof(opttype)) { done_socket(sock); return EINVAL; }}while(0)
165 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, opttype) do { \
166 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
167 if ((sock)->conn == NULL) { done_socket(sock); return EINVAL; } }while(0)
168 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype) do { \
169 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
170 if (((sock)->conn == NULL) || ((sock)->conn->pcb.tcp == NULL)) { done_socket(sock); return EINVAL; } }while(0)
171 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, opttype, netconntype) do { \
172 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype); \
173 if (NETCONNTYPE_GROUP(netconn_type((sock)->conn)) != netconntype) { done_socket(sock); return ENOPROTOOPT; } }while(0)
174
175
176 #define LWIP_SETGETSOCKOPT_DATA_VAR_REF(name) API_VAR_REF(name)
177 #define LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_setgetsockopt_data, name)
178 #define LWIP_SETGETSOCKOPT_DATA_VAR_FREE(name) API_VAR_FREE(MEMP_SOCKET_SETGETSOCKOPT_DATA, name)
179 #if LWIP_MPU_COMPATIBLE
180 #define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) do { \
181 name = (struct lwip_setgetsockopt_data *)memp_malloc(MEMP_SOCKET_SETGETSOCKOPT_DATA); \
182 if (name == NULL) { \
183 sock_set_errno(sock, ENOMEM); \
184 done_socket(sock); \
185 return -1; \
186 } }while(0)
187 #else /* LWIP_MPU_COMPATIBLE */
188 #define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock)
189 #endif /* LWIP_MPU_COMPATIBLE */
190
191 #if LWIP_SO_SNDRCVTIMEO_NONSTANDARD
192 #define LWIP_SO_SNDRCVTIMEO_OPTTYPE int
193 #define LWIP_SO_SNDRCVTIMEO_SET(optval, val) (*(int *)(optval) = (val))
194 #define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((long)*(const int*)(optval))
195 #else
196 #define LWIP_SO_SNDRCVTIMEO_OPTTYPE struct timeval
197 #define LWIP_SO_SNDRCVTIMEO_SET(optval, val) do { \
198 u32_t loc = (val); \
199 ((struct timeval *)(optval))->tv_sec = (long)((loc) / 1000U); \
200 ((struct timeval *)(optval))->tv_usec = (long)(((loc) % 1000U) * 1000U); }while(0)
201 #define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((((const struct timeval *)(optval))->tv_sec * 1000) + (((const struct timeval *)(optval))->tv_usec / 1000))
202 #endif
203
204
205 /** A struct sockaddr replacement that has the same alignment as sockaddr_in/
206 * sockaddr_in6 if instantiated.
207 */
208 union sockaddr_aligned {
209 struct sockaddr sa;
210 #if LWIP_IPV6
211 struct sockaddr_in6 sin6;
212 #endif /* LWIP_IPV6 */
213 #if LWIP_IPV4
214 struct sockaddr_in sin;
215 #endif /* LWIP_IPV4 */
216 };
217
218 /* Define the number of IPv4 multicast memberships, default is one per socket */
219 #ifndef LWIP_SOCKET_MAX_MEMBERSHIPS
220 #define LWIP_SOCKET_MAX_MEMBERSHIPS NUM_SOCKETS
221 #endif
222
223 #if LWIP_IGMP
224 /* This is to keep track of IP_ADD_MEMBERSHIP calls to drop the membership when
225 a socket is closed */
226 struct lwip_socket_multicast_pair {
227 /** the socket */
228 struct lwip_sock *sock;
229 /** the interface address */
230 ip4_addr_t if_addr;
231 /** the group address */
232 ip4_addr_t multi_addr;
233 };
234
235 static struct lwip_socket_multicast_pair socket_ipv4_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
236
237 static int lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
238 static void lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
239 static void lwip_socket_drop_registered_memberships(int s);
240 #endif /* LWIP_IGMP */
241
242 #if LWIP_IPV6_MLD
243 /* This is to keep track of IP_JOIN_GROUP calls to drop the membership when
244 a socket is closed */
245 struct lwip_socket_multicast_mld6_pair {
246 /** the socket */
247 struct lwip_sock *sock;
248 /** the interface index */
249 u8_t if_idx;
250 /** the group address */
251 ip6_addr_t multi_addr;
252 };
253
254 static struct lwip_socket_multicast_mld6_pair socket_ipv6_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
255
256 static int lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
257 static void lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
258 static void lwip_socket_drop_registered_mld6_memberships(int s);
259 #endif /* LWIP_IPV6_MLD */
260
261 /** The global array of available sockets */
262 static struct lwip_sock sockets[NUM_SOCKETS];
263
264 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
265 #if LWIP_TCPIP_CORE_LOCKING
266 /* protect the select_cb_list using core lock */
267 #define LWIP_SOCKET_SELECT_DECL_PROTECT(lev)
268 #define LWIP_SOCKET_SELECT_PROTECT(lev) LOCK_TCPIP_CORE()
269 #define LWIP_SOCKET_SELECT_UNPROTECT(lev) UNLOCK_TCPIP_CORE()
270 #else /* LWIP_TCPIP_CORE_LOCKING */
271 /* protect the select_cb_list using SYS_LIGHTWEIGHT_PROT */
272 #define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev)
273 #define LWIP_SOCKET_SELECT_PROTECT(lev) SYS_ARCH_PROTECT(lev)
274 #define LWIP_SOCKET_SELECT_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev)
275 /** This counter is increased from lwip_select when the list is changed
276 and checked in select_check_waiters to see if it has changed. */
277 static volatile int select_cb_ctr;
278 #endif /* LWIP_TCPIP_CORE_LOCKING */
279 /** The global list of tasks waiting for select */
280 static struct lwip_select_cb *select_cb_list;
281 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
282
283 #define sock_set_errno(sk, e) do { \
284 const int sockerr = (e); \
285 set_errno(sockerr); \
286 } while (0)
287
288 /* Forward declaration of some functions */
289 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
290 static void event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len);
291 #define DEFAULT_SOCKET_EVENTCB event_callback
292 #if ESP_LWIP_SELECT
293 static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent, struct lwip_sock *sock_select);
294 #else
295 static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent);
296 #endif /* LWIP_SOCKET_SELECT */
297 #else
298 #define DEFAULT_SOCKET_EVENTCB NULL
299 #endif
300 #if !LWIP_TCPIP_CORE_LOCKING
301 static void lwip_getsockopt_callback(void *arg);
302 static void lwip_setsockopt_callback(void *arg);
303 #endif
304 static int lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen);
305 static int lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen);
306 static int free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn,
307 union lwip_sock_lastdata *lastdata);
308 static void free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata);
309
310 #if LWIP_IPV4 && LWIP_IPV6
static void
sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port)
{
  /* Convert a generic sockaddr into an lwIP ip_addr_t plus host-order port.
   * Only compiled in dual-stack builds (LWIP_IPV4 && LWIP_IPV6).
   * Any family other than AF_INET6 is decoded as IPv4. */
  if ((sockaddr->sa_family) == AF_INET6) {
    SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6 *)(const void *)(sockaddr), ipaddr, *port);
    ipaddr->type = IPADDR_TYPE_V6;
  } else {
    SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in *)(const void *)(sockaddr), ipaddr, *port);
    ipaddr->type = IPADDR_TYPE_V4;
  }
}
322 #endif /* LWIP_IPV4 && LWIP_IPV6 */
323
/** LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore.
 * Thin wrapper; the actual work is done by netconn_thread_init(). */
void
lwip_socket_thread_init(void)
{
  netconn_thread_init();
}
330
/** LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore.
 * Must be called before a thread that used sockets exits. */
void
lwip_socket_thread_cleanup(void)
{
  netconn_thread_cleanup();
}
337
338 #if LWIP_NETCONN_FULLDUPLEX
/* Thread-safe increment of sock->fd_used, with overflow check.
 * Returns 1 if a reference was taken, 0 if the socket is about to be
 * freed (fd_free_pending set) and must not be used. */
static int
sock_inc_used(struct lwip_sock *sock)
{
  int ret;
#if !ESP_LWIP_LOCK
  SYS_ARCH_DECL_PROTECT(lev);
#endif /* !ESP_LWIP_LOCK */

  LWIP_ASSERT("sock != NULL", sock != NULL);

  /* ESP builds use a per-socket lock; others the global SYS_ARCH protection */
#if ESP_LWIP_LOCK
  SYS_ARCH_PROTECT_SOCK(sock);
#else
  SYS_ARCH_PROTECT(lev);
#endif /* ESP_LWIP_LOCK */
  if (sock->fd_free_pending) {
    /* prevent new usage of this socket if free is pending */
    ret = 0;
  } else {
    ++sock->fd_used;
    ret = 1;
    /* overflow check: fd_used wrapping to 0 would break refcounting */
    LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
  }
#if ESP_LWIP_LOCK
  SYS_ARCH_UNPROTECT_SOCK(sock);
#else
  SYS_ARCH_UNPROTECT(lev);
#endif /* ESP_LWIP_LOCK */
  return ret;
}
370
/* Like sock_inc_used(), but called under SYS_ARCH_PROTECT lock.
 * Returns 1 if a reference was taken, 0 if a free is pending.
 * NOTE(review): in ESP builds this still takes the per-socket lock in
 * addition to the caller's protection — confirm that nesting is intended. */
static int
sock_inc_used_locked(struct lwip_sock *sock)
{
  LWIP_ASSERT("sock != NULL", sock != NULL);

#if ESP_LWIP_LOCK
  SYS_ARCH_PROTECT_SOCK(sock);
#endif /* ESP_LWIP_LOCK */
  if (sock->fd_free_pending) {
    /* a pending free implies someone still holds a reference */
    LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
#if ESP_LWIP_LOCK
    SYS_ARCH_UNPROTECT_SOCK(sock);
#endif /* ESP_LWIP_LOCK */
    return 0;
  }

  ++sock->fd_used;
  /* overflow check: fd_used wrapping to 0 would break refcounting */
  LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
#if ESP_LWIP_LOCK
  SYS_ARCH_UNPROTECT_SOCK(sock);
#endif /* ESP_LWIP_LOCK */
  return 1;
}
395
/* In full-duplex mode, sock->fd_used != 0 prevents a socket descriptor from being
 * released (and possibly reused) when used from more than one thread
 * (e.g. read-while-write or close-while-write, etc)
 * This function is called at the end of functions using (try)get_socket*().
 * Drops one reference; if this was the last reference and a free was
 * requested while the socket was in use, the socket is freed here.
 */
static void
done_socket(struct lwip_sock *sock)
{
  int freed = 0;
  int is_tcp = 0;
  struct netconn *conn = NULL;
  union lwip_sock_lastdata lastdata;
#if !ESP_LWIP_LOCK
  SYS_ARCH_DECL_PROTECT(lev);
#endif /* !ESP_LWIP_LOCK */
  LWIP_ASSERT("sock != NULL", sock != NULL);

#if ESP_LWIP_LOCK
  SYS_ARCH_PROTECT_SOCK(sock);
#else
  SYS_ARCH_PROTECT(lev);
#endif /* ESP_LWIP_LOCK */
  LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
  if (--sock->fd_used == 0) {
    if (sock->fd_free_pending) {
      /* free the socket; fd_used is re-armed to 1 because
         free_socket_locked() decrements it again */
      sock->fd_used = 1;
      is_tcp = sock->fd_free_pending & LWIP_SOCK_FD_FREE_TCP;
      freed = free_socket_locked(sock, is_tcp, &conn, &lastdata);
    }
  }
#if ESP_LWIP_LOCK
  SYS_ARCH_UNPROTECT_SOCK(sock);
#else
  SYS_ARCH_UNPROTECT(lev);
#endif /* ESP_LWIP_LOCK */

  if (freed) {
    /* delete the netconn and leftover data outside of the lock */
    free_socket_free_elements(is_tcp, conn, &lastdata);
  }
}
437
438 #else /* LWIP_NETCONN_FULLDUPLEX */
439 #define sock_inc_used(sock) 1
440 #define sock_inc_used_locked(sock) 1
441 #define done_socket(sock)
442 #endif /* LWIP_NETCONN_FULLDUPLEX */
443
444 /* Translate a socket 'int' into a pointer (only fails if the index is invalid) */
445 static struct lwip_sock *
tryget_socket_unconn_nouse(int fd)446 tryget_socket_unconn_nouse(int fd)
447 {
448 int s = fd - LWIP_SOCKET_OFFSET;
449 if ((s < 0) || (s >= NUM_SOCKETS)) {
450 LWIP_DEBUGF(SOCKETS_DEBUG, ("tryget_socket_unconn(%d): invalid\n", fd));
451 return NULL;
452 }
453 return &sockets[s];
454 }
455
/** Debug helper: expose the internal socket struct for a descriptor.
 * No validity check beyond the index range and no reference taken —
 * for debugging/diagnostics only. */
struct lwip_sock *
lwip_socket_dbg_get_socket(int fd)
{
  return tryget_socket_unconn_nouse(fd);
}
461
/* Translate a socket 'int' into a pointer (only fails if the index is invalid).
 * On success a reference is taken (sock_inc_used); the caller must pair
 * with done_socket(). Returns NULL on bad index, pending free, or
 * (ESP builds) when the slot has no netconn. */
static struct lwip_sock *
tryget_socket_unconn(int fd)
{
  struct lwip_sock *ret = tryget_socket_unconn_nouse(fd);
  if (ret != NULL) {
#if ESP_LWIP_LOCK
    /* ESP: reject unallocated slots before taking a reference */
    if (ret->conn == NULL)
      return NULL;
#endif /* ESP_LWIP_LOCK */
    if (!sock_inc_used(ret)) {
      return NULL;
    }
  }
  return ret;
}
478
/* Like tryget_socket_unconn(), but called under SYS_ARCH_PROTECT lock.
 * NOTE(review): the conn==NULL guard here is gated on ESP_LWIP while the
 * sibling tryget_socket_unconn() uses ESP_LWIP_LOCK — confirm the
 * differing macros are intentional. */
static struct lwip_sock *
tryget_socket_unconn_locked(int fd)
{
  struct lwip_sock *ret = tryget_socket_unconn_nouse(fd);
  if (ret != NULL) {
#if ESP_LWIP
    /* ESP: reject unallocated slots before taking a reference */
    if (ret->conn == NULL)
      return NULL;
#endif /* ESP_LWIP */
    if (!sock_inc_used_locked(ret)) {
      return NULL;
    }
  }
  return ret;
}
495
496 /**
497 * Same as get_socket but doesn't set errno
498 *
499 * @param fd externally used socket index
500 * @return struct lwip_sock for the socket or NULL if not found
501 */
502 static struct lwip_sock *
tryget_socket(int fd)503 tryget_socket(int fd)
504 {
505 struct lwip_sock *sock = tryget_socket_unconn(fd);
506 if (sock != NULL) {
507 if (sock->conn) {
508 return sock;
509 }
510 done_socket(sock);
511 }
512 return NULL;
513 }
514
515 /**
516 * Map a externally used socket index to the internal socket representation.
517 *
518 * @param fd externally used socket index
519 * @return struct lwip_sock for the socket or NULL if not found
520 */
521 static struct lwip_sock *
get_socket(int fd)522 get_socket(int fd)
523 {
524 struct lwip_sock *sock = tryget_socket(fd);
525 if (!sock) {
526 if ((fd < LWIP_SOCKET_OFFSET) || (fd >= (LWIP_SOCKET_OFFSET + NUM_SOCKETS))) {
527 LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): invalid\n", fd));
528 }
529 set_errno(EBADF);
530 return NULL;
531 }
532 return sock;
533 }
534
535 /**
536 * Allocate a new socket for a given netconn.
537 *
538 * @param newconn the netconn for which to allocate a socket
539 * @param accepted 1 if socket has been created by accept(),
540 * 0 if socket has been created by socket()
541 * @return the index of the new socket; -1 on error
542 */
543 static int
alloc_socket(struct netconn * newconn,int accepted)544 alloc_socket(struct netconn *newconn, int accepted)
545 {
546 int i;
547 SYS_ARCH_DECL_PROTECT(lev);
548 LWIP_UNUSED_ARG(accepted);
549
550 /* allocate a new socket identifier */
551 for (i = 0; i < NUM_SOCKETS; ++i) {
552 /* Protect socket array */
553 SYS_ARCH_PROTECT(lev);
554 if (!sockets[i].conn) {
555 #if LWIP_NETCONN_FULLDUPLEX
556 if (sockets[i].fd_used) {
557 SYS_ARCH_UNPROTECT(lev);
558 continue;
559 }
560 sockets[i].fd_used = 1;
561 sockets[i].fd_free_pending = 0;
562 #endif
563 sockets[i].conn = newconn;
564 /* The socket is not yet known to anyone, so no need to protect
565 after having marked it as used. */
566 SYS_ARCH_UNPROTECT(lev);
567 sockets[i].lastdata.pbuf = NULL;
568 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
569 LWIP_ASSERT("sockets[i].select_waiting == 0", sockets[i].select_waiting == 0);
570 sockets[i].rcvevent = 0;
571 /* TCP sendbuf is empty, but the socket is not yet writable until connected
572 * (unless it has been created by accept()). */
573 sockets[i].sendevent = (NETCONNTYPE_GROUP(newconn->type) == NETCONN_TCP ? (accepted != 0) : 1);
574 sockets[i].errevent = 0;
575 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
576 #if ESP_LWIP_LOCK
577 if (!sockets[i].lock) {
578 /* one time init and never free */
579 if (sys_mutex_new(&sockets[i].lock) != ERR_OK) {
580 return -1;
581 }
582 }
583 #endif
584 return i + LWIP_SOCKET_OFFSET;
585 }
586 SYS_ARCH_UNPROTECT(lev);
587 }
588 return -1;
589 }
590
/** Free a socket (under lock)
 *
 * @param sock the socket to free
 * @param is_tcp != 0 for TCP sockets, used to free lastdata
 * @param conn the socket's netconn is stored here, must be freed externally
 * @param lastdata lastdata is stored here, must be freed externally
 * @return 1 if the caller must free conn/lastdata via
 *         free_socket_free_elements(); 0 if the free was deferred because
 *         other threads still hold references (full-duplex only)
 */
static int
free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn,
                   union lwip_sock_lastdata *lastdata)
{
#if LWIP_NETCONN_FULLDUPLEX
  LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
  sock->fd_used--;
  if (sock->fd_used > 0) {
    /* still in use by another thread: mark for deferred free; the last
       done_socket() will complete it */
    sock->fd_free_pending = LWIP_SOCK_FD_FREE_FREE | (is_tcp ? LWIP_SOCK_FD_FREE_TCP : 0);
    return 0;
  }
#else /* LWIP_NETCONN_FULLDUPLEX */
  LWIP_UNUSED_ARG(is_tcp);
#endif /* LWIP_NETCONN_FULLDUPLEX */

  /* detach conn/lastdata so the slot can be reused immediately; the
     caller frees them outside the lock */
  *lastdata = sock->lastdata;
  sock->lastdata.pbuf = NULL;
  *conn = sock->conn;
  sock->conn = NULL;
  return 1;
}
619
620 /** Free a socket's leftover members.
621 */
622 static void
free_socket_free_elements(int is_tcp,struct netconn * conn,union lwip_sock_lastdata * lastdata)623 free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata)
624 {
625 if (lastdata->pbuf != NULL) {
626 if (is_tcp) {
627 pbuf_free(lastdata->pbuf);
628 } else {
629 netbuf_delete(lastdata->netbuf);
630 }
631 }
632 if (conn != NULL) {
633 /* netconn_prepare_delete() has already been called, here we only free the conn */
634 netconn_delete(conn);
635 }
636 }
637
/** Free a socket. The socket's netconn must have been
 * delete before!
 *
 * In full-duplex builds the free may be deferred until the last
 * reference is dropped (see free_socket_locked()/done_socket()).
 *
 * @param sock the socket to free
 * @param is_tcp != 0 for TCP sockets, used to free lastdata
 */
static void
free_socket(struct lwip_sock *sock, int is_tcp)
{
  int freed;
  struct netconn *conn;
  union lwip_sock_lastdata lastdata;
#if !ESP_LWIP_LOCK
  SYS_ARCH_DECL_PROTECT(lev);
#endif /* !ESP_LWIP_LOCK */

  /* Protect socket array */
#if ESP_LWIP_LOCK
  SYS_ARCH_PROTECT_SOCK(sock);
#else
  SYS_ARCH_PROTECT(lev);
#endif /* ESP_LWIP_LOCK */

  freed = free_socket_locked(sock, is_tcp, &conn, &lastdata);
#if ESP_LWIP_LOCK
  SYS_ARCH_UNPROTECT_SOCK(sock);
#else
  SYS_ARCH_UNPROTECT(lev);
#endif /* ESP_LWIP_LOCK */
  /* don't use 'sock' after this line, as another task might have allocated it */

  if (freed) {
    /* free the netconn and leftover data outside of the lock */
    free_socket_free_elements(is_tcp, conn, &lastdata);
  }
}
673
674 /* Below this, the well-known socket functions are implemented.
675 * Use google.com or opengroup.org to get a good description :-)
676 *
677 * Exceptions are documented!
678 */
679
680 int
lwip_accept(int s,struct sockaddr * addr,socklen_t * addrlen)681 lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
682 {
683 struct lwip_sock *sock, *nsock;
684 struct netconn *newconn;
685 ip_addr_t naddr;
686 u16_t port = 0;
687 int newsock;
688 err_t err;
689 int recvevent;
690 SYS_ARCH_DECL_PROTECT(lev);
691
692 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d)...\n", s));
693 sock = get_socket(s);
694 if (!sock) {
695 return -1;
696 }
697
698 /* wait for a new connection */
699 err = netconn_accept(sock->conn, &newconn);
700 if (err != ERR_OK) {
701 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_acept failed, err=%d\n", s, err));
702 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
703 sock_set_errno(sock, EOPNOTSUPP);
704 } else if (err == ERR_CLSD) {
705 sock_set_errno(sock, EINVAL);
706 } else {
707 sock_set_errno(sock, err_to_errno(err));
708 }
709 done_socket(sock);
710 return -1;
711 }
712 LWIP_ASSERT("newconn != NULL", newconn != NULL);
713
714 newsock = alloc_socket(newconn, 1);
715 if (newsock == -1) {
716 netconn_delete(newconn);
717 sock_set_errno(sock, ENFILE);
718 done_socket(sock);
719 return -1;
720 }
721 LWIP_ASSERT("invalid socket index", (newsock >= LWIP_SOCKET_OFFSET) && (newsock < NUM_SOCKETS + LWIP_SOCKET_OFFSET));
722 nsock = &sockets[newsock - LWIP_SOCKET_OFFSET];
723
724 /* See event_callback: If data comes in right away after an accept, even
725 * though the server task might not have created a new socket yet.
726 * In that case, newconn->socket is counted down (newconn->socket--),
727 * so nsock->rcvevent is >= 1 here!
728 */
729 SYS_ARCH_PROTECT(lev);
730 recvevent = (s16_t)(-1 - newconn->socket);
731 newconn->socket = newsock;
732 SYS_ARCH_UNPROTECT(lev);
733
734 if (newconn->callback) {
735 LOCK_TCPIP_CORE();
736 while (recvevent > 0) {
737 recvevent--;
738 newconn->callback(newconn, NETCONN_EVT_RCVPLUS, 0);
739 }
740 UNLOCK_TCPIP_CORE();
741 }
742
743 /* Note that POSIX only requires us to check addr is non-NULL. addrlen must
744 * not be NULL if addr is valid.
745 */
746 if ((addr != NULL) && (addrlen != NULL)) {
747 union sockaddr_aligned tempaddr;
748 /* get the IP address and port of the remote host */
749 err = netconn_peer(newconn, &naddr, &port);
750 if (err != ERR_OK) {
751 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_peer failed, err=%d\n", s, err));
752 free_socket(nsock, 1);
753 sock_set_errno(sock, err_to_errno(err));
754 done_socket(sock);
755 return -1;
756 }
757
758 IPADDR_PORT_TO_SOCKADDR(&tempaddr, &naddr, port);
759 /** tempaddr.sa.sa_len */
760 if (*addrlen > sizeof(struct sockaddr_in)) {
761 *addrlen = sizeof(struct sockaddr_in);
762 }
763 MEMCPY(addr, &tempaddr, *addrlen);
764
765 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d addr=", s, newsock));
766 ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
767 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", port));
768 } else {
769 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d", s, newsock));
770 }
771
772 sock_set_errno(sock, 0);
773 done_socket(sock);
774 done_socket(nsock);
775 return newsock;
776 }
777
/**
 * Bind a socket to a local address and port.
 *
 * @param s socket descriptor
 * @param name local address to bind to (sockaddr_in or sockaddr_in6,
 *             must match the socket's IP version)
 * @param namelen size of 'name'
 * @return 0 on success, -1 on error (errno set)
 */
int
lwip_bind(int s, const struct sockaddr *name, socklen_t namelen)
{
  struct lwip_sock *sock;
  ip_addr_t local_addr;
  u16_t local_port;
  err_t err;

  sock = get_socket(s);
  if (!sock) {
    return -1;
  }

  if (!SOCK_ADDR_TYPE_MATCH(name, sock)) {
    /* sockaddr does not match socket type (IPv4/IPv6) */
    sock_set_errno(sock, err_to_errno(ERR_VAL));
    done_socket(sock);
    return -1;
  }

  /* check size, family and alignment of 'name' */
  LWIP_ERROR("lwip_bind: invalid address", (IS_SOCK_ADDR_LEN_VALID(namelen) &&
             IS_SOCK_ADDR_TYPE_VALID(name) && IS_SOCK_ADDR_ALIGNED(name)),
             sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
  LWIP_UNUSED_ARG(namelen);

  SOCKADDR_TO_IPADDR_PORT(name, &local_addr, local_port);
  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d, addr=", s));
  ip_addr_debug_print_val(SOCKETS_DEBUG, local_addr);
  LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", local_port));

#if LWIP_IPV4 && LWIP_IPV6
  /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
  if (IP_IS_V6_VAL(local_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&local_addr))) {
    unmap_ipv4_mapped_ipv6(ip_2_ip4(&local_addr), ip_2_ip6(&local_addr));
    IP_SET_TYPE_VAL(local_addr, IPADDR_TYPE_V4);
  }
#endif /* LWIP_IPV4 && LWIP_IPV6 */

  err = netconn_bind(sock->conn, &local_addr, local_port);

  if (err != ERR_OK) {
    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) failed, err=%d\n", s, err));
    sock_set_errno(sock, err_to_errno(err));
    done_socket(sock);
    return -1;
  }

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) succeeded\n", s));
  sock_set_errno(sock, 0);
  done_socket(sock);
  return 0;
}
831
/**
 * Close a socket: drop multicast memberships registered through it,
 * prepare the netconn for deletion, then free the socket slot
 * (possibly deferred in full-duplex builds).
 *
 * @param s socket descriptor to close
 * @return 0 on success, -1 on error (errno set)
 */
int
lwip_close(int s)
{
  struct lwip_sock *sock;
  int is_tcp = 0;
  err_t err;

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_close(%d)\n", s));

  sock = get_socket(s);
  if (!sock) {
    return -1;
  }

  if (sock->conn != NULL) {
    is_tcp = NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP;
  } else {
    /* defensive: a socket without conn must not hold leftover data */
    LWIP_ASSERT("sock->lastdata == NULL", sock->lastdata.pbuf == NULL);
  }

#if LWIP_IGMP
  /* drop all possibly joined IGMP memberships */
  lwip_socket_drop_registered_memberships(s);
#endif /* LWIP_IGMP */
#if LWIP_IPV6_MLD
  /* drop all possibly joined MLD6 memberships */
  lwip_socket_drop_registered_mld6_memberships(s);
#endif /* LWIP_IPV6_MLD */

  err = netconn_prepare_delete(sock->conn);
  if (err != ERR_OK) {
    sock_set_errno(sock, err_to_errno(err));
    done_socket(sock);
    return -1;
  }

  /* the netconn itself is deleted via free_socket_free_elements() */
  free_socket(sock, is_tcp);
  set_errno(0);
  return 0;
}
872
/**
 * Connect a socket to a remote address, or disconnect it when
 * name->sa_family is AF_UNSPEC (POSIX behavior for datagram sockets).
 *
 * @param s socket descriptor
 * @param name remote address (or AF_UNSPEC to disconnect)
 * @param namelen size of 'name'
 * @return 0 on success, -1 on error (errno set)
 */
int
lwip_connect(int s, const struct sockaddr *name, socklen_t namelen)
{
  struct lwip_sock *sock;
  err_t err;

  sock = get_socket(s);
  if (!sock) {
    return -1;
  }

  if (!SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock)) {
    /* sockaddr does not match socket type (IPv4/IPv6) */
    sock_set_errno(sock, err_to_errno(ERR_VAL));
    done_socket(sock);
    return -1;
  }

  LWIP_UNUSED_ARG(namelen);
  if (name->sa_family == AF_UNSPEC) {
    /* AF_UNSPEC dissolves the association instead of connecting */
    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, AF_UNSPEC)\n", s));
    err = netconn_disconnect(sock->conn);
  } else {
    ip_addr_t remote_addr;
    u16_t remote_port;

    /* check size, family and alignment of 'name' */
    LWIP_ERROR("lwip_connect: invalid address", IS_SOCK_ADDR_LEN_VALID(namelen) &&
               IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) && IS_SOCK_ADDR_ALIGNED(name),
               sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);

    SOCKADDR_TO_IPADDR_PORT(name, &remote_addr, remote_port);
    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, addr=", s));
    ip_addr_debug_print_val(SOCKETS_DEBUG, remote_addr);
    LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", remote_port));

#if LWIP_IPV4 && LWIP_IPV6
    /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
    if (IP_IS_V6_VAL(remote_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&remote_addr))) {
      unmap_ipv4_mapped_ipv6(ip_2_ip4(&remote_addr), ip_2_ip6(&remote_addr));
      IP_SET_TYPE_VAL(remote_addr, IPADDR_TYPE_V4);
    }
#endif /* LWIP_IPV4 && LWIP_IPV6 */

    err = netconn_connect(sock->conn, &remote_addr, remote_port);
  }

  if (err != ERR_OK) {
    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) failed, err=%d\n", s, err));
    sock_set_errno(sock, err_to_errno(err));
    done_socket(sock);
    return -1;
  }

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) succeeded\n", s));
  sock_set_errno(sock, 0);
  done_socket(sock);
  return 0;
}
932
933 /**
934 * Set a socket into listen mode.
935 * The socket may not have been used for another connection previously.
936 *
937 * @param s the socket to set to listening mode
938 * @param backlog (ATTENTION: needs TCP_LISTEN_BACKLOG=1)
939 * @return 0 on success, non-zero on failure
940 */
941 int
lwip_listen(int s,int backlog)942 lwip_listen(int s, int backlog)
943 {
944 struct lwip_sock *sock;
945 err_t err;
946
947 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d, backlog=%d)\n", s, backlog));
948
949 sock = get_socket(s);
950 if (!sock) {
951 return -1;
952 }
953
954 /* limit the "backlog" parameter to fit in an u8_t */
955 backlog = LWIP_MIN(LWIP_MAX(backlog, 0), 0xff);
956
957 err = netconn_listen_with_backlog(sock->conn, (u8_t)backlog);
958
959 if (err != ERR_OK) {
960 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d) failed, err=%d\n", s, err));
961 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
962 sock_set_errno(sock, EOPNOTSUPP);
963 } else {
964 sock_set_errno(sock, err_to_errno(err));
965 }
966 done_socket(sock);
967 return -1;
968 }
969
970 sock_set_errno(sock, 0);
971 done_socket(sock);
972 return 0;
973 }
974
975 #if LWIP_TCP
976 /* Helper function to loop over receiving pbufs from netconn
977 * until "len" bytes are received or we're otherwise done.
978 * Keeps sock->lastdata for peeking or partly copying.
979 */
static ssize_t
lwip_recv_tcp(struct lwip_sock *sock, void *mem, size_t len, int flags)
{
  /* NETCONN_NOAUTORCVD: the TCP receive window is updated manually via
     netconn_tcp_recvd() at the end, after the data has been copied out */
  u8_t apiflags = NETCONN_NOAUTORCVD;
  ssize_t recvd = 0;
  /* clamp the request so the returned byte count fits into ssize_t */
  ssize_t recv_left = (len <= SSIZE_MAX) ? (ssize_t)len : SSIZE_MAX;

  LWIP_ASSERT("no socket given", sock != NULL);
  LWIP_ASSERT("this should be checked internally", NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP);

  if (flags & MSG_DONTWAIT) {
    apiflags |= NETCONN_DONTBLOCK;
  }

  do {
    struct pbuf *p;
    err_t err;
    u16_t copylen;

    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: top while sock->lastdata=%p\n", (void *)sock->lastdata.pbuf));
    /* Check if there is data left from the last recv operation. */
    if (sock->lastdata.pbuf) {
      p = sock->lastdata.pbuf;
    } else {
      /* No data was left from the previous operation, so we try to get
         some from the network. */
      err = netconn_recv_tcp_pbuf_flags(sock->conn, &p, apiflags);
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: netconn_recv err=%d, pbuf=%p\n",
                                  err, (void *)p));

      if (err != ERR_OK) {
        if (recvd > 0) {
          /* already received data, return that (this trusts in getting the same error from
             netconn layer again next time netconn_recv is called) */
          goto lwip_recv_tcp_done;
        }
        /* We should really do some error checking here. */
        LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: p == NULL, error is \"%s\"!\n",
                                    lwip_strerr(err)));
        sock_set_errno(sock, err_to_errno(err));
        if (err == ERR_CLSD) {
          /* connection closed by the peer: report EOF (0), not an error */
          return 0;
        } else {
          return -1;
        }
      }
      LWIP_ASSERT("p != NULL", p != NULL);
      /* remember the pbuf: unconsumed data is picked up by the next call */
      sock->lastdata.pbuf = p;
    }

    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: buflen=%"U16_F" recv_left=%d off=%d\n",
                                p->tot_len, (int)recv_left, (int)recvd));

    /* copy at most what is left of the caller's buffer */
    if (recv_left > p->tot_len) {
      copylen = p->tot_len;
    } else {
      copylen = (u16_t)recv_left;
    }
    if (recvd + copylen < recvd) {
      /* overflow */
      copylen = (u16_t)(SSIZE_MAX - recvd);
    }

    /* copy the contents of the received buffer into
       the supplied memory pointer mem */
    pbuf_copy_partial(p, (u8_t *)mem + recvd, copylen, 0);

    recvd += copylen;

    /* TCP combines multiple pbufs for one recv */
    LWIP_ASSERT("invalid copylen, len would underflow", recv_left >= copylen);
    recv_left -= copylen;

    /* Unless we peek the incoming message... */
    if ((flags & MSG_PEEK) == 0) {
      /* ... check if there is data left in the pbuf */
      LWIP_ASSERT("invalid copylen", p->tot_len >= copylen);
      if (p->tot_len - copylen > 0) {
        /* If so, it should be saved in the sock structure for the next recv call.
           We store the pbuf but hide/free the consumed data: */
        sock->lastdata.pbuf = pbuf_free_header(p, copylen);
        LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: lastdata now pbuf=%p\n", (void *)sock->lastdata.pbuf));
      } else {
        sock->lastdata.pbuf = NULL;
        LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: deleting pbuf=%p\n", (void *)p));
        pbuf_free(p);
      }
    }
    /* once we have some data to return, only add more if we don't need to wait */
    apiflags |= NETCONN_DONTBLOCK | NETCONN_NOFIN;
    /* @todo: do we need to support peeking more than one pbuf? */
  } while ((recv_left > 0) && !(flags & MSG_PEEK));
lwip_recv_tcp_done:
  if ((recvd > 0) && !(flags & MSG_PEEK)) {
    /* ensure window update after copying all data */
    netconn_tcp_recvd(sock->conn, (size_t)recvd);
  }
  sock_set_errno(sock, 0);
  return recvd;
}
1080 #endif
1081
1082 /* Convert a netbuf's address data to struct sockaddr */
static int
lwip_sock_make_addr(struct netconn *conn, ip_addr_t *fromaddr, u16_t port,
                    struct sockaddr *from, socklen_t *fromlen)
{
  /* returns 1 if the caller's buffer was too small for the address */
  int truncated = 0;
  union sockaddr_aligned saddr;

  LWIP_UNUSED_ARG(conn);

  LWIP_ASSERT("fromaddr != NULL", fromaddr != NULL);
  LWIP_ASSERT("from != NULL", from != NULL);
  LWIP_ASSERT("fromlen != NULL", fromlen != NULL);

#if LWIP_IPV4 && LWIP_IPV6
  /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
  if (NETCONNTYPE_ISIPV6(netconn_type(conn)) && IP_IS_V4(fromaddr)) {
    ip4_2_ipv4_mapped_ipv6(ip_2_ip6(fromaddr), ip_2_ip4(fromaddr));
    IP_SET_TYPE(fromaddr, IPADDR_TYPE_V6);
  }
#endif /* LWIP_IPV4 && LWIP_IPV6 */

  IPADDR_PORT_TO_SOCKADDR(&saddr, fromaddr, port);
  /** saddr.sa.sa_len */
  /* NOTE(review): the length handling below always uses
     sizeof(struct sockaddr_in), even when the dual-stack branch above
     produced an IPv6 address (which needs sizeof(struct sockaddr_in6)).
     Upstream lwIP compares against the actual address size here - verify
     this is intended for this (possibly IPv4-only) port. */
  if (*fromlen < sizeof(struct sockaddr_in)) {
    truncated = 1;
  } else if (*fromlen > sizeof(struct sockaddr_in)) {
    *fromlen = sizeof(struct sockaddr_in);
  }
  MEMCPY(from, &saddr, *fromlen);
  return truncated;
}
1114
1115 #if LWIP_TCP
1116 /* Helper function to get a tcp socket's remote address info */
static int
lwip_recv_tcp_from(struct lwip_sock *sock, struct sockaddr *from, socklen_t *fromlen, const char *dbg_fn, int dbg_s, ssize_t dbg_ret)
{
  if (sock == NULL) {
    return 0;
  }
  LWIP_UNUSED_ARG(dbg_fn);
  LWIP_UNUSED_ARG(dbg_s);
  LWIP_UNUSED_ARG(dbg_ret);

  /* With SOCKETS_DEBUG enabled the address is fetched even without output
     arguments so it can be printed; otherwise skip the work entirely
     (the '#if' removes only the condition, not the block). */
#if !SOCKETS_DEBUG
  if (from && fromlen)
#endif /* !SOCKETS_DEBUG */
  {
    /* get remote addr/port from tcp_pcb */
    u16_t port;
    ip_addr_t tmpaddr;
    netconn_getaddr(sock->conn, &tmpaddr, &port, 0);
    LWIP_DEBUGF(SOCKETS_DEBUG, ("%s(%d): addr=", dbg_fn, dbg_s));
    ip_addr_debug_print_val(SOCKETS_DEBUG, tmpaddr);
    LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", port, (int)dbg_ret));
    if (from && fromlen) {
      /* convert to struct sockaddr; propagate truncation status */
      return lwip_sock_make_addr(sock->conn, &tmpaddr, port, from, fromlen);
    }
  }
  return 0;
}
1144 #endif
1145
1146 /* Helper function to receive a netbuf from a udp or raw netconn.
1147 * Keeps sock->lastdata for peeking.
1148 */
static err_t
lwip_recvfrom_udp_raw(struct lwip_sock *sock, int flags, struct msghdr *msg, u16_t *datagram_len, int dbg_s)
{
  struct netbuf *buf;
  u8_t apiflags;
  err_t err;
  u16_t buflen, copylen, copied;
  int i;

  LWIP_UNUSED_ARG(dbg_s);
  /* msg_iov may only be NULL when msg_iovlen <= 0 (the copy loop below
     then does nothing) */
  LWIP_ERROR("lwip_recvfrom_udp_raw: invalid arguments", (msg->msg_iov != NULL) || (msg->msg_iovlen <= 0), return ERR_ARG;);

  if (flags & MSG_DONTWAIT) {
    apiflags = NETCONN_DONTBLOCK;
  } else {
    apiflags = 0;
  }

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: top sock->lastdata=%p\n", (void *)sock->lastdata.netbuf));
  /* Check if there is data left from the last recv operation. */
  buf = sock->lastdata.netbuf;
  if (buf == NULL) {
    /* No data was left from the previous operation, so we try to get
       some from the network. */
    err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &buf, apiflags);
    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: netconn_recv err=%d, netbuf=%p\n",
                                err, (void *)buf));

    if (err != ERR_OK) {
      return err;
    }
    LWIP_ASSERT("buf != NULL", buf != NULL);
    /* remember the netbuf so that MSG_PEEK can deliver it again */
    sock->lastdata.netbuf = buf;
  }
  buflen = buf->p->tot_len;
  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw: buflen=%"U16_F"\n", buflen));

  copied = 0;
  /* copy the pbuf payload into the iovs */
  for (i = 0; (i < msg->msg_iovlen) && (copied < buflen); i++) {
    u16_t len_left = (u16_t)(buflen - copied);
    if (msg->msg_iov[i].iov_len > len_left) {
      copylen = len_left;
    } else {
      copylen = (u16_t)msg->msg_iov[i].iov_len;
    }

    /* copy the contents of the received buffer into
       the supplied memory buffer */
    pbuf_copy_partial(buf->p, (u8_t *)msg->msg_iov[i].iov_base, copylen, copied);
    copied = (u16_t)(copied + copylen);
  }

  /* Check to see from where the data was.*/
#if !SOCKETS_DEBUG
  if (msg->msg_name && msg->msg_namelen)
#endif /* !SOCKETS_DEBUG */
  {
    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw(%d): addr=", dbg_s));
    ip_addr_debug_print_val(SOCKETS_DEBUG, *netbuf_fromaddr(buf));
    LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", netbuf_fromport(buf), copied));
    if (msg->msg_name && msg->msg_namelen) {
      /* fill in the source address; truncation is not reported here */
      lwip_sock_make_addr(sock->conn, netbuf_fromaddr(buf), netbuf_fromport(buf),
                          (struct sockaddr *)msg->msg_name, &msg->msg_namelen);
    }
  }

  /* Initialize flag output */
  msg->msg_flags = 0;

  if (msg->msg_control) {
    u8_t wrote_msg = 0;
#if LWIP_NETBUF_RECVINFO
    /* Check if packet info was recorded */
    if (buf->flags & NETBUF_FLAG_DESTADDR) {
      if (IP_IS_V4(&buf->toaddr)) {
#if LWIP_IPV4
        if (msg->msg_controllen >= CMSG_SPACE(sizeof(struct in_pktinfo))) {
          struct cmsghdr *chdr = CMSG_FIRSTHDR(msg); /* This will always return a header!! */
          struct in_pktinfo *pkti = (struct in_pktinfo *)CMSG_DATA(chdr);
          chdr->cmsg_level = IPPROTO_IP;
          chdr->cmsg_type = IP_PKTINFO;
          chdr->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
          pkti->ipi_ifindex = buf->p->if_idx;
          inet_addr_from_ip4addr(&pkti->ipi_addr, ip_2_ip4(netbuf_destaddr(buf)));
          msg->msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo));
          wrote_msg = 1;
        } else {
          /* control buffer too small for the pktinfo cmsg */
          msg->msg_flags |= MSG_CTRUNC;
        }
#endif /* LWIP_IPV4 */
      }
    }
#endif /* LWIP_NETBUF_RECVINFO */

    if (!wrote_msg) {
      msg->msg_controllen = 0;
    }
  }

  /* If we don't peek the incoming message: zero lastdata pointer and free the netbuf */
  if ((flags & MSG_PEEK) == 0) {
    sock->lastdata.netbuf = NULL;
    netbuf_delete(buf);
  }
  if (datagram_len) {
    /* report the full datagram size so the caller can detect truncation */
    *datagram_len = buflen;
  }
  return ERR_OK;
}
1259
/** Receive data from a socket, optionally reporting the sender's address.
 *
 * Dispatches to lwip_recv_tcp() for TCP netconns and to
 * lwip_recvfrom_udp_raw() for UDP/RAW netconns.
 *
 * @param s the socket to receive from
 * @param mem buffer to receive into
 * @param len size of 'mem'
 * @param flags MSG_PEEK and/or MSG_DONTWAIT
 * @param from if != NULL, filled with the sender's address
 * @param fromlen in: size of 'from'; out: size of the stored address
 * @return number of bytes received, or -1 on error (sets errno)
 */
ssize_t
lwip_recvfrom(int s, void *mem, size_t len, int flags,
              struct sockaddr *from, socklen_t *fromlen)
{
  struct lwip_sock *sock;
  ssize_t ret;

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d, %p, %"SZT_F", 0x%x, ..)\n", s, mem, len, flags));
  sock = get_socket(s);
  if (!sock) {
    return -1;
  }
#if LWIP_TCP
  if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
    ret = lwip_recv_tcp(sock, mem, len, flags);
    lwip_recv_tcp_from(sock, from, fromlen, "lwip_recvfrom", s, ret);
    done_socket(sock);
    return ret;
  } else
#endif
  {
    /* UDP/RAW: wrap the single buffer in a msghdr for the shared helper */
    u16_t datagram_len = 0;
    struct iovec vec;
    struct msghdr msg;
    err_t err;
    vec.iov_base = mem;
    vec.iov_len = len;
    msg.msg_control = NULL;
    msg.msg_controllen = 0;
    msg.msg_flags = 0;
    msg.msg_iov = &vec;
    msg.msg_iovlen = 1;
    msg.msg_name = from;
    msg.msg_namelen = (fromlen ? *fromlen : 0);
    err = lwip_recvfrom_udp_raw(sock, flags, &msg, &datagram_len, s);
    if (err != ERR_OK) {
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
                                  s, lwip_strerr(err)));
      sock_set_errno(sock, err_to_errno(err));
      done_socket(sock);
      return -1;
    }
    /* a datagram larger than 'len' is silently truncated here */
    ret = (ssize_t)LWIP_MIN(LWIP_MIN(len, datagram_len), SSIZE_MAX);
    if (fromlen) {
      *fromlen = msg.msg_namelen;
    }
  }

  sock_set_errno(sock, 0);
  done_socket(sock);
  return ret;
}
1312
1313 ssize_t
lwip_read(int s,void * mem,size_t len)1314 lwip_read(int s, void *mem, size_t len)
1315 {
1316 return lwip_recvfrom(s, mem, len, 0, NULL, NULL);
1317 }
1318
1319 ssize_t
lwip_readv(int s,const struct iovec * iov,int iovcnt)1320 lwip_readv(int s, const struct iovec *iov, int iovcnt)
1321 {
1322 struct msghdr msg;
1323
1324 msg.msg_name = NULL;
1325 msg.msg_namelen = 0;
1326 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1327 Blame the opengroup standard for this inconsistency. */
1328 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1329 msg.msg_iovlen = iovcnt;
1330 msg.msg_control = NULL;
1331 msg.msg_controllen = 0;
1332 msg.msg_flags = 0;
1333 return lwip_recvmsg(s, &msg, 0);
1334 }
1335
1336 ssize_t
lwip_recv(int s,void * mem,size_t len,int flags)1337 lwip_recv(int s, void *mem, size_t len, int flags)
1338 {
1339 return lwip_recvfrom(s, mem, len, flags, NULL, NULL);
1340 }
1341
/** Receive a message from a socket into a scatter/gather vector.
 *
 * For TCP, fills the iovs one after another; for UDP/RAW, receives a
 * single datagram and sets MSG_TRUNC if it did not fit.
 *
 * @param s the socket to receive from
 * @param message msghdr describing iovs and optional address/control output
 * @param flags MSG_PEEK and/or MSG_DONTWAIT (others rejected)
 * @return number of bytes received, or -1 on error (sets errno)
 */
ssize_t
lwip_recvmsg(int s, struct msghdr *message, int flags)
{
  struct lwip_sock *sock;
  int i;
  ssize_t buflen;

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg(%d, message=%p, flags=0x%x)\n", s, (void *)message, flags));
  LWIP_ERROR("lwip_recvmsg: invalid message pointer", message != NULL, return ERR_ARG;);
  LWIP_ERROR("lwip_recvmsg: unsupported flags", (flags & ~(MSG_PEEK|MSG_DONTWAIT)) == 0,
             set_errno(EOPNOTSUPP); return -1;);

  if ((message->msg_iovlen <= 0) || (message->msg_iovlen > IOV_MAX)) {
    set_errno(EMSGSIZE);
    return -1;
  }

  sock = get_socket(s);
  if (!sock) {
    return -1;
  }

  /* check for valid vectors: non-NULL bases, positive lengths that fit
     into ssize_t, and a total that does not overflow */
  buflen = 0;
  for (i = 0; i < message->msg_iovlen; i++) {
    if ((message->msg_iov[i].iov_base == NULL) || ((ssize_t)message->msg_iov[i].iov_len <= 0) ||
        ((size_t)(ssize_t)message->msg_iov[i].iov_len != message->msg_iov[i].iov_len) ||
        ((ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len) <= 0)) {
      sock_set_errno(sock, err_to_errno(ERR_VAL));
      done_socket(sock);
      return -1;
    }
    buflen = (ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len);
  }

  if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
#if LWIP_TCP
    int recv_flags = flags;
    message->msg_flags = 0;
    /* recv the data */
    buflen = 0;
    for (i = 0; i < message->msg_iovlen; i++) {
      /* try to receive into this vector's buffer */
      ssize_t recvd_local = lwip_recv_tcp(sock, message->msg_iov[i].iov_base, message->msg_iov[i].iov_len, recv_flags);
      if (recvd_local > 0) {
        /* sum up received bytes */
        buflen += recvd_local;
      }
      if ((recvd_local < 0) || (recvd_local < (int)message->msg_iov[i].iov_len) ||
          (flags & MSG_PEEK)) {
        /* returned prematurely (or peeking, which might actually be limitated to the first iov) */
        if (buflen <= 0) {
          /* nothing received at all, propagate the error */
          buflen = recvd_local;
        }
        break;
      }
      /* pass MSG_DONTWAIT to lwip_recv_tcp() to prevent waiting for more data */
      recv_flags |= MSG_DONTWAIT;
    }
    if (buflen > 0) {
      /* reset socket error since we have received something */
      sock_set_errno(sock, 0);
    }
    /* " If the socket is connected, the msg_name and msg_namelen members shall be ignored." */
    done_socket(sock);
    return buflen;
#else /* LWIP_TCP */
    sock_set_errno(sock, err_to_errno(ERR_ARG));
    done_socket(sock);
    return -1;
#endif /* LWIP_TCP */
  }
  /* else, UDP and RAW NETCONNs */
#if LWIP_UDP || LWIP_RAW
  {
    u16_t datagram_len = 0;
    err_t err;
    err = lwip_recvfrom_udp_raw(sock, flags, message, &datagram_len, s);
    if (err != ERR_OK) {
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
                                  s, lwip_strerr(err)));
      sock_set_errno(sock, err_to_errno(err));
      done_socket(sock);
      return -1;
    }
    /* the datagram did not fit into the supplied vectors: flag truncation */
    if (datagram_len > buflen) {
      message->msg_flags |= MSG_TRUNC;
    }

    sock_set_errno(sock, 0);
    done_socket(sock);
    return (int)datagram_len;
  }
#else /* LWIP_UDP || LWIP_RAW */
  sock_set_errno(sock, err_to_errno(ERR_ARG));
  done_socket(sock);
  return -1;
#endif /* LWIP_UDP || LWIP_RAW */
}
1442
/** Send data on a connected socket.
 *
 * TCP data is written via netconn_write_partly(); for UDP/RAW netconns
 * the call is forwarded to lwip_sendto() with no destination address.
 *
 * @param s the socket to send on
 * @param data buffer holding the data to send
 * @param size number of bytes to send
 * @param flags MSG_MORE and/or MSG_DONTWAIT
 * @return number of bytes actually sent, or -1 on error (sets errno)
 */
ssize_t
lwip_send(int s, const void *data, size_t size, int flags)
{
  struct lwip_sock *sock;
  err_t err;
  u8_t write_flags;
  size_t written;

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d, data=%p, size=%"SZT_F", flags=0x%x)\n",
                              s, data, size, flags));

  sock = get_socket(s);
  if (!sock) {
    return -1;
  }

  if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
#if (LWIP_UDP || LWIP_RAW)
    done_socket(sock);
    return lwip_sendto(s, data, size, flags, NULL, 0);
#else /* (LWIP_UDP || LWIP_RAW) */
    sock_set_errno(sock, err_to_errno(ERR_ARG));
    done_socket(sock);
    return -1;
#endif /* (LWIP_UDP || LWIP_RAW) */
  }

  /* translate BSD flags into netconn write flags */
  write_flags = (u8_t)(NETCONN_COPY |
                       ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
                       ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));
  written = 0;
  err = netconn_write_partly(sock->conn, data, size, write_flags, &written);

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d) err=%d written=%"SZT_F"\n", s, err, written));
  sock_set_errno(sock, err_to_errno(err));
  done_socket(sock);
  /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
  return (err == ERR_OK ? (ssize_t)written : -1);
}
1482
/** Send a message described by a msghdr (gather-write).
 *
 * For TCP, the iovs are handed to netconn_write_vectors_partly(). For
 * UDP/RAW, the iovs are assembled into one netbuf (either copied into a
 * single pbuf or chained as PBUF_REFs, depending on
 * LWIP_NETIF_TX_SINGLE_PBUF) and sent as one datagram.
 *
 * @param s the socket to send on
 * @param msg msghdr with iovs and an optional destination address
 * @param flags MSG_DONTWAIT and/or MSG_MORE (others rejected)
 * @return number of bytes sent, or -1 on error (sets errno)
 */
ssize_t
lwip_sendmsg(int s, const struct msghdr *msg, int flags)
{
  struct lwip_sock *sock;
#if LWIP_TCP
  u8_t write_flags;
  size_t written;
#endif
  err_t err = ERR_OK;

  sock = get_socket(s);
  if (!sock) {
    return -1;
  }

  LWIP_ERROR("lwip_sendmsg: invalid msghdr", msg != NULL,
             sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
  LWIP_ERROR("lwip_sendmsg: invalid msghdr iov", msg->msg_iov != NULL,
             sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
  LWIP_ERROR("lwip_sendmsg: maximum iovs exceeded", (msg->msg_iovlen > 0) && (msg->msg_iovlen <= IOV_MAX),
             sock_set_errno(sock, EMSGSIZE); done_socket(sock); return -1;);
  LWIP_ERROR("lwip_sendmsg: unsupported flags", (flags & ~(MSG_DONTWAIT | MSG_MORE)) == 0,
             sock_set_errno(sock, EOPNOTSUPP); done_socket(sock); return -1;);

  /* control data is not supported on the send path */
  LWIP_UNUSED_ARG(msg->msg_control);
  LWIP_UNUSED_ARG(msg->msg_controllen);
  LWIP_UNUSED_ARG(msg->msg_flags);

  if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
#if LWIP_TCP
    write_flags = (u8_t)(NETCONN_COPY |
                         ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
                         ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));

    written = 0;
    err = netconn_write_vectors_partly(sock->conn, (struct netvector *)msg->msg_iov, (u16_t)msg->msg_iovlen, write_flags, &written);
    sock_set_errno(sock, err_to_errno(err));
    done_socket(sock);
    /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
    return (err == ERR_OK ? (ssize_t)written : -1);
#else /* LWIP_TCP */
    sock_set_errno(sock, err_to_errno(ERR_ARG));
    done_socket(sock);
    return -1;
#endif /* LWIP_TCP */
  }
  /* else, UDP and RAW NETCONNs */
#if LWIP_UDP || LWIP_RAW
  {
    struct netbuf chain_buf;
    int i;
    ssize_t size = 0;

    LWIP_UNUSED_ARG(flags);
    LWIP_ERROR("lwip_sendmsg: invalid msghdr name", (((msg->msg_name == NULL) && (msg->msg_namelen == 0)) ||
               IS_SOCK_ADDR_LEN_VALID(msg->msg_namelen)),
               sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);

    /* initialize chain buffer with destination */
    memset(&chain_buf, 0, sizeof(struct netbuf));
    if (msg->msg_name) {
      u16_t remote_port;
      SOCKADDR_TO_IPADDR_PORT((const struct sockaddr *)msg->msg_name, &chain_buf.addr, remote_port);
      netbuf_fromport(&chain_buf) = remote_port;
    }
#if LWIP_NETIF_TX_SINGLE_PBUF
    /* sum up the iov lengths, rejecting int/u16_t overflow */
    for (i = 0; i < msg->msg_iovlen; i++) {
      size += msg->msg_iov[i].iov_len;
      if ((msg->msg_iov[i].iov_len > INT_MAX) || (size < (int)msg->msg_iov[i].iov_len)) {
        /* overflow */
        goto sendmsg_emsgsize;
      }
    }
    if (size > 0xFFFF) {
      /* overflow */
      goto sendmsg_emsgsize;
    }
    /* Allocate a new netbuf and copy the data into it. */
    if (netbuf_alloc(&chain_buf, (u16_t)size) == NULL) {
      err = ERR_MEM;
    } else {
      /* flatten the IO vectors */
      size_t offset = 0;
      for (i = 0; i < msg->msg_iovlen; i++) {
        MEMCPY(&((u8_t *)chain_buf.p->payload)[offset], msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len);
        offset += msg->msg_iov[i].iov_len;
      }
#if LWIP_CHECKSUM_ON_COPY
      {
        /* This can be improved by using LWIP_CHKSUM_COPY() and aggregating the checksum for each IO vector */
        u16_t chksum = ~inet_chksum_pbuf(chain_buf.p);
        netbuf_set_chksum(&chain_buf, chksum);
      }
#endif /* LWIP_CHECKSUM_ON_COPY */
      err = ERR_OK;
    }
#else /* LWIP_NETIF_TX_SINGLE_PBUF */
    /* create a chained netbuf from the IO vectors. NOTE: we assemble a pbuf chain
       manually to avoid having to allocate, chain, and delete a netbuf for each iov */
    for (i = 0; i < msg->msg_iovlen; i++) {
      struct pbuf *p;
      if (msg->msg_iov[i].iov_len > 0xFFFF) {
        /* overflow */
        goto sendmsg_emsgsize;
      }
      p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF);
      if (p == NULL) {
        err = ERR_MEM; /* let netbuf_delete() cleanup chain_buf */
        break;
      }
      /* reference the caller's buffer instead of copying it */
      p->payload = msg->msg_iov[i].iov_base;
      p->len = p->tot_len = (u16_t)msg->msg_iov[i].iov_len;
      /* netbuf empty, add new pbuf */
      if (chain_buf.p == NULL) {
        chain_buf.p = chain_buf.ptr = p;
        /* add pbuf to existing pbuf chain */
      } else {
        if (chain_buf.p->tot_len + p->len > 0xffff) {
          /* overflow */
          pbuf_free(p);
          goto sendmsg_emsgsize;
        }
        pbuf_cat(chain_buf.p, p);
      }
    }
    /* save size of total chain */
    if (err == ERR_OK) {
      size = netbuf_len(&chain_buf);
    }
#endif /* LWIP_NETIF_TX_SINGLE_PBUF */

    if (err == ERR_OK) {
#if LWIP_IPV4 && LWIP_IPV6
      /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
      if (IP_IS_V6_VAL(chain_buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&chain_buf.addr))) {
        unmap_ipv4_mapped_ipv6(ip_2_ip4(&chain_buf.addr), ip_2_ip6(&chain_buf.addr));
        IP_SET_TYPE_VAL(chain_buf.addr, IPADDR_TYPE_V4);
      }
#endif /* LWIP_IPV4 && LWIP_IPV6 */

      /* send the data */
      err = netconn_send(sock->conn, &chain_buf);
    }

    /* deallocated the buffer */
    netbuf_free(&chain_buf);

    sock_set_errno(sock, err_to_errno(err));
    done_socket(sock);
    return (err == ERR_OK ? size : -1);
  sendmsg_emsgsize:
    sock_set_errno(sock, EMSGSIZE);
    netbuf_free(&chain_buf);
    done_socket(sock);
    return -1;
  }
#else /* LWIP_UDP || LWIP_RAW */
  sock_set_errno(sock, err_to_errno(ERR_ARG));
  done_socket(sock);
  return -1;
#endif /* LWIP_UDP || LWIP_RAW */
}
1645
/** Send a datagram to a given remote address (or forward to lwip_send()
 * for TCP sockets, where the destination is ignored).
 *
 * @param s the socket to send on
 * @param data buffer holding the data to send
 * @param size number of bytes to send (limited to one datagram)
 * @param flags MSG_MORE and/or MSG_DONTWAIT (TCP only)
 * @param to destination address, or NULL to use the connected peer
 * @param tolen size of 'to'
 * @return number of bytes sent, or -1 on error (sets errno)
 */
ssize_t
lwip_sendto(int s, const void *data, size_t size, int flags,
            const struct sockaddr *to, socklen_t tolen)
{
  struct lwip_sock *sock;
  err_t err;
  u16_t short_size;
  u16_t remote_port;
  struct netbuf buf;

  sock = get_socket(s);
  if (!sock) {
    return -1;
  }

  if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
#if LWIP_TCP
    done_socket(sock);
    return lwip_send(s, data, size, flags);
#else /* LWIP_TCP */
    LWIP_UNUSED_ARG(flags);
    sock_set_errno(sock, err_to_errno(ERR_ARG));
    done_socket(sock);
    return -1;
#endif /* LWIP_TCP */
  }

  if (size > LWIP_MIN(0xFFFF, SSIZE_MAX)) {
    /* cannot fit into one datagram (at least for us) */
    sock_set_errno(sock, EMSGSIZE);
    done_socket(sock);
    return -1;
  }
  short_size = (u16_t)size;
  LWIP_ERROR("lwip_sendto: invalid address", (((to == NULL) && (tolen == 0)) ||
             (IS_SOCK_ADDR_LEN_VALID(tolen) &&
              ((to != NULL) && (IS_SOCK_ADDR_TYPE_VALID(to) && IS_SOCK_ADDR_ALIGNED(to))))),
             sock_set_errno(sock, err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
  LWIP_UNUSED_ARG(tolen);

  /* initialize a buffer */
  buf.p = buf.ptr = NULL;
#if LWIP_CHECKSUM_ON_COPY
  buf.flags = 0;
#endif /* LWIP_CHECKSUM_ON_COPY */
  if (to) {
    SOCKADDR_TO_IPADDR_PORT(to, &buf.addr, remote_port);
  } else {
    /* no destination given: send to the "any" address (connected peer) */
    remote_port = 0;
    ip_addr_set_any(NETCONNTYPE_ISIPV6(netconn_type(sock->conn)), &buf.addr);
  }
  netbuf_fromport(&buf) = remote_port;


  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_sendto(%d, data=%p, short_size=%"U16_F", flags=0x%x to=",
                              s, data, short_size, flags));
  ip_addr_debug_print_val(SOCKETS_DEBUG, buf.addr);
  LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", remote_port));

  /* make the buffer point to the data that should be sent */
#if LWIP_NETIF_TX_SINGLE_PBUF
  /* Allocate a new netbuf and copy the data into it. */
  if (netbuf_alloc(&buf, short_size) == NULL) {
    err = ERR_MEM;
  } else {
#if LWIP_CHECKSUM_ON_COPY
    if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_RAW) {
      /* compute the checksum while copying to avoid a second pass */
      u16_t chksum = LWIP_CHKSUM_COPY(buf.p->payload, data, short_size);
      netbuf_set_chksum(&buf, chksum);
    } else
#endif /* LWIP_CHECKSUM_ON_COPY */
    {
      MEMCPY(buf.p->payload, data, short_size);
    }
    err = ERR_OK;
  }
#else /* LWIP_NETIF_TX_SINGLE_PBUF */
  err = netbuf_ref(&buf, data, short_size);
#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
  if (err == ERR_OK) {
#if LWIP_IPV4 && LWIP_IPV6
    /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
    if (IP_IS_V6_VAL(buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&buf.addr))) {
      unmap_ipv4_mapped_ipv6(ip_2_ip4(&buf.addr), ip_2_ip6(&buf.addr));
      IP_SET_TYPE_VAL(buf.addr, IPADDR_TYPE_V4);
    }
#endif /* LWIP_IPV4 && LWIP_IPV6 */

    /* send the data */
    err = netconn_send(sock->conn, &buf);
  }

  /* deallocated the buffer */
  netbuf_free(&buf);

  sock_set_errno(sock, err_to_errno(err));
  done_socket(sock);
  return (err == ERR_OK ? short_size : -1);
}
1745
/** Create a new socket of the given type.
 *
 * Allocates a netconn of the matching type plus a socket descriptor slot.
 *
 * @param domain address family (PF_INET / PF_INET6, see DOMAIN_TO_NETCONN_TYPE)
 * @param type SOCK_RAW, SOCK_DGRAM or SOCK_STREAM
 * @param protocol protocol number (e.g. IPPROTO_UDPLITE for SOCK_DGRAM)
 * @return the new socket descriptor, or -1 on error (sets errno)
 */
int
lwip_socket(int domain, int type, int protocol)
{
  struct netconn *conn;
  int i;

  LWIP_UNUSED_ARG(domain); /* @todo: check this */

  /* create a netconn */
  switch (type) {
    case SOCK_RAW:
      conn = netconn_new_with_proto_and_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_RAW),
             (u8_t)protocol, DEFAULT_SOCKET_EVENTCB);
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_RAW, %d) = ",
                                  domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
      break;
    case SOCK_DGRAM:
      conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain,
             ((protocol == IPPROTO_UDPLITE) ? NETCONN_UDPLITE : NETCONN_UDP)),
             DEFAULT_SOCKET_EVENTCB);
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_DGRAM, %d) = ",
                                  domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
#if LWIP_NETBUF_RECVINFO
      if (conn) {
        /* netconn layer enables pktinfo by default, sockets default to off */
        conn->flags &= ~NETCONN_FLAG_PKTINFO;
      }
#endif /* LWIP_NETBUF_RECVINFO */
      break;
    case SOCK_STREAM:
      conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_TCP), DEFAULT_SOCKET_EVENTCB);
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_STREAM, %d) = ",
                                  domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
      break;
    default:
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%d, %d/UNKNOWN, %d) = -1\n",
                                  domain, type, protocol));
      set_errno(EINVAL);
      return -1;
  }

  if (!conn) {
    LWIP_DEBUGF(SOCKETS_DEBUG, ("-1 / ENOBUFS (could not create netconn)\n"));
    set_errno(ENOBUFS);
    return -1;
  }

  /* bind the netconn to a free socket descriptor slot */
  i = alloc_socket(conn, 0);

  if (i == -1) {
    netconn_delete(conn);
    set_errno(ENFILE);
    return -1;
  }
  /* back-link the netconn to its socket descriptor */
  conn->socket = i;
  done_socket(&sockets[i - LWIP_SOCKET_OFFSET]);
  LWIP_DEBUGF(SOCKETS_DEBUG, ("%d\n", i));
  set_errno(0);
  return i;
}
1806
1807 ssize_t
lwip_write(int s,const void * data,size_t size)1808 lwip_write(int s, const void *data, size_t size)
1809 {
1810 return lwip_send(s, data, size, 0);
1811 }
1812
1813 ssize_t
lwip_writev(int s,const struct iovec * iov,int iovcnt)1814 lwip_writev(int s, const struct iovec *iov, int iovcnt)
1815 {
1816 struct msghdr msg;
1817
1818 msg.msg_name = NULL;
1819 msg.msg_namelen = 0;
1820 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1821 Blame the opengroup standard for this inconsistency. */
1822 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1823 msg.msg_iovlen = iovcnt;
1824 msg.msg_control = NULL;
1825 msg.msg_controllen = 0;
1826 msg.msg_flags = 0;
1827 return lwip_sendmsg(s, &msg, 0);
1828 }
1829
1830 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
1831 /* Add select_cb to select_cb_list. */
1832 static void
lwip_link_select_cb(struct lwip_select_cb * select_cb)1833 lwip_link_select_cb(struct lwip_select_cb *select_cb)
1834 {
1835 LWIP_SOCKET_SELECT_DECL_PROTECT(lev);
1836
1837 /* Protect the select_cb_list */
1838 LWIP_SOCKET_SELECT_PROTECT(lev);
1839
1840 /* Put this select_cb on top of list */
1841 select_cb->next = select_cb_list;
1842 if (select_cb_list != NULL) {
1843 select_cb_list->prev = select_cb;
1844 }
1845 select_cb_list = select_cb;
1846 #if !LWIP_TCPIP_CORE_LOCKING
1847 /* Increasing this counter tells select_check_waiters that the list has changed. */
1848 select_cb_ctr++;
1849 #endif
1850
1851 /* Now we can safely unprotect */
1852 LWIP_SOCKET_SELECT_UNPROTECT(lev);
1853 }
1854
1855 /* Remove select_cb from select_cb_list. */
1856 static void
lwip_unlink_select_cb(struct lwip_select_cb * select_cb)1857 lwip_unlink_select_cb(struct lwip_select_cb *select_cb)
1858 {
1859 LWIP_SOCKET_SELECT_DECL_PROTECT(lev);
1860
1861 /* Take us off the list */
1862 LWIP_SOCKET_SELECT_PROTECT(lev);
1863 if (select_cb->next != NULL) {
1864 select_cb->next->prev = select_cb->prev;
1865 }
1866 if (select_cb_list == select_cb) {
1867 LWIP_ASSERT("select_cb->prev == NULL", select_cb->prev == NULL);
1868 select_cb_list = select_cb->next;
1869 } else {
1870 LWIP_ASSERT("select_cb->prev != NULL", select_cb->prev != NULL);
1871 select_cb->prev->next = select_cb->next;
1872 }
1873 #if !LWIP_TCPIP_CORE_LOCKING
1874 /* Increasing this counter tells select_check_waiters that the list has changed. */
1875 select_cb_ctr++;
1876 #endif
1877 LWIP_SOCKET_SELECT_UNPROTECT(lev);
1878 }
1879 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
1880
1881 #if LWIP_SOCKET_SELECT
1882 /**
1883 * Go through the readset and writeset lists and see which socket of the sockets
1884 * set in the sets has events. On return, readset, writeset and exceptset have
1885 * the sockets enabled that had events.
1886 *
1887 * @param maxfdp1 the highest socket index in the sets
1888 * @param readset_in set of sockets to check for read events
1889 * @param writeset_in set of sockets to check for write events
1890 * @param exceptset_in set of sockets to check for error events
1891 * @param readset_out set of sockets that had read events
1892 * @param writeset_out set of sockets that had write events
1893 * @param exceptset_out set os sockets that had error events
1894 * @return number of sockets that had events (read/write/exception) (>= 0)
1895 */
1896 static int
lwip_selscan(int maxfdp1,fd_set * readset_in,fd_set * writeset_in,fd_set * exceptset_in,fd_set * readset_out,fd_set * writeset_out,fd_set * exceptset_out)1897 lwip_selscan(int maxfdp1, fd_set *readset_in, fd_set *writeset_in, fd_set *exceptset_in,
1898 fd_set *readset_out, fd_set *writeset_out, fd_set *exceptset_out)
1899 {
1900 int i, nready = 0;
1901 fd_set lreadset, lwriteset, lexceptset;
1902 struct lwip_sock *sock;
1903 SYS_ARCH_DECL_PROTECT(lev);
1904
1905 FD_ZERO(&lreadset);
1906 FD_ZERO(&lwriteset);
1907 FD_ZERO(&lexceptset);
1908
1909 /* Go through each socket in each list to count number of sockets which
1910 currently match */
1911 for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
1912 /* if this FD is not in the set, continue */
1913 if (!(readset_in && FD_ISSET(i, readset_in)) &&
1914 !(writeset_in && FD_ISSET(i, writeset_in)) &&
1915 !(exceptset_in && FD_ISSET(i, exceptset_in))) {
1916 continue;
1917 }
1918 /* First get the socket's status (protected)... */
1919 SYS_ARCH_PROTECT(lev);
1920 sock = tryget_socket_unconn_locked(i);
1921 if (sock != NULL) {
1922 void *lastdata = sock->lastdata.pbuf;
1923 s16_t rcvevent = sock->rcvevent;
1924 u16_t sendevent = sock->sendevent;
1925 u16_t errevent = sock->errevent;
1926 SYS_ARCH_UNPROTECT(lev);
1927
1928 /* ... then examine it: */
1929 /* See if netconn of this socket is ready for read */
1930 if (readset_in && FD_ISSET(i, readset_in) && ((lastdata != NULL) || (rcvevent > 0))) {
1931 FD_SET(i, &lreadset);
1932 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for reading\n", i));
1933 nready++;
1934 }
1935 /* See if netconn of this socket is ready for write */
1936 if (writeset_in && FD_ISSET(i, writeset_in) && (sendevent != 0)) {
1937 FD_SET(i, &lwriteset);
1938 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for writing\n", i));
1939 nready++;
1940 }
1941 /* See if netconn of this socket had an error */
1942 if (exceptset_in && FD_ISSET(i, exceptset_in) && (errevent != 0)) {
1943 FD_SET(i, &lexceptset);
1944 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for exception\n", i));
1945 nready++;
1946 }
1947 done_socket(sock);
1948 } else {
1949 SYS_ARCH_UNPROTECT(lev);
1950 /* no a valid open socket */
1951 return -1;
1952 }
1953 }
1954 /* copy local sets to the ones provided as arguments */
1955 *readset_out = lreadset;
1956 *writeset_out = lwriteset;
1957 *exceptset_out = lexceptset;
1958
1959 LWIP_ASSERT("nready >= 0", nready >= 0);
1960 return nready;
1961 }
1962
1963 #if LWIP_NETCONN_FULLDUPLEX
1964 /* Mark all of the set sockets in one of the three fdsets passed to select as used.
1965 * All sockets are marked (and later unmarked), whether they are open or not.
1966 * This is OK as lwip_selscan aborts select when non-open sockets are found.
1967 */
1968 static void
lwip_select_inc_sockets_used_set(int maxfdp,fd_set * fdset,fd_set * used_sockets)1969 lwip_select_inc_sockets_used_set(int maxfdp, fd_set *fdset, fd_set *used_sockets)
1970 {
1971 SYS_ARCH_DECL_PROTECT(lev);
1972 if (fdset) {
1973 int i;
1974 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
1975 /* if this FD is in the set, lock it (unless already done) */
1976 if (FD_ISSET(i, fdset) && !FD_ISSET(i, used_sockets)) {
1977 struct lwip_sock *sock;
1978 SYS_ARCH_PROTECT(lev);
1979 sock = tryget_socket_unconn_locked(i);
1980 if (sock != NULL) {
1981 /* leave the socket used until released by lwip_select_dec_sockets_used */
1982 FD_SET(i, used_sockets);
1983 }
1984 SYS_ARCH_UNPROTECT(lev);
1985 }
1986 }
1987 }
1988 }
1989
1990 /* Mark all sockets passed to select as used to prevent them from being freed
1991 * from other threads while select is running.
1992 * Marked sockets are added to 'used_sockets' to mark them only once an be able
1993 * to unmark them correctly.
1994 */
1995 static void
lwip_select_inc_sockets_used(int maxfdp,fd_set * fdset1,fd_set * fdset2,fd_set * fdset3,fd_set * used_sockets)1996 lwip_select_inc_sockets_used(int maxfdp, fd_set *fdset1, fd_set *fdset2, fd_set *fdset3, fd_set *used_sockets)
1997 {
1998 FD_ZERO(used_sockets);
1999 lwip_select_inc_sockets_used_set(maxfdp, fdset1, used_sockets);
2000 lwip_select_inc_sockets_used_set(maxfdp, fdset2, used_sockets);
2001 lwip_select_inc_sockets_used_set(maxfdp, fdset3, used_sockets);
2002 }
2003
2004 /* Let go all sockets that were marked as used when starting select */
2005 static void
lwip_select_dec_sockets_used(int maxfdp,fd_set * used_sockets)2006 lwip_select_dec_sockets_used(int maxfdp, fd_set *used_sockets)
2007 {
2008 int i;
2009 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
2010 /* if this FD is not in the set, continue */
2011 if (FD_ISSET(i, used_sockets)) {
2012 struct lwip_sock *sock = tryget_socket_unconn_nouse(i);
2013 LWIP_ASSERT("socket gone at the end of select", sock != NULL);
2014 if (sock != NULL) {
2015 done_socket(sock);
2016 }
2017 }
2018 }
2019 }
2020 #else /* LWIP_NETCONN_FULLDUPLEX */
2021 #define lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, used_sockets)
2022 #define lwip_select_dec_sockets_used(maxfdp1, used_sockets)
2023 #endif /* LWIP_NETCONN_FULLDUPLEX */
2024
/**
 * Wait for events on a set of sockets (BSD select() semantics).
 *
 * First scans the fd_sets for already-pending events. If none are pending
 * and blocking is allowed, registers a lwip_select_cb on the global waiter
 * list, rescans (to close the race with events that arrived in between),
 * and blocks on a semaphore until event_callback signals it or the timeout
 * expires.
 *
 * @param maxfdp1 highest fd number in any set, plus 1
 * @param readset sockets to check for read events (in/out)
 * @param writeset sockets to check for write events (in/out)
 * @param exceptset sockets to check for error events (in/out)
 * @param timeout NULL to wait forever; a zero timeval polls without blocking
 * @return number of ready sockets, 0 on timeout, -1 on error (errno is set)
 */
int
lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset,
            struct timeval *timeout)
{
  u32_t waitres = 0;
  int nready;
  fd_set lreadset, lwriteset, lexceptset;
  u32_t msectimeout;
  int i;
  int maxfdp2;
#if LWIP_NETCONN_SEM_PER_THREAD
  int waited = 0;
#endif
#if LWIP_NETCONN_FULLDUPLEX
  fd_set used_sockets;
#endif
  SYS_ARCH_DECL_PROTECT(lev);

  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%"S32_F" tvusec=%"S32_F")\n",
                              maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset,
                              timeout ? (s32_t)timeout->tv_sec : (s32_t) - 1,
                              timeout ? (s32_t)timeout->tv_usec : (s32_t) - 1));

  if ((maxfdp1 < 0) || (maxfdp1 > LWIP_SELECT_MAXNFDS)) {
    set_errno(EINVAL);
    return -1;
  }

  /* keep all involved sockets alive for the duration of this call */
  lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, &used_sockets);

  /* Go through each socket in each list to count number of sockets which
     currently match */
  nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);

  if (nready < 0) {
    /* one of the sockets in one of the fd_sets was invalid */
    set_errno(EBADF);
    lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
    return -1;
  } else if (nready > 0) {
    /* one or more sockets are set, no need to wait */
    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
  } else {
    /* If we don't have any current events, then suspend if we are supposed to */
    if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) {
      /* zero timeout: plain poll, do not block */
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n"));
      /* This is OK as the local fdsets are empty and nready is zero,
         or we would have returned earlier. */
    } else {
      /* None ready: add our semaphore to list:
         We don't actually need any dynamic memory. Our entry on the
         list is only valid while we are in this function, so it's ok
         to use local variables (unless we're running in MPU compatible
         mode). */
      API_SELECT_CB_VAR_DECLARE(select_cb);
      API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(ENOMEM); lwip_select_dec_sockets_used(maxfdp1, &used_sockets); return -1);
      memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));

      API_SELECT_CB_VAR_REF(select_cb).readset = readset;
      API_SELECT_CB_VAR_REF(select_cb).writeset = writeset;
      API_SELECT_CB_VAR_REF(select_cb).exceptset = exceptset;
#if LWIP_NETCONN_SEM_PER_THREAD
      /* reuse the thread-local semaphore instead of creating a new one */
      API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
#else /* LWIP_NETCONN_SEM_PER_THREAD */
      if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
        /* failed to create semaphore */
        set_errno(ENOMEM);
        lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
        API_SELECT_CB_VAR_FREE(select_cb);
        return -1;
      }
#endif /* LWIP_NETCONN_SEM_PER_THREAD */

      lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));

      /* Increase select_waiting for each socket we are interested in */
      maxfdp2 = maxfdp1;
      for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
        if ((readset && FD_ISSET(i, readset)) ||
            (writeset && FD_ISSET(i, writeset)) ||
            (exceptset && FD_ISSET(i, exceptset))) {
          struct lwip_sock *sock;
          SYS_ARCH_PROTECT(lev);
          sock = tryget_socket_unconn_locked(i);
          if (sock != NULL) {
            sock->select_waiting++;
            if (sock->select_waiting == 0) {
              /* overflow - too many threads waiting */
              sock->select_waiting--;
              nready = -1;
              /* maxfdp2 limits the cleanup loop below to the sockets we
                 actually incremented */
              maxfdp2 = i;
              SYS_ARCH_UNPROTECT(lev);
              done_socket(sock);
              set_errno(EBUSY);
              break;
            }
            SYS_ARCH_UNPROTECT(lev);
            done_socket(sock);
          } else {
            /* Not a valid socket */
            nready = -1;
            maxfdp2 = i;
            SYS_ARCH_UNPROTECT(lev);
            set_errno(EBADF);
            break;
          }
        }
      }

      if (nready >= 0) {
        /* Call lwip_selscan again: there could have been events between
           the last scan (without us on the list) and putting us on the list! */
        nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
        if (nready < 0) {
          set_errno(EBADF);
        } else if (!nready) {
          /* Still none ready, just wait to be woken */
          if (timeout == 0) {
            /* NULL timeout pointer: wait forever (0 means "forever" to
               sys_arch_sem_wait) */
            msectimeout = 0;
          } else {
            /* round tv_usec up to the nearest millisecond */
            long msecs_long = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500) / 1000));
            if (msecs_long <= 0) {
              /* Wait 1ms at least (0 means wait forever) */
              msectimeout = 1;
            } else {
              msectimeout = (u32_t)msecs_long;
            }
          }

          waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
#if LWIP_NETCONN_SEM_PER_THREAD
          waited = 1;
#endif
        }
      }

      /* Decrease select_waiting for each socket we are interested in
         (only up to maxfdp2, which marks how far the increment loop got) */
      for (i = LWIP_SOCKET_OFFSET; i < maxfdp2; i++) {
        if ((readset && FD_ISSET(i, readset)) ||
            (writeset && FD_ISSET(i, writeset)) ||
            (exceptset && FD_ISSET(i, exceptset))) {
          struct lwip_sock *sock;
          SYS_ARCH_PROTECT(lev);
          sock = tryget_socket_unconn_nouse(i);
          LWIP_ASSERT("socket gone at the end of select", sock != NULL);
          if (sock != NULL) {
            /* for now, handle select_waiting==0... */
            LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
            if (sock->select_waiting > 0) {
              sock->select_waiting--;
            }
            SYS_ARCH_UNPROTECT(lev);
          } else {
            SYS_ARCH_UNPROTECT(lev);
            /* Not a valid socket */
            nready = -1;
            set_errno(EBADF);
          }
        }
      }

      lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));

#if LWIP_NETCONN_SEM_PER_THREAD
      if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
        /* don't leave the thread-local semaphore signalled */
        sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
      }
#else /* LWIP_NETCONN_SEM_PER_THREAD */
      sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
#endif /* LWIP_NETCONN_SEM_PER_THREAD */
      API_SELECT_CB_VAR_FREE(select_cb);

      if (nready < 0) {
        /* This happens when a socket got closed while waiting */
        lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
        return -1;
      }

      if (waitres == SYS_ARCH_TIMEOUT) {
        /* Timeout */
        LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n"));
        /* This is OK as the local fdsets are empty and nready is zero,
           or we would have returned earlier. */
      } else {
        /* See what's set now after waiting */
        nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
        LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
        if (nready < 0) {
          set_errno(EBADF);
          lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
          return -1;
        }
      }
    }
  }

  lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
  set_errno(0);
  /* copy the locally-built result sets back to the caller's sets */
  if (readset) {
    *readset = lreadset;
  }
  if (writeset) {
    *writeset = lwriteset;
  }
  if (exceptset) {
    *exceptset = lexceptset;
  }
  return nready;
}
2236 #endif /* LWIP_SOCKET_SELECT */
2237
2238 #if LWIP_SOCKET_POLL
/** Options for the lwip_pollscan function (bit flags, OR-able). */
enum lwip_pollscan_opts
{
  /** Clear revents in each struct pollfd. */
  LWIP_POLLSCAN_CLEAR = 1,

  /** Increment select_waiting in each struct lwip_sock. */
  LWIP_POLLSCAN_INC_WAIT = 2,

  /** Decrement select_waiting in each struct lwip_sock. */
  LWIP_POLLSCAN_DEC_WAIT = 4
};
2251
2252 /**
2253 * Update revents in each struct pollfd.
2254 * Optionally update select_waiting in struct lwip_sock.
2255 *
2256 * @param fds array of structures to update
2257 * @param nfds number of structures in fds
2258 * @param opts what to update and how
2259 * @return number of structures that have revents != 0
2260 */
2261 static int
lwip_pollscan(struct pollfd * fds,nfds_t nfds,enum lwip_pollscan_opts opts)2262 lwip_pollscan(struct pollfd *fds, nfds_t nfds, enum lwip_pollscan_opts opts)
2263 {
2264 int nready = 0;
2265 nfds_t fdi;
2266 struct lwip_sock *sock;
2267 SYS_ARCH_DECL_PROTECT(lev);
2268
2269 /* Go through each struct pollfd in the array. */
2270 for (fdi = 0; fdi < nfds; fdi++) {
2271 if ((opts & LWIP_POLLSCAN_CLEAR) != 0) {
2272 fds[fdi].revents = 0;
2273 }
2274
2275 /* Negative fd means the caller wants us to ignore this struct.
2276 POLLNVAL means we already detected that the fd is invalid;
2277 if another thread has since opened a new socket with that fd,
2278 we must not use that socket. */
2279 if (fds[fdi].fd >= 0 && (fds[fdi].revents & POLLNVAL) == 0) {
2280 /* First get the socket's status (protected)... */
2281 SYS_ARCH_PROTECT(lev);
2282 sock = tryget_socket_unconn_locked(fds[fdi].fd);
2283 if (sock != NULL) {
2284 void* lastdata = sock->lastdata.pbuf;
2285 s16_t rcvevent = sock->rcvevent;
2286 u16_t sendevent = sock->sendevent;
2287 u16_t errevent = sock->errevent;
2288
2289 if ((opts & LWIP_POLLSCAN_INC_WAIT) != 0) {
2290 sock->select_waiting++;
2291 if (sock->select_waiting == 0) {
2292 /* overflow - too many threads waiting */
2293 sock->select_waiting--;
2294 nready = -1;
2295 SYS_ARCH_UNPROTECT(lev);
2296 done_socket(sock);
2297 break;
2298 }
2299 } else if ((opts & LWIP_POLLSCAN_DEC_WAIT) != 0) {
2300 /* for now, handle select_waiting==0... */
2301 LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
2302 if (sock->select_waiting > 0) {
2303 sock->select_waiting--;
2304 }
2305 }
2306 SYS_ARCH_UNPROTECT(lev);
2307 done_socket(sock);
2308
2309 /* ... then examine it: */
2310 /* See if netconn of this socket is ready for read */
2311 if ((fds[fdi].events & POLLIN) != 0 && ((lastdata != NULL) || (rcvevent > 0))) {
2312 fds[fdi].revents |= POLLIN;
2313 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for reading\n", fds[fdi].fd));
2314 }
2315 /* See if netconn of this socket is ready for write */
2316 if ((fds[fdi].events & POLLOUT) != 0 && (sendevent != 0)) {
2317 fds[fdi].revents |= POLLOUT;
2318 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for writing\n", fds[fdi].fd));
2319 }
2320 /* See if netconn of this socket had an error */
2321 if (errevent != 0) {
2322 /* POLLERR is output only. */
2323 fds[fdi].revents |= POLLERR;
2324 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for exception\n", fds[fdi].fd));
2325 }
2326 } else {
2327 /* Not a valid socket */
2328 SYS_ARCH_UNPROTECT(lev);
2329 /* POLLNVAL is output only. */
2330 fds[fdi].revents |= POLLNVAL;
2331 return -1;
2332 }
2333 }
2334
2335 /* Will return the number of structures that have events,
2336 not the number of events. */
2337 if (fds[fdi].revents != 0) {
2338 nready++;
2339 }
2340 }
2341
2342 LWIP_ASSERT("nready >= 0", nready >= 0);
2343 return nready;
2344 }
2345
2346 #if LWIP_NETCONN_FULLDUPLEX
2347 /* Mark all sockets as used.
2348 *
2349 * All sockets are marked (and later unmarked), whether they are open or not.
2350 * This is OK as lwip_pollscan aborts select when non-open sockets are found.
2351 */
2352 static void
lwip_poll_inc_sockets_used(struct pollfd * fds,nfds_t nfds)2353 lwip_poll_inc_sockets_used(struct pollfd *fds, nfds_t nfds)
2354 {
2355 nfds_t fdi;
2356
2357 if(fds) {
2358 /* Go through each struct pollfd in the array. */
2359 for (fdi = 0; fdi < nfds; fdi++) {
2360 /* Increase the reference counter */
2361 tryget_socket_unconn(fds[fdi].fd);
2362 }
2363 }
2364 }
2365
2366 /* Let go all sockets that were marked as used when starting poll */
2367 static void
lwip_poll_dec_sockets_used(struct pollfd * fds,nfds_t nfds)2368 lwip_poll_dec_sockets_used(struct pollfd *fds, nfds_t nfds)
2369 {
2370 nfds_t fdi;
2371
2372 if(fds) {
2373 /* Go through each struct pollfd in the array. */
2374 for (fdi = 0; fdi < nfds; fdi++) {
2375 struct lwip_sock *sock = tryget_socket_unconn_nouse(fds[fdi].fd);
2376 if (sock != NULL) {
2377 done_socket(sock);
2378 }
2379 }
2380 }
2381 }
2382 #else /* LWIP_NETCONN_FULLDUPLEX */
2383 #define lwip_poll_inc_sockets_used(fds, nfds)
2384 #define lwip_poll_dec_sockets_used(fds, nfds)
2385 #endif /* LWIP_NETCONN_FULLDUPLEX */
2386
2387 int
lwip_poll(struct pollfd * fds,nfds_t nfds,int timeout)2388 lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout)
2389 {
2390 u32_t waitres = 0;
2391 int nready;
2392 u32_t msectimeout;
2393 #if LWIP_NETCONN_SEM_PER_THREAD
2394 int waited = 0;
2395 #endif
2396
2397 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll(%p, %d, %d)\n",
2398 (void*)fds, (int)nfds, timeout));
2399 LWIP_ERROR("lwip_poll: invalid fds", ((fds != NULL && nfds > 0) || (fds == NULL && nfds == 0)),
2400 set_errno(EINVAL); return -1;);
2401
2402 lwip_poll_inc_sockets_used(fds, nfds);
2403
2404 /* Go through each struct pollfd to count number of structures
2405 which currently match */
2406 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_CLEAR);
2407
2408 if (nready < 0) {
2409 lwip_poll_dec_sockets_used(fds, nfds);
2410 return -1;
2411 }
2412
2413 /* If we don't have any current events, then suspend if we are supposed to */
2414 if (!nready) {
2415 API_SELECT_CB_VAR_DECLARE(select_cb);
2416
2417 if (timeout == 0) {
2418 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: no timeout, returning 0\n"));
2419 goto return_success;
2420 }
2421 API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(EAGAIN); lwip_poll_dec_sockets_used(fds, nfds); return -1);
2422 memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));
2423
2424 /* None ready: add our semaphore to list:
2425 We don't actually need any dynamic memory. Our entry on the
2426 list is only valid while we are in this function, so it's ok
2427 to use local variables. */
2428
2429 API_SELECT_CB_VAR_REF(select_cb).poll_fds = fds;
2430 API_SELECT_CB_VAR_REF(select_cb).poll_nfds = nfds;
2431 #if LWIP_NETCONN_SEM_PER_THREAD
2432 API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
2433 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2434 if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
2435 /* failed to create semaphore */
2436 set_errno(EAGAIN);
2437 lwip_poll_dec_sockets_used(fds, nfds);
2438 API_SELECT_CB_VAR_FREE(select_cb);
2439 return -1;
2440 }
2441 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2442
2443 lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2444
2445 /* Increase select_waiting for each socket we are interested in.
2446 Also, check for events again: there could have been events between
2447 the last scan (without us on the list) and putting us on the list! */
2448 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_INC_WAIT);
2449
2450 if (!nready) {
2451 /* Still none ready, just wait to be woken */
2452 if (timeout < 0) {
2453 /* Wait forever */
2454 msectimeout = 0;
2455 } else {
2456 /* timeout == 0 would have been handled earlier. */
2457 LWIP_ASSERT("timeout > 0", timeout > 0);
2458 msectimeout = timeout;
2459 }
2460 waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
2461 #if LWIP_NETCONN_SEM_PER_THREAD
2462 waited = 1;
2463 #endif
2464 }
2465
2466 /* Decrease select_waiting for each socket we are interested in,
2467 and check which events occurred while we waited. */
2468 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_DEC_WAIT);
2469
2470 lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2471
2472 #if LWIP_NETCONN_SEM_PER_THREAD
2473 if (select_cb.sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
2474 /* don't leave the thread-local semaphore signalled */
2475 sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
2476 }
2477 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2478 sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
2479 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2480 API_SELECT_CB_VAR_FREE(select_cb);
2481
2482 if (nready < 0) {
2483 /* This happens when a socket got closed while waiting */
2484 lwip_poll_dec_sockets_used(fds, nfds);
2485 return -1;
2486 }
2487
2488 if (waitres == SYS_ARCH_TIMEOUT) {
2489 /* Timeout */
2490 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: timeout expired\n"));
2491 goto return_success;
2492 }
2493 }
2494
2495 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: nready=%d\n", nready));
2496 return_success:
2497 lwip_poll_dec_sockets_used(fds, nfds);
2498 set_errno(0);
2499 return nready;
2500 }
2501
2502 /**
2503 * Check whether event_callback should wake up a thread waiting in
2504 * lwip_poll.
2505 */
2506 static int
lwip_poll_should_wake(const struct lwip_select_cb * scb,int fd,int has_recvevent,int has_sendevent,int has_errevent)2507 lwip_poll_should_wake(const struct lwip_select_cb *scb, int fd, int has_recvevent, int has_sendevent, int has_errevent)
2508 {
2509 nfds_t fdi;
2510 for (fdi = 0; fdi < scb->poll_nfds; fdi++) {
2511 const struct pollfd *pollfd = &scb->poll_fds[fdi];
2512 if (pollfd->fd == fd) {
2513 /* Do not update pollfd->revents right here;
2514 that would be a data race because lwip_pollscan
2515 accesses revents without protecting. */
2516 if (has_recvevent && (pollfd->events & POLLIN) != 0) {
2517 return 1;
2518 }
2519 if (has_sendevent && (pollfd->events & POLLOUT) != 0) {
2520 return 1;
2521 }
2522 if (has_errevent) {
2523 /* POLLERR is output only. */
2524 return 1;
2525 }
2526 }
2527 }
2528 return 0;
2529 }
2530 #endif /* LWIP_SOCKET_POLL */
2531
2532 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
2533 /**
2534 * Callback registered in the netconn layer for each socket-netconn.
2535 * Processes recvevent (data available) and wakes up tasks waiting for select.
2536 *
2537 * @note for LWIP_TCPIP_CORE_LOCKING any caller of this function
2538 * must have the core lock held when signaling the following events
2539 * as they might cause select_list_cb to be checked:
2540 * NETCONN_EVT_RCVPLUS
2541 * NETCONN_EVT_SENDPLUS
2542 * NETCONN_EVT_ERROR
2543 * This requirement will be asserted in select_check_waiters()
2544 */
2545 static void
event_callback(struct netconn * conn,enum netconn_evt evt,u16_t len)2546 event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len)
2547 {
2548 int s, check_waiters;
2549 struct lwip_sock *sock;
2550 SYS_ARCH_DECL_PROTECT(lev);
2551
2552 LWIP_UNUSED_ARG(len);
2553
2554 /* Get socket */
2555 if (conn) {
2556 s = conn->socket;
2557 if (s < 0) {
2558 /* Data comes in right away after an accept, even though
2559 * the server task might not have created a new socket yet.
2560 * Just count down (or up) if that's the case and we
2561 * will use the data later. Note that only receive events
2562 * can happen before the new socket is set up. */
2563 SYS_ARCH_PROTECT(lev);
2564 if (conn->socket < 0) {
2565 if (evt == NETCONN_EVT_RCVPLUS) {
2566 /* conn->socket is -1 on initialization
2567 lwip_accept adjusts sock->recvevent if conn->socket < -1 */
2568 conn->socket--;
2569 }
2570 SYS_ARCH_UNPROTECT(lev);
2571 return;
2572 }
2573 s = conn->socket;
2574 SYS_ARCH_UNPROTECT(lev);
2575 }
2576 #if ESP_LWIP_LOCK
2577 sock = tryget_socket_unconn_nouse(s);
2578 #else
2579 sock = get_socket(s);
2580 #endif /* ESP_LWIP_LOCK */
2581 if (!sock) {
2582 return;
2583 }
2584 } else {
2585 return;
2586 }
2587
2588 check_waiters = 1;
2589 SYS_ARCH_PROTECT(lev);
2590 /* Set event as required */
2591 switch (evt) {
2592 case NETCONN_EVT_RCVPLUS:
2593 sock->rcvevent++;
2594 if (sock->rcvevent > 1) {
2595 check_waiters = 0;
2596 }
2597 break;
2598 case NETCONN_EVT_RCVMINUS:
2599 sock->rcvevent--;
2600 check_waiters = 0;
2601 break;
2602 case NETCONN_EVT_SENDPLUS:
2603 if (sock->sendevent) {
2604 check_waiters = 0;
2605 }
2606 sock->sendevent = 1;
2607 break;
2608 case NETCONN_EVT_SENDMINUS:
2609 sock->sendevent = 0;
2610 check_waiters = 0;
2611 break;
2612 case NETCONN_EVT_ERROR:
2613 sock->errevent = 1;
2614 break;
2615 default:
2616 LWIP_ASSERT("unknown event", 0);
2617 break;
2618 }
2619
2620 if (sock->select_waiting && check_waiters) {
2621 /* Save which events are active */
2622 int has_recvevent, has_sendevent, has_errevent;
2623 has_recvevent = sock->rcvevent > 0;
2624 has_sendevent = sock->sendevent != 0;
2625 has_errevent = sock->errevent != 0;
2626 SYS_ARCH_UNPROTECT(lev);
2627 /* Check any select calls waiting on this socket */
2628 #if ESP_LWIP_SELECT
2629 select_check_waiters(s, has_recvevent, has_sendevent, has_errevent, sock);
2630 #else
2631 select_check_waiters(s, has_recvevent, has_sendevent, has_errevent);
2632 #endif/* LWIP_SOCKET_SELECT */
2633 } else {
2634 SYS_ARCH_UNPROTECT(lev);
2635 }
2636 #if !ESP_LWIP_LOCK
2637 done_socket(sock);
2638 #endif /* !ESP_LWIP_LOCK */
2639 }
2640
2641 /**
2642 * Check if any select waiters are waiting on this socket and its events
2643 *
2644 * @note on synchronization of select_cb_list:
2645 * LWIP_TCPIP_CORE_LOCKING: the select_cb_list must only be accessed while holding
2646 * the core lock. We do a single pass through the list and signal any waiters.
2647 * Core lock should already be held when calling here!!!!
2648
2649 * !LWIP_TCPIP_CORE_LOCKING: we use SYS_ARCH_PROTECT but unlock on each iteration
2650 * of the loop, thus creating a possibility where a thread could modify the
2651 * select_cb_list during our UNPROTECT/PROTECT. We use a generational counter to
2652 * detect this change and restart the list walk. The list is expected to be small
2653 */
2654 #if ESP_LWIP_SELECT
select_check_waiters(int s,int has_recvevent,int has_sendevent,int has_errevent,struct lwip_sock * sock_select)2655 static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent, struct lwip_sock *sock_select)
2656 #else
2657 static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent)
2658 #endif /* LWIP_SOCKET_SELECT */
2659 {
2660 struct lwip_select_cb *scb;
2661 #if ESP_LWIP_SELECT
2662 struct lwip_sock *sock = sock_select;
2663 #else
2664 struct lwip_sock *sock
2665 #endif /* ESP_LWIP_LOCK */
2666 #if !LWIP_TCPIP_CORE_LOCKING
2667 int last_select_cb_ctr;
2668 SYS_ARCH_DECL_PROTECT(lev);
2669 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2670
2671 LWIP_ASSERT_CORE_LOCKED();
2672 #if !LWIP_TCPIP_CORE_LOCKING
2673 SYS_ARCH_PROTECT(lev);
2674 again:
2675 /* remember the state of select_cb_list to detect changes */
2676 last_select_cb_ctr = select_cb_ctr;
2677 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2678 for (scb = select_cb_list; scb != NULL; scb = scb->next) {
2679 if (scb->sem_signalled == 0) {
2680 /* semaphore not signalled yet */
2681 int do_signal = 0;
2682 #if LWIP_SOCKET_POLL
2683 if (scb->poll_fds != NULL) {
2684 do_signal = lwip_poll_should_wake(scb, s, has_recvevent, has_sendevent, has_errevent);
2685 }
2686 #endif /* LWIP_SOCKET_POLL */
2687 #if LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL
2688 else
2689 #endif /* LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL */
2690 #if LWIP_SOCKET_SELECT
2691 {
2692 /* Test this select call for our socket */
2693 #if ESP_LWIP_SELECT
2694 if (sock->rcvevent) {
2695 #else
2696 if (has_recvevent) {
2697 #endif/* ESP_LWIP_SELECT */
2698 if (scb->readset && FD_ISSET(s, scb->readset)) {
2699 do_signal = 1;
2700 }
2701 }
2702 #if ESP_LWIP_SELECT
2703 if (sock->sendevent) {
2704 #else
2705 if (has_sendevent) {
2706 #endif/* ESP_LWIP_SELECT */
2707 if (!do_signal && scb->writeset && FD_ISSET(s, scb->writeset)) {
2708 do_signal = 1;
2709 }
2710 }
2711 #if ESP_LWIP_SELECT
2712 if (sock->errevent) {
2713 #else
2714 if (has_errevent) {
2715 #endif/* ESP_LWIP_SELECT */
2716 if (!do_signal && scb->exceptset && FD_ISSET(s, scb->exceptset)) {
2717 do_signal = 1;
2718 }
2719 }
2720 }
2721 #endif /* LWIP_SOCKET_SELECT */
2722 if (do_signal) {
2723 scb->sem_signalled = 1;
2724 /* For !LWIP_TCPIP_CORE_LOCKING, we don't call SYS_ARCH_UNPROTECT() before signaling
2725 the semaphore, as this might lead to the select thread taking itself off the list,
2726 invalidating the semaphore. */
2727 sys_sem_signal(SELECT_SEM_PTR(scb->sem));
2728 }
2729 }
2730 #if LWIP_TCPIP_CORE_LOCKING
2731 }
2732 #else
2733 /* unlock interrupts with each step */
2734 SYS_ARCH_UNPROTECT(lev);
2735 /* this makes sure interrupt protection time is short */
2736 SYS_ARCH_PROTECT(lev);
2737 if (last_select_cb_ctr != select_cb_ctr) {
2738 /* someone has changed select_cb_list, restart at the beginning */
2739 goto again;
2740 }
2741 /* remember the state of select_cb_list to detect changes */
2742 last_select_cb_ctr = select_cb_ctr;
2743 }
2744 SYS_ARCH_UNPROTECT(lev);
2745 #endif
2746 }
2747 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
2748
2749 /**
2750 * Close one end of a full-duplex connection.
2751 */
2752 int
2753 lwip_shutdown(int s, int how)
2754 {
2755 struct lwip_sock *sock;
2756 err_t err;
2757 u8_t shut_rx = 0, shut_tx = 0;
2758
2759 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_shutdown(%d, how=%d)\n", s, how));
2760
2761 sock = get_socket(s);
2762 if (!sock) {
2763 return -1;
2764 }
2765
2766 if (sock->conn != NULL) {
2767 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
2768 sock_set_errno(sock, EOPNOTSUPP);
2769 done_socket(sock);
2770 return -1;
2771 }
2772 } else {
2773 sock_set_errno(sock, ENOTCONN);
2774 done_socket(sock);
2775 return -1;
2776 }
2777
2778 if (how == SHUT_RD) {
2779 shut_rx = 1;
2780 } else if (how == SHUT_WR) {
2781 shut_tx = 1;
2782 } else if (how == SHUT_RDWR) {
2783 shut_rx = 1;
2784 shut_tx = 1;
2785 } else {
2786 sock_set_errno(sock, EINVAL);
2787 done_socket(sock);
2788 return -1;
2789 }
2790 err = netconn_shutdown(sock->conn, shut_rx, shut_tx);
2791
2792 sock_set_errno(sock, err_to_errno(err));
2793 done_socket(sock);
2794 return (err == ERR_OK ? 0 : -1);
2795 }
2796
2797 static int
2798 lwip_getaddrname(int s, struct sockaddr *name, socklen_t *namelen, u8_t local)
2799 {
2800 struct lwip_sock *sock;
2801 union sockaddr_aligned saddr;
2802 ip_addr_t naddr;
2803 u16_t port;
2804 err_t err;
2805
2806 sock = get_socket(s);
2807 if (!sock) {
2808 return -1;
2809 }
2810
2811 /* get the IP address and port */
2812 err = netconn_getaddr(sock->conn, &naddr, &port, local);
2813 if (err != ERR_OK) {
2814 sock_set_errno(sock, err_to_errno(err));
2815 done_socket(sock);
2816 return -1;
2817 }
2818
2819 #if LWIP_IPV4 && LWIP_IPV6
2820 /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
2821 if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) &&
2822 IP_IS_V4_VAL(naddr)) {
2823 ip4_2_ipv4_mapped_ipv6(ip_2_ip6(&naddr), ip_2_ip4(&naddr));
2824 IP_SET_TYPE_VAL(naddr, IPADDR_TYPE_V6);
2825 }
2826 #endif /* LWIP_IPV4 && LWIP_IPV6 */
2827
2828 IPADDR_PORT_TO_SOCKADDR(&saddr, &naddr, port);
2829
2830 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getaddrname(%d, addr=", s));
2831 ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
2832 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", port));
2833
2834 /** saddr.sa.sa_len */
2835 if (*namelen > sizeof(struct sockaddr_in)) {
2836 *namelen = sizeof(struct sockaddr_in);
2837 }
2838 MEMCPY(name, &saddr, *namelen);
2839
2840 sock_set_errno(sock, 0);
2841 done_socket(sock);
2842 return 0;
2843 }
2844
/**
 * Return the address/port of the remote (peer) end of a socket
 * (BSD getpeername()). Delegates to lwip_getaddrname() with local == 0.
 */
int
lwip_getpeername(int s, struct sockaddr *name, socklen_t *namelen)
{
  return lwip_getaddrname(s, name, namelen, 0);
}
2850
/**
 * Return the local address/port a socket is bound to
 * (BSD getsockname()). Delegates to lwip_getaddrname() with local == 1.
 */
int
lwip_getsockname(int s, struct sockaddr *name, socklen_t *namelen)
{
  return lwip_getaddrname(s, name, namelen, 1);
}
2856
/**
 * Get a socket option (BSD getsockopt()).
 *
 * With LWIP_TCPIP_CORE_LOCKING the option is read directly under the core
 * lock; otherwise the request is marshalled into the tcpip thread via
 * tcpip_callback() and this thread blocks on a completion semaphore.
 *
 * @param s the socket
 * @param level protocol level (SOL_SOCKET, IPPROTO_IP, ...)
 * @param optname option to query
 * @param optval output buffer for the option value
 * @param optlen in/out: size of optval on entry, bytes written on exit
 * @return 0 on success, -1 on error (sets the socket's errno)
 */
int
lwip_getsockopt(int s, int level, int optname, void *optval, socklen_t *optlen)
{
  int err;
  struct lwip_sock *sock = get_socket(s);
#if !LWIP_TCPIP_CORE_LOCKING
  err_t cberr;
  LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
#endif /* !LWIP_TCPIP_CORE_LOCKING */

  if (!sock) {
    return -1;
  }

  if ((NULL == optval) || (NULL == optlen)) {
    sock_set_errno(sock, EFAULT);
    done_socket(sock);
    return -1;
  }

#if LWIP_TCPIP_CORE_LOCKING
  /* core-locking can just call the -impl function */
  LOCK_TCPIP_CORE();
  err = lwip_getsockopt_impl(s, level, optname, optval, optlen);
  UNLOCK_TCPIP_CORE();

#else /* LWIP_TCPIP_CORE_LOCKING */

#if LWIP_MPU_COMPATIBLE
  /* MPU_COMPATIBLE copies the optval data, so check for max size here */
  if (*optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
    sock_set_errno(sock, ENOBUFS);
    done_socket(sock);
    return -1;
  }
#endif /* LWIP_MPU_COMPATIBLE */

  /* marshal the request into a (possibly heap-allocated) data struct and
     hand it to the tcpip thread */
  LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = *optlen;
#if !LWIP_MPU_COMPATIBLE
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.p = optval;
#endif /* !LWIP_MPU_COMPATIBLE */
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
#if LWIP_NETCONN_SEM_PER_THREAD
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
#else
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
#endif
  cberr = tcpip_callback(lwip_getsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
  if (cberr != ERR_OK) {
    LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
    sock_set_errno(sock, err_to_errno(cberr));
    done_socket(sock);
    return -1;
  }
  /* block (without timeout) until the tcpip thread has executed the callback */
  sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);

  /* write back optlen and optval */
  *optlen = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen;
#if LWIP_MPU_COMPATIBLE
  MEMCPY(optval, LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval,
         LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen);
#endif /* LWIP_MPU_COMPATIBLE */

  /* maybe lwip_getsockopt_internal has changed err */
  err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
  LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
#endif /* LWIP_TCPIP_CORE_LOCKING */

  sock_set_errno(sock, err);
  done_socket(sock);
  return err ? -1 : 0;
}
2933
#if !LWIP_TCPIP_CORE_LOCKING
/** lwip_getsockopt_callback: only used without CORE_LOCKING
 * to get into the tcpip_thread
 *
 * Runs in tcpip_thread context: executes the actual option read and then
 * signals the completion semaphore the calling thread blocks on in
 * lwip_getsockopt().
 *
 * @param arg a struct lwip_setgetsockopt_data describing the request
 */
static void
lwip_getsockopt_callback(void *arg)
{
  struct lwip_setgetsockopt_data *data;
  LWIP_ASSERT("arg != NULL", arg != NULL);
  data = (struct lwip_setgetsockopt_data *)arg;

  data->err = lwip_getsockopt_impl(data->s, data->level, data->optname,
#if LWIP_MPU_COMPATIBLE
                                   data->optval,
#else /* LWIP_MPU_COMPATIBLE */
                                   data->optval.p,
#endif /* LWIP_MPU_COMPATIBLE */
                                   &data->optlen);

  /* wake the application thread waiting in lwip_getsockopt() */
  sys_sem_signal((sys_sem_t *)(data->completed_sem));
}
#endif /* !LWIP_TCPIP_CORE_LOCKING */
2956
2957 static int
2958 lwip_sockopt_to_ipopt(int optname)
2959 {
2960 /* Map SO_* values to our internal SOF_* values
2961 * We should not rely on #defines in socket.h
2962 * being in sync with ip.h.
2963 */
2964 switch (optname) {
2965 case SO_BROADCAST:
2966 return SOF_BROADCAST;
2967 case SO_KEEPALIVE:
2968 return SOF_KEEPALIVE;
2969 case SO_REUSEADDR:
2970 return SOF_REUSEADDR;
2971 default:
2972 LWIP_ASSERT("Unknown socket option", 0);
2973 return 0;
2974 }
2975 }
2976
2977 /** lwip_getsockopt_impl: the actual implementation of getsockopt:
2978 * same argument as lwip_getsockopt, either called directly or through callback
2979 */
2980 static int
2981 lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen)
2982 {
2983 int err = 0;
2984 struct lwip_sock *sock = tryget_socket(s);
2985 if (!sock) {
2986 return EBADF;
2987 }
2988
2989 #ifdef LWIP_HOOK_SOCKETS_GETSOCKOPT
2990 if (LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) {
2991 return err;
2992 }
2993 #endif
2994
2995 switch (level) {
2996
2997 /* Level: SOL_SOCKET */
2998 case SOL_SOCKET:
2999 switch (optname) {
3000
3001 #if LWIP_TCP
3002 case SO_ACCEPTCONN:
3003 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3004 if (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_TCP) {
3005 done_socket(sock);
3006 return ENOPROTOOPT;
3007 }
3008 if ((sock->conn->pcb.tcp != NULL) && (sock->conn->pcb.tcp->state == LISTEN)) {
3009 *(int *)optval = 1;
3010 } else {
3011 *(int *)optval = 0;
3012 }
3013 break;
3014 #endif /* LWIP_TCP */
3015
3016 /* The option flags */
3017 case SO_BROADCAST:
3018 case SO_KEEPALIVE:
3019 #if SO_REUSE
3020 case SO_REUSEADDR:
3021 #endif /* SO_REUSE */
3022 if ((optname == SO_BROADCAST) &&
3023 (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
3024 done_socket(sock);
3025 return ENOPROTOOPT;
3026 }
3027
3028 optname = lwip_sockopt_to_ipopt(optname);
3029
3030 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3031 *(int *)optval = ip_get_option(sock->conn->pcb.ip, optname);
3032 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, optname=0x%x, ..) = %s\n",
3033 s, optname, (*(int *)optval ? "on" : "off")));
3034 break;
3035
3036 case SO_TYPE:
3037 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3038 switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
3039 case NETCONN_RAW:
3040 *(int *)optval = SOCK_RAW;
3041 break;
3042 case NETCONN_TCP:
3043 *(int *)optval = SOCK_STREAM;
3044 break;
3045 case NETCONN_UDP:
3046 *(int *)optval = SOCK_DGRAM;
3047 break;
3048 default: /* unrecognized socket type */
3049 *(int *)optval = netconn_type(sock->conn);
3050 LWIP_DEBUGF(SOCKETS_DEBUG,
3051 ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE): unrecognized socket type %d\n",
3052 s, *(int *)optval));
3053 } /* switch (netconn_type(sock->conn)) */
3054 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE) = %d\n",
3055 s, *(int *)optval));
3056 break;
3057
3058 case SO_ERROR:
3059 LWIP_SOCKOPT_CHECK_OPTLEN(sock, *optlen, int);
3060 *(int *)optval = err_to_errno(netconn_err(sock->conn));
3061 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_ERROR) = %d\n",
3062 s, *(int *)optval));
3063 break;
3064
3065 #if LWIP_SO_SNDTIMEO
3066 case SO_SNDTIMEO:
3067 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3068 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_sendtimeout(sock->conn));
3069 break;
3070 #endif /* LWIP_SO_SNDTIMEO */
3071 #if LWIP_SO_RCVTIMEO
3072 case SO_RCVTIMEO:
3073 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3074 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_recvtimeout(sock->conn));
3075 break;
3076 #endif /* LWIP_SO_RCVTIMEO */
3077 #if LWIP_SO_RCVBUF
3078 case SO_RCVBUF:
3079 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3080 *(int *)optval = netconn_get_recvbufsize(sock->conn);
3081 break;
3082 #endif /* LWIP_SO_RCVBUF */
3083 #if LWIP_SO_LINGER
3084 case SO_LINGER: {
3085 s16_t conn_linger;
3086 struct linger *linger = (struct linger *)optval;
3087 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, struct linger);
3088 conn_linger = sock->conn->linger;
3089 if (conn_linger >= 0) {
3090 linger->l_onoff = 1;
3091 linger->l_linger = (int)conn_linger;
3092 } else {
3093 linger->l_onoff = 0;
3094 linger->l_linger = 0;
3095 }
3096 }
3097 break;
3098 #endif /* LWIP_SO_LINGER */
3099 #if LWIP_UDP
3100 case SO_NO_CHECK:
3101 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_UDP);
3102 #if LWIP_UDPLITE
3103 if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) {
3104 /* this flag is only available for UDP, not for UDP lite */
3105 done_socket(sock);
3106 return EAFNOSUPPORT;
3107 }
3108 #endif /* LWIP_UDPLITE */
3109 *(int *)optval = udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM) ? 1 : 0;
3110 break;
3111 #endif /* LWIP_UDP*/
3112 default:
3113 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
3114 s, optname));
3115 err = ENOPROTOOPT;
3116 break;
3117 } /* switch (optname) */
3118 break;
3119
3120 /* Level: IPPROTO_IP */
3121 case IPPROTO_IP:
3122 switch (optname) {
3123 case IP_TTL:
3124 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3125 *(int *)optval = sock->conn->pcb.ip->ttl;
3126 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TTL) = %d\n",
3127 s, *(int *)optval));
3128 break;
3129 case IP_TOS:
3130 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3131 *(int *)optval = sock->conn->pcb.ip->tos;
3132 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TOS) = %d\n",
3133 s, *(int *)optval));
3134 break;
3135 #if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
3136 case IP_MULTICAST_TTL:
3137 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3138 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3139 done_socket(sock);
3140 return ENOPROTOOPT;
3141 }
3142 *(u8_t *)optval = udp_get_multicast_ttl(sock->conn->pcb.udp);
3143 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_TTL) = %d\n",
3144 s, *(int *)optval));
3145 break;
3146 case IP_MULTICAST_IF:
3147 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, struct in_addr);
3148 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3149 done_socket(sock);
3150 return ENOPROTOOPT;
3151 }
3152 inet_addr_from_ip4addr((struct in_addr *)optval, udp_get_multicast_netif_addr(sock->conn->pcb.udp));
3153 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_IF) = 0x%"X32_F"\n",
3154 s, *(u32_t *)optval));
3155 break;
3156 case IP_MULTICAST_LOOP:
3157 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3158 if ((sock->conn->pcb.udp->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) {
3159 *(u8_t *)optval = 1;
3160 } else {
3161 *(u8_t *)optval = 0;
3162 }
3163 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_LOOP) = %d\n",
3164 s, *(int *)optval));
3165 break;
3166 #endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
3167 default:
3168 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
3169 s, optname));
3170 err = ENOPROTOOPT;
3171 break;
3172 } /* switch (optname) */
3173 break;
3174
3175 #if LWIP_TCP
3176 /* Level: IPPROTO_TCP */
3177 case IPPROTO_TCP:
3178 /* Special case: all IPPROTO_TCP option take an int */
3179 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_TCP);
3180 if (sock->conn->pcb.tcp->state == LISTEN) {
3181 done_socket(sock);
3182 return EINVAL;
3183 }
3184 switch (optname) {
3185 case TCP_NODELAY:
3186 *(int *)optval = tcp_nagle_disabled(sock->conn->pcb.tcp);
3187 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_NODELAY) = %s\n",
3188 s, (*(int *)optval) ? "on" : "off") );
3189 break;
3190 case TCP_KEEPALIVE:
3191 *(int *)optval = (int)sock->conn->pcb.tcp->keep_idle;
3192 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) = %d\n",
3193 s, *(int *)optval));
3194 break;
3195
3196 #if LWIP_TCP_KEEPALIVE
3197 case TCP_KEEPIDLE:
3198 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_idle / 1000);
3199 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) = %d\n",
3200 s, *(int *)optval));
3201 break;
3202 case TCP_KEEPINTVL:
3203 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_intvl / 1000);
3204 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) = %d\n",
3205 s, *(int *)optval));
3206 break;
3207 case TCP_KEEPCNT:
3208 *(int *)optval = (int)sock->conn->pcb.tcp->keep_cnt;
3209 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) = %d\n",
3210 s, *(int *)optval));
3211 break;
3212 #endif /* LWIP_TCP_KEEPALIVE */
3213 default:
3214 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
3215 s, optname));
3216 err = ENOPROTOOPT;
3217 break;
3218 } /* switch (optname) */
3219 break;
3220 #endif /* LWIP_TCP */
3221
3222 #if LWIP_IPV6
3223 /* Level: IPPROTO_IPV6 */
3224 case IPPROTO_IPV6:
3225 switch (optname) {
3226 case IPV6_V6ONLY:
3227 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3228 *(int *)optval = (netconn_get_ipv6only(sock->conn) ? 1 : 0);
3229 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY) = %d\n",
3230 s, *(int *)optval));
3231 break;
3232 default:
3233 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
3234 s, optname));
3235 err = ENOPROTOOPT;
3236 break;
3237 } /* switch (optname) */
3238 break;
3239 #endif /* LWIP_IPV6 */
3240
3241 #if ESP_IPV6
3242 #if LWIP_IPV6_MLD && LWIP_MULTICAST_TX_OPTIONS /* Multicast options, similar to LWIP_IGMP options for IPV4 */
3243 case IPV6_MULTICAST_IF: /* NB: like IP_MULTICAST_IF, this returns an IP not an index */
3244 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3245 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3246 return ENOPROTOOPT;
3247 }
3248 *(u8_t*)optval = udp_get_multicast_netif_index(sock->conn->pcb.udp);
3249 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IPV6_MULTICAST_IF) = 0x%"X32_F"\n",
3250 s, *(u32_t *)optval));
3251 break;
3252 case IPV6_MULTICAST_HOPS:
3253 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3254 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3255 return ENOPROTOOPT;
3256 }
3257 *(u8_t*)optval = udp_get_multicast_ttl(sock->conn->pcb.udp);
3258 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IP_MULTICAST_LOOP) = %d\n",
3259 s, *(int *)optval));
3260 break;
3261 case IPV6_MULTICAST_LOOP:
3262 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3263 if ((udp_flags(sock->conn->pcb.udp) & UDP_FLAGS_MULTICAST_LOOP) != 0) {
3264 *(u8_t*)optval = 1;
3265 } else {
3266 *(u8_t*)optval = 0;
3267 }
3268 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IP_MULTICAST_LOOP) = %d\n",
3269 s, *(int *)optval));
3270 break;
3271
3272 #endif /* LWIP_IPV6_MLD && LWIP_MULTICAST_TX_OPTIONS */
3273 #endif /* ESP_IPV6 */
3274
3275 #if LWIP_UDP && LWIP_UDPLITE
3276 /* Level: IPPROTO_UDPLITE */
3277 case IPPROTO_UDPLITE:
3278 /* Special case: all IPPROTO_UDPLITE option take an int */
3279 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3280 /* If this is no UDP lite socket, ignore any options. */
3281 if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
3282 done_socket(sock);
3283 return ENOPROTOOPT;
3284 }
3285 switch (optname) {
3286 case UDPLITE_SEND_CSCOV:
3287 *(int *)optval = sock->conn->pcb.udp->chksum_len_tx;
3288 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) = %d\n",
3289 s, (*(int *)optval)) );
3290 break;
3291 case UDPLITE_RECV_CSCOV:
3292 *(int *)optval = sock->conn->pcb.udp->chksum_len_rx;
3293 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) = %d\n",
3294 s, (*(int *)optval)) );
3295 break;
3296 default:
3297 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
3298 s, optname));
3299 err = ENOPROTOOPT;
3300 break;
3301 } /* switch (optname) */
3302 break;
3303 #endif /* LWIP_UDP */
3304 /* Level: IPPROTO_RAW */
3305 case IPPROTO_RAW:
3306 switch (optname) {
3307 #if LWIP_IPV6 && LWIP_RAW
3308 case IPV6_CHECKSUM:
3309 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_RAW);
3310 if (sock->conn->pcb.raw->chksum_reqd == 0) {
3311 *(int *)optval = -1;
3312 } else {
3313 *(int *)optval = sock->conn->pcb.raw->chksum_offset;
3314 }
3315 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM) = %d\n",
3316 s, (*(int *)optval)) );
3317 break;
3318 #endif /* LWIP_IPV6 && LWIP_RAW */
3319 default:
3320 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
3321 s, optname));
3322 err = ENOPROTOOPT;
3323 break;
3324 } /* switch (optname) */
3325 break;
3326 default:
3327 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
3328 s, level, optname));
3329 err = ENOPROTOOPT;
3330 break;
3331 } /* switch (level) */
3332
3333 done_socket(sock);
3334 return err;
3335 }
3336
/**
 * Set a socket option (BSD setsockopt()).
 *
 * With LWIP_TCPIP_CORE_LOCKING the option is applied directly under the
 * core lock; otherwise the request is marshalled into the tcpip thread via
 * tcpip_callback() and this thread blocks on a completion semaphore.
 *
 * @param s the socket
 * @param level protocol level (SOL_SOCKET, IPPROTO_IP, ...)
 * @param optname option to set
 * @param optval new option value
 * @param optlen size of optval in bytes
 * @return 0 on success, -1 on error (sets the socket's errno)
 */
int
lwip_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen)
{
  int err = 0;
  struct lwip_sock *sock = get_socket(s);
#if !LWIP_TCPIP_CORE_LOCKING
  err_t cberr;
  LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
#endif /* !LWIP_TCPIP_CORE_LOCKING */

  if (!sock) {
    return -1;
  }

  if (NULL == optval) {
    sock_set_errno(sock, EFAULT);
    done_socket(sock);
    return -1;
  }

#if LWIP_TCPIP_CORE_LOCKING
  /* core-locking can just call the -impl function */
  LOCK_TCPIP_CORE();
  err = lwip_setsockopt_impl(s, level, optname, optval, optlen);
  UNLOCK_TCPIP_CORE();

#else /* LWIP_TCPIP_CORE_LOCKING */

#if LWIP_MPU_COMPATIBLE
  /* MPU_COMPATIBLE copies the optval data, so check for max size here */
  if (optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
    sock_set_errno(sock, ENOBUFS);
    done_socket(sock);
    return -1;
  }
#endif /* LWIP_MPU_COMPATIBLE */

  /* marshal the request into a (possibly heap-allocated) data struct and
     hand it to the tcpip thread */
  LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = optlen;
#if LWIP_MPU_COMPATIBLE
  MEMCPY(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, optval, optlen);
#else /* LWIP_MPU_COMPATIBLE */
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.pc = (const void *)optval;
#endif /* LWIP_MPU_COMPATIBLE */
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
#if LWIP_NETCONN_SEM_PER_THREAD
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
#else
  LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
#endif
  cberr = tcpip_callback(lwip_setsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
  if (cberr != ERR_OK) {
    LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
    sock_set_errno(sock, err_to_errno(cberr));
    done_socket(sock);
    return -1;
  }
  /* block (without timeout) until the tcpip thread has executed the callback */
  sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);

  /* maybe lwip_getsockopt_internal has changed err */
  err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
  LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
#endif /* LWIP_TCPIP_CORE_LOCKING */

  sock_set_errno(sock, err);
  done_socket(sock);
  return err ? -1 : 0;
}
3408
#if !LWIP_TCPIP_CORE_LOCKING
/** lwip_setsockopt_callback: only used without CORE_LOCKING
 * to get into the tcpip_thread
 *
 * Runs in tcpip_thread context: applies the option change and then signals
 * the completion semaphore the calling thread blocks on in lwip_setsockopt().
 *
 * @param arg a struct lwip_setgetsockopt_data describing the request
 */
static void
lwip_setsockopt_callback(void *arg)
{
  struct lwip_setgetsockopt_data *data;
  LWIP_ASSERT("arg != NULL", arg != NULL);
  data = (struct lwip_setgetsockopt_data *)arg;

  data->err = lwip_setsockopt_impl(data->s, data->level, data->optname,
#if LWIP_MPU_COMPATIBLE
                                   data->optval,
#else /* LWIP_MPU_COMPATIBLE */
                                   data->optval.pc,
#endif /* LWIP_MPU_COMPATIBLE */
                                   data->optlen);

  /* wake the application thread waiting in lwip_setsockopt() */
  sys_sem_signal((sys_sem_t *)(data->completed_sem));
}
#endif /* !LWIP_TCPIP_CORE_LOCKING */
3431
3432 /** lwip_setsockopt_impl: the actual implementation of setsockopt:
3433 * same argument as lwip_setsockopt, either called directly or through callback
3434 */
3435 static int
3436 lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen)
3437 {
3438 int err = 0;
3439 struct lwip_sock *sock = tryget_socket(s);
3440 if (!sock) {
3441 return EBADF;
3442 }
3443
3444 #ifdef LWIP_HOOK_SOCKETS_SETSOCKOPT
3445 if (LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) {
3446 return err;
3447 }
3448 #endif
3449
3450 switch (level) {
3451
3452 /* Level: SOL_SOCKET */
3453 case SOL_SOCKET:
3454 switch (optname) {
3455
3456 /* SO_ACCEPTCONN is get-only */
3457
3458 /* The option flags */
3459 case SO_BROADCAST:
3460 case SO_KEEPALIVE:
3461 #if SO_REUSE
3462 case SO_REUSEADDR:
3463 #endif /* SO_REUSE */
3464 if ((optname == SO_BROADCAST) &&
3465 (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
3466 done_socket(sock);
3467 return ENOPROTOOPT;
3468 }
3469
3470 optname = lwip_sockopt_to_ipopt(optname);
3471
3472 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3473 if (*(const int *)optval) {
3474 ip_set_option(sock->conn->pcb.ip, optname);
3475 } else {
3476 ip_reset_option(sock->conn->pcb.ip, optname);
3477 }
3478 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, optname=0x%x, ..) -> %s\n",
3479 s, optname, (*(const int *)optval ? "on" : "off")));
3480 break;
3481
3482 /* SO_TYPE is get-only */
3483 /* SO_ERROR is get-only */
3484
3485 #if LWIP_SO_SNDTIMEO
3486 case SO_SNDTIMEO: {
3487 long ms_long;
3488 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3489 ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
3490 if (ms_long < 0) {
3491 done_socket(sock);
3492 return EINVAL;
3493 }
3494 netconn_set_sendtimeout(sock->conn, ms_long);
3495 break;
3496 }
3497 #endif /* LWIP_SO_SNDTIMEO */
3498 #if LWIP_SO_RCVTIMEO
3499 case SO_RCVTIMEO: {
3500 long ms_long;
3501 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3502 ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
3503 if (ms_long < 0) {
3504 done_socket(sock);
3505 return EINVAL;
3506 }
3507 netconn_set_recvtimeout(sock->conn, (u32_t)ms_long);
3508 break;
3509 }
3510 #endif /* LWIP_SO_RCVTIMEO */
3511 #if LWIP_SO_RCVBUF
3512 case SO_RCVBUF:
3513 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, int);
3514 netconn_set_recvbufsize(sock->conn, *(const int *)optval);
3515 break;
3516 #endif /* LWIP_SO_RCVBUF */
3517 #if LWIP_SO_LINGER
3518 case SO_LINGER: {
3519 const struct linger *linger = (const struct linger *)optval;
3520 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct linger);
3521 if (linger->l_onoff) {
3522 int lingersec = linger->l_linger;
3523 if (lingersec < 0) {
3524 done_socket(sock);
3525 return EINVAL;
3526 }
3527 if (lingersec > 0xFFFF) {
3528 lingersec = 0xFFFF;
3529 }
3530 sock->conn->linger = (s16_t)lingersec;
3531 } else {
3532 sock->conn->linger = -1;
3533 }
3534 }
3535 break;
3536 #endif /* LWIP_SO_LINGER */
3537 #if LWIP_UDP
3538 case SO_NO_CHECK:
3539 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
3540 #if LWIP_UDPLITE
3541 if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) {
3542 /* this flag is only available for UDP, not for UDP lite */
3543 done_socket(sock);
3544 return EAFNOSUPPORT;
3545 }
3546 #endif /* LWIP_UDPLITE */
3547 if (*(const int *)optval) {
3548 udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
3549 } else {
3550 udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
3551 }
3552 break;
3553 #endif /* LWIP_UDP */
3554 case SO_BINDTODEVICE: {
3555 const struct ifreq *iface;
3556 struct netif *n = NULL;
3557
3558 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct ifreq);
3559
3560 iface = (const struct ifreq *)optval;
3561 if (iface->ifr_name[0] != 0) {
3562 n = netif_find(iface->ifr_name);
3563 if (n == NULL) {
3564 done_socket(sock);
3565 return ENODEV;
3566 }
3567 }
3568
3569 switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
3570 #if LWIP_TCP
3571 case NETCONN_TCP:
3572 tcp_bind_netif(sock->conn->pcb.tcp, n);
3573 break;
3574 #endif
3575 #if LWIP_UDP
3576 case NETCONN_UDP:
3577 udp_bind_netif(sock->conn->pcb.udp, n);
3578 break;
3579 #endif
3580 #if LWIP_RAW
3581 case NETCONN_RAW:
3582 raw_bind_netif(sock->conn->pcb.raw, n);
3583 break;
3584 #endif
3585 default:
3586 LWIP_ASSERT("Unhandled netconn type in SO_BINDTODEVICE", 0);
3587 break;
3588 }
3589 }
3590 break;
3591 default:
3592 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
3593 s, optname));
3594 err = ENOPROTOOPT;
3595 break;
3596 } /* switch (optname) */
3597 break;
3598
3599 /* Level: IPPROTO_IP */
3600 case IPPROTO_IP:
3601 switch (optname) {
3602 case IP_TTL:
3603 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3604 sock->conn->pcb.ip->ttl = (u8_t)(*(const int *)optval);
3605 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TTL, ..) -> %d\n",
3606 s, sock->conn->pcb.ip->ttl));
3607 break;
3608 case IP_TOS:
3609 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3610 sock->conn->pcb.ip->tos = (u8_t)(*(const int *)optval);
3611 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TOS, ..)-> %d\n",
3612 s, sock->conn->pcb.ip->tos));
3613 break;
3614 #if LWIP_NETBUF_RECVINFO
3615 case IP_PKTINFO:
3616 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
3617 if (*(const int *)optval) {
3618 sock->conn->flags |= NETCONN_FLAG_PKTINFO;
3619 } else {
3620 sock->conn->flags &= ~NETCONN_FLAG_PKTINFO;
3621 }
3622 break;
3623 #endif /* LWIP_NETBUF_RECVINFO */
3624 #if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
3625 case IP_MULTICAST_TTL:
3626 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3627 udp_set_multicast_ttl(sock->conn->pcb.udp, (u8_t)(*(const u8_t *)optval));
3628 break;
3629 case IP_MULTICAST_IF: {
3630 ip4_addr_t if_addr;
3631 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct in_addr, NETCONN_UDP);
3632 inet_addr_to_ip4addr(&if_addr, (const struct in_addr *)optval);
3633 udp_set_multicast_netif_addr(sock->conn->pcb.udp, &if_addr);
3634 }
3635 break;
3636 case IP_MULTICAST_LOOP:
3637 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3638 if (*(const u8_t *)optval) {
3639 udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
3640 } else {
3641 udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
3642 }
3643 break;
3644 #endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
3645 #if LWIP_IGMP
3646 case IP_ADD_MEMBERSHIP:
3647 case IP_DROP_MEMBERSHIP: {
3648 /* If this is a TCP or a RAW socket, ignore these options. */
3649 err_t igmp_err;
3650 const struct ip_mreq *imr = (const struct ip_mreq *)optval;
3651 ip4_addr_t if_addr;
3652 ip4_addr_t multi_addr;
3653 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ip_mreq, NETCONN_UDP);
3654 inet_addr_to_ip4addr(&if_addr, &imr->imr_interface);
3655 inet_addr_to_ip4addr(&multi_addr, &imr->imr_multiaddr);
3656 if (optname == IP_ADD_MEMBERSHIP) {
3657 if (!lwip_socket_register_membership(s, &if_addr, &multi_addr)) {
3658 /* cannot track membership (out of memory) */
3659 err = ENOMEM;
3660 igmp_err = ERR_OK;
3661 } else {
3662 igmp_err = igmp_joingroup(&if_addr, &multi_addr);
3663 }
3664 } else {
3665 igmp_err = igmp_leavegroup(&if_addr, &multi_addr);
3666 lwip_socket_unregister_membership(s, &if_addr, &multi_addr);
3667 }
3668 if (igmp_err != ERR_OK) {
3669 err = EADDRNOTAVAIL;
3670 }
3671 }
3672 break;
3673 #endif /* LWIP_IGMP */
3674 default:
3675 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
3676 s, optname));
3677 err = ENOPROTOOPT;
3678 break;
3679 } /* switch (optname) */
3680 break;
3681
3682 #if LWIP_TCP
3683 /* Level: IPPROTO_TCP */
3684 case IPPROTO_TCP:
3685 /* Special case: all IPPROTO_TCP option take an int */
3686 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP);
3687 if (sock->conn->pcb.tcp->state == LISTEN) {
3688 done_socket(sock);
3689 return EINVAL;
3690 }
3691 switch (optname) {
3692 case TCP_NODELAY:
3693 if (*(const int *)optval) {
3694 tcp_nagle_disable(sock->conn->pcb.tcp);
3695 } else {
3696 tcp_nagle_enable(sock->conn->pcb.tcp);
3697 }
3698 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_NODELAY) -> %s\n",
3699 s, (*(const int *)optval) ? "on" : "off") );
3700 break;
3701 case TCP_KEEPALIVE:
3702 sock->conn->pcb.tcp->keep_idle = (u32_t)(*(const int *)optval);
3703 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) -> %"U32_F"\n",
3704 s, sock->conn->pcb.tcp->keep_idle));
3705 break;
3706
3707 #if LWIP_TCP_KEEPALIVE
3708 case TCP_KEEPIDLE:
3709 sock->conn->pcb.tcp->keep_idle = 1000 * (u32_t)(*(const int *)optval);
3710 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) -> %"U32_F"\n",
3711 s, sock->conn->pcb.tcp->keep_idle));
3712 break;
3713 case TCP_KEEPINTVL:
3714 sock->conn->pcb.tcp->keep_intvl = 1000 * (u32_t)(*(const int *)optval);
3715 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) -> %"U32_F"\n",
3716 s, sock->conn->pcb.tcp->keep_intvl));
3717 break;
3718 case TCP_KEEPCNT:
3719 sock->conn->pcb.tcp->keep_cnt = (u32_t)(*(const int *)optval);
3720 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) -> %"U32_F"\n",
3721 s, sock->conn->pcb.tcp->keep_cnt));
3722 break;
3723 #endif /* LWIP_TCP_KEEPALIVE */
3724 default:
3725 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
3726 s, optname));
3727 err = ENOPROTOOPT;
3728 break;
3729 } /* switch (optname) */
3730 break;
3731 #endif /* LWIP_TCP*/
3732
3733 #if LWIP_IPV6
3734 /* Level: IPPROTO_IPV6 */
3735 case IPPROTO_IPV6:
3736 switch (optname) {
3737 case IPV6_V6ONLY:
3738 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3739 if (*(const int *)optval) {
3740 netconn_set_ipv6only(sock->conn, 1);
3741 } else {
3742 netconn_set_ipv6only(sock->conn, 0);
3743 }
3744 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY, ..) -> %d\n",
3745 s, (netconn_get_ipv6only(sock->conn) ? 1 : 0)));
3746 break;
3747 #if ESP_IPV6
3748 case IPV6_MULTICAST_IF: /* NB: like IP_MULTICAST_IF, this takes an IP not an index */
3749 {
3750 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3751 udp_set_multicast_netif_index(sock->conn->pcb.udp, (u8_t)(*(const u8_t*)optval));
3752 }
3753 break;
3754 case IPV6_MULTICAST_HOPS:
3755 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3756 udp_set_multicast_ttl(sock->conn->pcb.udp, (u8_t)(*(const u8_t*)optval));
3757 break;
3758 case IPV6_MULTICAST_LOOP:
3759 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3760 if (*(const u8_t*)optval) {
3761 udp_setflags(sock->conn->pcb.udp, udp_flags(sock->conn->pcb.udp) | UDP_FLAGS_MULTICAST_LOOP);
3762 } else {
3763 udp_setflags(sock->conn->pcb.udp, udp_flags(sock->conn->pcb.udp) & ~UDP_FLAGS_MULTICAST_LOOP);
3764 }
3765 break;
3766 #endif/* ESP_IPV6 */
3767 #if LWIP_IPV6_MLD
3768 case IPV6_JOIN_GROUP:
3769 case IPV6_LEAVE_GROUP: {
3770 /* If this is a TCP or a RAW socket, ignore these options. */
3771 err_t mld6_err;
3772 struct netif *netif;
3773 ip6_addr_t multi_addr;
3774 const struct ipv6_mreq *imr = (const struct ipv6_mreq *)optval;
3775 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ipv6_mreq, NETCONN_UDP);
3776 inet6_addr_to_ip6addr(&multi_addr, &imr->ipv6mr_multiaddr);
3777 LWIP_ASSERT("Invalid netif index", imr->ipv6mr_interface <= 0xFFu);
3778 netif = netif_get_by_index((u8_t)imr->ipv6mr_interface);
3779 if (netif == NULL) {
3780 err = EADDRNOTAVAIL;
3781 break;
3782 }
3783
3784 if (optname == IPV6_JOIN_GROUP) {
3785 if (!lwip_socket_register_mld6_membership(s, imr->ipv6mr_interface, &multi_addr)) {
3786 /* cannot track membership (out of memory) */
3787 err = ENOMEM;
3788 mld6_err = ERR_OK;
3789 } else {
3790 mld6_err = mld6_joingroup_netif(netif, &multi_addr);
3791 }
3792 } else {
3793 mld6_err = mld6_leavegroup_netif(netif, &multi_addr);
3794 lwip_socket_unregister_mld6_membership(s, imr->ipv6mr_interface, &multi_addr);
3795 }
3796 if (mld6_err != ERR_OK) {
3797 err = EADDRNOTAVAIL;
3798 }
3799 }
3800 break;
3801 #endif /* LWIP_IPV6_MLD */
3802 default:
3803 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
3804 s, optname));
3805 err = ENOPROTOOPT;
3806 break;
3807 } /* switch (optname) */
3808 break;
3809 #endif /* LWIP_IPV6 */
3810
3811 #if LWIP_UDP && LWIP_UDPLITE
3812 /* Level: IPPROTO_UDPLITE */
3813 case IPPROTO_UDPLITE:
3814 /* Special case: all IPPROTO_UDPLITE option take an int */
3815 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3816 /* If this is no UDP lite socket, ignore any options. */
3817 if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
3818 done_socket(sock);
3819 return ENOPROTOOPT;
3820 }
3821 switch (optname) {
3822 case UDPLITE_SEND_CSCOV:
3823 if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
3824 /* don't allow illegal values! */
3825 sock->conn->pcb.udp->chksum_len_tx = 8;
3826 } else {
3827 sock->conn->pcb.udp->chksum_len_tx = (u16_t) * (const int *)optval;
3828 }
3829 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) -> %d\n",
3830 s, (*(const int *)optval)) );
3831 break;
3832 case UDPLITE_RECV_CSCOV:
3833 if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
3834 /* don't allow illegal values! */
3835 sock->conn->pcb.udp->chksum_len_rx = 8;
3836 } else {
3837 sock->conn->pcb.udp->chksum_len_rx = (u16_t) * (const int *)optval;
3838 }
3839 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) -> %d\n",
3840 s, (*(const int *)optval)) );
3841 break;
3842 default:
3843 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
3844 s, optname));
3845 err = ENOPROTOOPT;
3846 break;
3847 } /* switch (optname) */
3848 break;
3849 #endif /* LWIP_UDP */
3850 /* Level: IPPROTO_RAW */
3851 case IPPROTO_RAW:
3852 switch (optname) {
3853 #if LWIP_IPV6 && LWIP_RAW
3854 case IPV6_CHECKSUM:
3855 /* It should not be possible to disable the checksum generation with ICMPv6
3856 * as per RFC 3542 chapter 3.1 */
3857 if (sock->conn->pcb.raw->protocol == IPPROTO_ICMPV6) {
3858 done_socket(sock);
3859 return EINVAL;
3860 }
3861
3862 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_RAW);
3863 if (*(const int *)optval < 0) {
3864 sock->conn->pcb.raw->chksum_reqd = 0;
3865 } else if (*(const int *)optval & 1) {
3866 /* Per RFC3542, odd offsets are not allowed */
3867 done_socket(sock);
3868 return EINVAL;
3869 } else {
3870 sock->conn->pcb.raw->chksum_reqd = 1;
3871 sock->conn->pcb.raw->chksum_offset = (u16_t) * (const int *)optval;
3872 }
3873 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM, ..) -> %d\n",
3874 s, sock->conn->pcb.raw->chksum_reqd));
3875 break;
3876 #endif /* LWIP_IPV6 && LWIP_RAW */
3877 default:
3878 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
3879 s, optname));
3880 err = ENOPROTOOPT;
3881 break;
3882 } /* switch (optname) */
3883 break;
3884 default:
3885 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
3886 s, level, optname));
3887 err = ENOPROTOOPT;
3888 break;
3889 } /* switch (level) */
3890
3891 done_socket(sock);
3892 return err;
3893 }
3894
/** Implementation of ioctl() for lwIP sockets.
 *
 * Supported commands:
 * - FIONREAD: store the number of readily available bytes in *(int *)argp
 *   (only compiled in when LWIP_SO_RCVBUF or LWIP_FIONREAD_LINUXMODE is set)
 * - FIONBIO:  enable/disable non-blocking mode from *(int *)argp
 * Any other command fails with ENOSYS.
 *
 * @param s socket descriptor
 * @param cmd command to execute (FIONREAD/FIONBIO)
 * @param argp pointer to an int used as in- or out-argument (see above)
 * @return 0 on success, -1 on error (errno is set on the socket)
 */
int
lwip_ioctl(int s, long cmd, void *argp)
{
  struct lwip_sock *sock = get_socket(s);
  u8_t val;
#if LWIP_SO_RCVBUF
  int recv_avail;
#endif /* LWIP_SO_RCVBUF */

  if (!sock) {
    return -1;
  }

  switch (cmd) {
#if LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE
    case FIONREAD:
      if (!argp) {
        sock_set_errno(sock, EINVAL);
        done_socket(sock);
        return -1;
      }
#if LWIP_FIONREAD_LINUXMODE
      /* Linux-style FIONREAD: for non-TCP sockets report only the size of
         the next pending datagram, not the total number of buffered bytes. */
      if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
        struct netbuf *nb;
        if (sock->lastdata.netbuf) {
          nb = sock->lastdata.netbuf;
          *((int *)argp) = nb->p->tot_len;
        } else {
          struct netbuf *rxbuf;
          /* peek the next datagram without blocking; keep it in lastdata so
             a later recv() still returns it */
          err_t err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &rxbuf, NETCONN_DONTBLOCK);
          if (err != ERR_OK) {
            *((int *)argp) = 0;
          } else {
            sock->lastdata.netbuf = rxbuf;
            *((int *)argp) = rxbuf->p->tot_len;
          }
        }
        done_socket(sock);
        return 0;
      }
#endif /* LWIP_FIONREAD_LINUXMODE */

#if LWIP_SO_RCVBUF
      /* we come here if either LWIP_FIONREAD_LINUXMODE==0 or this is a TCP socket */
      SYS_ARCH_GET(sock->conn->recv_avail, recv_avail);
      if (recv_avail < 0) {
        recv_avail = 0;
      }

      /* Check if there is data left from the last recv operation. /maq 041215 */
      if (sock->lastdata.netbuf) {
        /* lastdata is a union: TCP keeps a pbuf, other types keep a netbuf */
        if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
          recv_avail += sock->lastdata.pbuf->tot_len;
        } else {
          recv_avail += sock->lastdata.netbuf->p->tot_len;
        }
      }
      *((int *)argp) = recv_avail;

      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONREAD, %p) = %"U16_F"\n", s, argp, *((u16_t *)argp)));
      sock_set_errno(sock, 0);
      done_socket(sock);
      return 0;
#else /* LWIP_SO_RCVBUF */
      break;
#endif /* LWIP_SO_RCVBUF */
#endif /* LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE */

    case (long)FIONBIO:
      /* a NULL argp is treated as "disable non-blocking mode" */
      val = 0;
      if (argp && *(int *)argp) {
        val = 1;
      }
      netconn_set_nonblocking(sock->conn, val);
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONBIO, %d)\n", s, val));
      sock_set_errno(sock, 0);
      done_socket(sock);
      return 0;

    default:
      break;
  } /* switch (cmd) */
  /* fall through for unknown commands (or FIONREAD without LWIP_SO_RCVBUF) */
  LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, UNIMPL: 0x%lx, %p)\n", s, cmd, argp));
  sock_set_errno(sock, ENOSYS); /* not yet implemented */
  done_socket(sock);
  return -1;
}
3982
/** A minimal implementation of fcntl.
 * Currently only the commands F_GETFL and F_SETFL are implemented.
 * The flag O_NONBLOCK and access modes are supported for F_GETFL, only
 * the flag O_NONBLOCK is implemented for F_SETFL.
 *
 * @param s socket descriptor
 * @param cmd F_GETFL or F_SETFL
 * @param val new flags for F_SETFL (ignored for F_GETFL)
 * @return F_GETFL: current flags (O_NONBLOCK plus access mode);
 *         F_SETFL: 0 on success; -1 on error (errno is set on the socket)
 */
int
lwip_fcntl(int s, int cmd, int val)
{
  struct lwip_sock *sock = get_socket(s);
  int ret = -1;
  int op_mode = 0;

  if (!sock) {
    return -1;
  }

  switch (cmd) {
    case F_GETFL:
      ret = netconn_is_nonblocking(sock->conn) ? O_NONBLOCK : 0;
      sock_set_errno(sock, 0);

      if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
#if LWIP_TCPIP_CORE_LOCKING
        LOCK_TCPIP_CORE();
#else
        SYS_ARCH_DECL_PROTECT(lev);
        /* the proper thing to do here would be to get into the tcpip_thread,
           but locking should be OK as well since we only *read* some flags */
        SYS_ARCH_PROTECT(lev);
#endif
#if LWIP_TCP
        if (sock->conn->pcb.tcp) {
          /* derive the access mode from which TCP directions are still open */
          if (!(sock->conn->pcb.tcp->flags & TF_RXCLOSED)) {
            op_mode |= O_RDONLY;
          }
          if (!(sock->conn->pcb.tcp->flags & TF_FIN)) {
            op_mode |= O_WRONLY;
          }
        }
#endif
#if LWIP_TCPIP_CORE_LOCKING
        UNLOCK_TCPIP_CORE();
#else
        SYS_ARCH_UNPROTECT(lev);
#endif
      } else {
        /* non-TCP sockets are always readable and writable */
        op_mode |= O_RDWR;
      }

      /* ensure O_RDWR for (O_RDONLY|O_WRONLY) != O_RDWR cases */
      ret |= (op_mode == (O_RDONLY | O_WRONLY)) ? O_RDWR : op_mode;

      break;
    case F_SETFL:
      /* Bits corresponding to the file access mode and the file creation flags [..] that are set in arg shall be ignored */
      val &= ~(O_RDONLY | O_WRONLY | O_RDWR);
      if ((val & ~O_NONBLOCK) == 0) {
        /* only O_NONBLOCK, all other bits are zero */
        netconn_set_nonblocking(sock->conn, val & O_NONBLOCK);
        ret = 0;
        sock_set_errno(sock, 0);
      } else {
        sock_set_errno(sock, ENOSYS); /* not yet implemented */
      }
      break;
    default:
      LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_fcntl(%d, UNIMPL: %d, %d)\n", s, cmd, val));
      sock_set_errno(sock, ENOSYS); /* not yet implemented */
      break;
  }
  done_socket(sock);
  return ret;
}
4056
#if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
/** POSIX-named variadic wrapper around lwip_fcntl().
 * Extracts the single (optional) int argument and forwards it; commands
 * that take no argument simply get an indeterminate value, which
 * lwip_fcntl() ignores for those commands.
 */
int
fcntl(int s, int cmd, ...)
{
  int arg;
  va_list args;

  va_start(args, cmd);
  arg = va_arg(args, int);
  va_end(args);

  return lwip_fcntl(s, cmd, arg);
}
#endif
4070
4071 const char *
4072 lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size)
4073 {
4074 const char *ret = NULL;
4075 int size_int = (int)size;
4076 if (size_int < 0) {
4077 set_errno(ENOSPC);
4078 return NULL;
4079 }
4080 switch (af) {
4081 #if LWIP_IPV4
4082 case AF_INET:
4083 ret = ip4addr_ntoa_r((const ip4_addr_t *)src, dst, size_int);
4084 if (ret == NULL) {
4085 set_errno(ENOSPC);
4086 }
4087 break;
4088 #endif
4089 #if LWIP_IPV6
4090 case AF_INET6:
4091 ret = ip6addr_ntoa_r((const ip6_addr_t *)src, dst, size_int);
4092 if (ret == NULL) {
4093 set_errno(ENOSPC);
4094 }
4095 break;
4096 #endif
4097 default:
4098 set_errno(EAFNOSUPPORT);
4099 break;
4100 }
4101 return ret;
4102 }
4103
4104 int
4105 lwip_inet_pton(int af, const char *src, void *dst)
4106 {
4107 int err;
4108 switch (af) {
4109 #if LWIP_IPV4
4110 case AF_INET:
4111 err = ip4addr_aton(src, (ip4_addr_t *)dst);
4112 break;
4113 #endif
4114 #if LWIP_IPV6
4115 case AF_INET6: {
4116 /* convert into temporary variable since ip6_addr_t might be larger
4117 than in6_addr when scopes are enabled */
4118 ip6_addr_t addr;
4119 err = ip6addr_aton(src, &addr);
4120 if (err) {
4121 memcpy(dst, &addr.addr, sizeof(addr.addr));
4122 }
4123 break;
4124 }
4125 #endif
4126 default:
4127 err = -1;
4128 set_errno(EAFNOSUPPORT);
4129 break;
4130 }
4131 return err;
4132 }
4133
4134 #if LWIP_IGMP
4135 /** Register a new IGMP membership. On socket close, the membership is dropped automatically.
4136 *
4137 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4138 *
4139 * @return 1 on success, 0 on failure
4140 */
4141 static int
4142 lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
4143 {
4144 struct lwip_sock *sock = get_socket(s);
4145 int i;
4146
4147 if (!sock) {
4148 return 0;
4149 }
4150
4151 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4152 if (socket_ipv4_multicast_memberships[i].sock == NULL) {
4153 socket_ipv4_multicast_memberships[i].sock = sock;
4154 ip4_addr_copy(socket_ipv4_multicast_memberships[i].if_addr, *if_addr);
4155 ip4_addr_copy(socket_ipv4_multicast_memberships[i].multi_addr, *multi_addr);
4156 done_socket(sock);
4157 return 1;
4158 }
4159 }
4160 done_socket(sock);
4161 return 0;
4162 }
4163
4164 /** Unregister a previously registered membership. This prevents dropping the membership
4165 * on socket close.
4166 *
4167 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4168 */
4169 static void
4170 lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
4171 {
4172 struct lwip_sock *sock = get_socket(s);
4173 int i;
4174
4175 if (!sock) {
4176 return;
4177 }
4178
4179 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4180 if ((socket_ipv4_multicast_memberships[i].sock == sock) &&
4181 ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].if_addr, if_addr) &&
4182 ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].multi_addr, multi_addr)) {
4183 socket_ipv4_multicast_memberships[i].sock = NULL;
4184 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
4185 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
4186 break;
4187 }
4188 }
4189 done_socket(sock);
4190 }
4191
4192 /** Drop all memberships of a socket that were not dropped explicitly via setsockopt.
4193 *
4194 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
4195 */
4196 static void
4197 lwip_socket_drop_registered_memberships(int s)
4198 {
4199 struct lwip_sock *sock = get_socket(s);
4200 int i;
4201
4202 if (!sock) {
4203 return;
4204 }
4205
4206 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4207 if (socket_ipv4_multicast_memberships[i].sock == sock) {
4208 ip_addr_t multi_addr, if_addr;
4209 ip_addr_copy_from_ip4(multi_addr, socket_ipv4_multicast_memberships[i].multi_addr);
4210 ip_addr_copy_from_ip4(if_addr, socket_ipv4_multicast_memberships[i].if_addr);
4211 socket_ipv4_multicast_memberships[i].sock = NULL;
4212 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
4213 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
4214
4215 netconn_join_leave_group(sock->conn, &multi_addr, &if_addr, NETCONN_LEAVE);
4216 }
4217 }
4218 done_socket(sock);
4219 }
4220 #endif /* LWIP_IGMP */
4221
4222 #if LWIP_IPV6_MLD
4223 /** Register a new MLD6 membership. On socket close, the membership is dropped automatically.
4224 *
4225 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4226 *
4227 * @return 1 on success, 0 on failure
4228 */
4229 static int
4230 lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
4231 {
4232 struct lwip_sock *sock = get_socket(s);
4233 int i;
4234
4235 if (!sock) {
4236 return 0;
4237 }
4238
4239 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4240 if (socket_ipv6_multicast_memberships[i].sock == NULL) {
4241 socket_ipv6_multicast_memberships[i].sock = sock;
4242 socket_ipv6_multicast_memberships[i].if_idx = (u8_t)if_idx;
4243 ip6_addr_copy(socket_ipv6_multicast_memberships[i].multi_addr, *multi_addr);
4244 done_socket(sock);
4245 return 1;
4246 }
4247 }
4248 done_socket(sock);
4249 return 0;
4250 }
4251
4252 /** Unregister a previously registered MLD6 membership. This prevents dropping the membership
4253 * on socket close.
4254 *
4255 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4256 */
4257 static void
4258 lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
4259 {
4260 struct lwip_sock *sock = get_socket(s);
4261 int i;
4262
4263 if (!sock) {
4264 return;
4265 }
4266
4267 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4268 if ((socket_ipv6_multicast_memberships[i].sock == sock) &&
4269 (socket_ipv6_multicast_memberships[i].if_idx == if_idx) &&
4270 ip6_addr_cmp(&socket_ipv6_multicast_memberships[i].multi_addr, multi_addr)) {
4271 socket_ipv6_multicast_memberships[i].sock = NULL;
4272 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4273 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4274 break;
4275 }
4276 }
4277 done_socket(sock);
4278 }
4279
4280 /** Drop all MLD6 memberships of a socket that were not dropped explicitly via setsockopt.
4281 *
4282 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
4283 */
4284 static void
4285 lwip_socket_drop_registered_mld6_memberships(int s)
4286 {
4287 struct lwip_sock *sock = get_socket(s);
4288 int i;
4289
4290 if (!sock) {
4291 return;
4292 }
4293
4294 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4295 if (socket_ipv6_multicast_memberships[i].sock == sock) {
4296 ip_addr_t multi_addr;
4297 u8_t if_idx;
4298
4299 ip_addr_copy_from_ip6(multi_addr, socket_ipv6_multicast_memberships[i].multi_addr);
4300 if_idx = socket_ipv6_multicast_memberships[i].if_idx;
4301
4302 socket_ipv6_multicast_memberships[i].sock = NULL;
4303 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4304 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4305
4306 netconn_join_leave_group_netif(sock->conn, &multi_addr, if_idx, NETCONN_LEAVE);
4307 }
4308 }
4309 done_socket(sock);
4310 }
4311 #endif /* LWIP_IPV6_MLD */
4312
4313 #endif /* LWIP_SOCKET */
4314