1 /**
2 * @file
3 * Sockets BSD-Like API module
4 */
5
6 /*
7 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without modification,
11 * are permitted provided that the following conditions are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 * 3. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
24 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
26 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
29 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
30 * OF SUCH DAMAGE.
31 *
32 * This file is part of the lwIP TCP/IP stack.
33 *
34 * Author: Adam Dunkels <adam@sics.se>
35 *
36 * Improved by Marc Boucher <marc@mbsi.ca> and David Haas <dhaas@alum.rpi.edu>
37 *
38 */
39
40 #include "lwip/opt.h"
41
42 #if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
43
44 #include "lwip/sockets.h"
45 #include "lwip/priv/sockets_priv.h"
46 #include "lwip/api.h"
47 #include "lwip/igmp.h"
48 #include "lwip/inet.h"
49 #include "lwip/tcp.h"
50 #include "lwip/raw.h"
51 #include "lwip/udp.h"
52 #include "lwip/memp.h"
53 #include "lwip/pbuf.h"
54 #include "lwip/netif.h"
55 #include "lwip/priv/tcpip_priv.h"
56 #include "lwip/mld6.h"
57 #if LWIP_ENABLE_DISTRIBUTED_NET
58 #include "lwip/distributed_net/distributed_net.h"
59 #include "lwip/distributed_net/distributed_net_core.h"
60 #endif /* LWIP_ENABLE_DISTRIBUTED_NET */
61 #if LWIP_CHECKSUM_ON_COPY
62 #include "lwip/inet_chksum.h"
63 #endif
64
65 #if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
66 #include <stdarg.h>
67 #endif
68
69 #include <string.h>
70
71 #ifdef LWIP_HOOK_FILENAME
72 #include LWIP_HOOK_FILENAME
73 #endif
74
75 #if LWIP_LOWPOWER
76 #include "lwip/lowpower.h"
77 #endif
78
79 /* If the netconn API is not required publicly, then we include the necessary
80 files here to get the implementation */
81 #if !LWIP_NETCONN
82 #undef LWIP_NETCONN
83 #define LWIP_NETCONN 1
84 #include "api_msg.c"
85 #include "api_lib.c"
86 #include "netbuf.c"
87 #undef LWIP_NETCONN
88 #define LWIP_NETCONN 0
89 #endif
90
91 #define API_SELECT_CB_VAR_REF(name) API_VAR_REF(name)
92 #define API_SELECT_CB_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_select_cb, name)
93 #define API_SELECT_CB_VAR_ALLOC(name, retblock) API_VAR_ALLOC_EXT(struct lwip_select_cb, MEMP_SELECT_CB, name, retblock)
94 #define API_SELECT_CB_VAR_FREE(name) API_VAR_FREE(MEMP_SELECT_CB, name)
95
96 #ifndef LWIP_SOCKET_HAVE_SA_LEN
97 #define LWIP_SOCKET_HAVE_SA_LEN 0
98 #endif /* LWIP_SOCKET_HAVE_SA_LEN */
99
100 /* Safe read and write of the socket address length field */
101 #if LWIP_SOCKET_HAVE_SA_LEN
102
103 #if LWIP_IPV4
104 #define IP4ADDR_SOCKADDR_SET_LEN(sin) \
105 (sin)->sin_len = sizeof(struct sockaddr_in)
106 #endif /* LWIP_IPV4 */
107
108 #if LWIP_IPV6
109 #define IP6ADDR_SOCKADDR_SET_LEN(sin6) \
110 (sin6)->sin6_len = sizeof(struct sockaddr_in6)
111 #endif /* LWIP_IPV6 */
112
113 #define IPADDR_SOCKADDR_GET_LEN(addr) \
114 (addr)->sa.sa_len
115
116 #else
117
118 #if LWIP_IPV4
119 #define IP4ADDR_SOCKADDR_SET_LEN(addr)
120 #endif /* LWIP_IPV4 */
121
122 #if LWIP_IPV6
123 #define IP6ADDR_SOCKADDR_SET_LEN(addr)
124 #endif /* LWIP_IPV6 */
125
126 #if LWIP_IPV4 && LWIP_IPV6
127 #define IPADDR_SOCKADDR_GET_LEN(addr) \
128 ((addr)->sa.sa_family == AF_INET ? sizeof(struct sockaddr_in) \
129 : ((addr)->sa.sa_family == AF_INET6 ? sizeof(struct sockaddr_in6) : 0))
130 #elif LWIP_IPV4
131 #define IPADDR_SOCKADDR_GET_LEN(addr) sizeof(struct sockaddr_in)
132 #elif LWIP_IPV6
133 #define IPADDR_SOCKADDR_GET_LEN(addr) sizeof(struct sockaddr_in6)
134 #else
135 #define IPADDR_SOCKADDR_GET_LEN(addr) sizeof(struct sockaddr)
136 #endif /* LWIP_IPV4 && LWIP_IPV6 */
137
138 #endif /* LWIP_SOCKET_HAVE_SA_LEN */
139
140 #if LWIP_IPV4
141 #define IP4ADDR_PORT_TO_SOCKADDR(sin, ipaddr, port) do { \
142 IP4ADDR_SOCKADDR_SET_LEN(sin); \
143 (sin)->sin_family = AF_INET; \
144 (sin)->sin_port = lwip_htons((port)); \
145 inet_addr_from_ip4addr(&(sin)->sin_addr, ipaddr); \
146 memset((sin)->sin_zero, 0, SIN_ZERO_LEN); }while(0)
147 #define SOCKADDR4_TO_IP4ADDR_PORT(sin, ipaddr, port) do { \
148 inet_addr_to_ip4addr(ip_2_ip4(ipaddr), &((sin)->sin_addr)); \
149 (port) = lwip_ntohs((sin)->sin_port); }while(0)
150 #endif /* LWIP_IPV4 */
151
152 #if LWIP_IPV6
153 #define IP6ADDR_PORT_TO_SOCKADDR(sin6, ipaddr, port) do { \
154 IP6ADDR_SOCKADDR_SET_LEN(sin6); \
155 (sin6)->sin6_family = AF_INET6; \
156 (sin6)->sin6_port = lwip_htons((port)); \
157 (sin6)->sin6_flowinfo = 0; \
158 inet6_addr_from_ip6addr(&(sin6)->sin6_addr, ipaddr); \
159 (sin6)->sin6_scope_id = ip6_addr_zone(ipaddr); }while(0)
160 #define SOCKADDR6_TO_IP6ADDR_PORT(sin6, ipaddr, port) do { \
161 inet6_addr_to_ip6addr(ip_2_ip6(ipaddr), &((sin6)->sin6_addr)); \
162 if (ip6_addr_has_scope(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { \
163 ip6_addr_set_zone(ip_2_ip6(ipaddr), (u8_t)((sin6)->sin6_scope_id)); \
164 } \
165 (port) = lwip_ntohs((sin6)->sin6_port); }while(0)
166 #endif /* LWIP_IPV6 */
167
168 #if LWIP_IPV4 && LWIP_IPV6
169 static void sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port);
170
171 #define IS_SOCK_ADDR_LEN_VALID(namelen) (((namelen) == sizeof(struct sockaddr_in)) || \
172 ((namelen) == sizeof(struct sockaddr_in6)))
173 #define IS_SOCK_ADDR_TYPE_VALID(name) (((name)->sa_family == AF_INET) || \
174 ((name)->sa_family == AF_INET6))
175 #define SOCK_ADDR_TYPE_MATCH(name, sock) \
176 ((((name)->sa_family == AF_INET) && !(NETCONNTYPE_ISIPV6((sock)->conn->type))) || \
177 (((name)->sa_family == AF_INET6) && (NETCONNTYPE_ISIPV6((sock)->conn->type))))
178 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) do { \
179 if (IP_IS_ANY_TYPE_VAL(*ipaddr) || IP_IS_V6_VAL(*ipaddr)) { \
180 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port); \
181 } else { \
182 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port); \
183 } } while(0)
184 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) sockaddr_to_ipaddr_port(sockaddr, ipaddr, &(port))
185 #define DOMAIN_TO_NETCONN_TYPE(domain, type) (((domain) == AF_INET) ? \
186 (type) : (enum netconn_type)((type) | NETCONN_TYPE_IPV6))
187 #elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */
188 #define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in6))
189 #define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET6)
190 #define SOCK_ADDR_TYPE_MATCH(name, sock) 1
191 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
192 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port)
193 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
194 SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, port)
195 #define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
196 #else /*-> LWIP_IPV4: LWIP_IPV4 && LWIP_IPV6 */
197 #define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in))
198 #define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET)
199 #define SOCK_ADDR_TYPE_MATCH(name, sock) 1
200 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
201 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port)
202 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
203 SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, port)
204 #define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
205 #endif /* LWIP_IPV6 */
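/* Example (dual-stack build): DOMAIN_TO_NETCONN_TYPE(AF_INET6, NETCONN_TCP)
 * evaluates to NETCONN_TCP | NETCONN_TYPE_IPV6, i.e. NETCONN_TCP_IPV6, while
 * an AF_INET domain leaves the netconn type unchanged. The IS_SOCK_ADDR_*
 * macros above are used by lwip_bind()/lwip_connect() to validate the length,
 * family and alignment of caller-supplied sockaddr buffers. */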
206
207 #define IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) (((name)->sa_family == AF_UNSPEC) || \
208 IS_SOCK_ADDR_TYPE_VALID(name))
209 #define SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock) (((name)->sa_family == AF_UNSPEC) || \
210 SOCK_ADDR_TYPE_MATCH(name, sock))
211 #define IS_SOCK_ADDR_ALIGNED(name) ((((mem_ptr_t)(name)) % LWIP_MIN(4, MEM_ALIGNMENT)) == 0)
212
213
214 #define LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype) do { if ((optlen) < sizeof(opttype)) { done_socket(sock); return EINVAL; }}while(0)
215 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, opttype) do { \
216 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
217 if ((sock)->conn == NULL) { done_socket(sock); return EINVAL; } }while(0)
218 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype) do { \
219 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
220 if (((sock)->conn == NULL) || ((sock)->conn->pcb.tcp == NULL)) { done_socket(sock); return EINVAL; } }while(0)
221 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, opttype, netconntype) do { \
222 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype); \
223 if (NETCONNTYPE_GROUP(netconn_type((sock)->conn)) != netconntype) { done_socket(sock); return ENOPROTOOPT; } }while(0)
224
225
226 #define LWIP_SETGETSOCKOPT_DATA_VAR_REF(name) API_VAR_REF(name)
227 #define LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_setgetsockopt_data, name)
228 #define LWIP_SETGETSOCKOPT_DATA_VAR_FREE(name) API_VAR_FREE(MEMP_SOCKET_SETGETSOCKOPT_DATA, name)
229 #if LWIP_MPU_COMPATIBLE
230 #define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) do { \
231 name = (struct lwip_setgetsockopt_data *)memp_malloc(MEMP_SOCKET_SETGETSOCKOPT_DATA); \
232 if (name == NULL) { \
233 set_errno(ENOMEM); \
234 done_socket(sock); \
235 return -1; \
236 } }while(0)
237 #else /* LWIP_MPU_COMPATIBLE */
238 #define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock)
239 #endif /* LWIP_MPU_COMPATIBLE */
240
241 #if LWIP_SO_SNDRCVTIMEO_NONSTANDARD
242 #define LWIP_SO_SNDRCVTIMEO_OPTTYPE int
243 #define LWIP_SO_SNDRCVTIMEO_SET(optval, val) (*(int *)(optval) = (val))
244 #define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((long)*(const int*)(optval))
245 #else
246 #define LWIP_SO_SNDRCVTIMEO_OPTTYPE struct timeval
247 #define LWIP_SO_SNDRCVTIMEO_SET(optval, val) do { \
248 u32_t loc = (val); \
249 ((struct timeval *)(optval))->tv_sec = (long)((loc) / 1000U); \
250 ((struct timeval *)(optval))->tv_usec = (long)(((loc) % 1000U) * 1000U); }while(0)
251 #define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((((const struct timeval *)(optval))->tv_sec * 1000) + (((const struct timeval *)(optval))->tv_usec / 1000))
252 #endif
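/* Example: with the default struct timeval representation, storing a send or
 * receive timeout of 1500 ms via LWIP_SO_SNDRCVTIMEO_SET() yields tv_sec = 1
 * and tv_usec = 500000, and LWIP_SO_SNDRCVTIMEO_GET_MS() converts that back
 * to 1500. With LWIP_SO_SNDRCVTIMEO_NONSTANDARD the option value is a plain
 * int holding the timeout in milliseconds. */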
253
254
255 /** A struct sockaddr replacement that has the same alignment as sockaddr_in/
256 * sockaddr_in6 if instantiated.
257 */
258 union sockaddr_aligned {
259 struct sockaddr sa;
260 #if LWIP_IPV6
261 struct sockaddr_in6 sin6;
262 #endif /* LWIP_IPV6 */
263 #if LWIP_IPV4
264 struct sockaddr_in sin;
265 #endif /* LWIP_IPV4 */
266 };
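/* Typical usage (see lwip_accept() and lwip_sock_make_addr() below): fill a
 * local union sockaddr_aligned via IPADDR_PORT_TO_SOCKADDR(), then MEMCPY()
 * at most the caller-supplied *addrlen/*fromlen bytes into the application's
 * buffer, so undersized or differently aligned caller buffers are handled
 * safely. */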
267
268 /* Define the number of IPv4 multicast memberships, default is one per socket */
269 #ifndef LWIP_SOCKET_MAX_MEMBERSHIPS
270 #define LWIP_SOCKET_MAX_MEMBERSHIPS NUM_SOCKETS
271 #endif
272
273 #if LWIP_IGMP
274 /* This is to keep track of IP_ADD_MEMBERSHIP calls to drop the membership when
275 a socket is closed */
276 struct lwip_socket_multicast_pair {
277 /** the socket */
278 struct lwip_sock *sock;
279 /** the interface address */
280 ip4_addr_t if_addr;
281 /** the group address */
282 ip4_addr_t multi_addr;
283 };
284
285 static struct lwip_socket_multicast_pair socket_ipv4_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
286
287 static int lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
288 static void lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
289 static void lwip_socket_drop_registered_memberships(int s);
290 #endif /* LWIP_IGMP */
291
292 #if LWIP_IPV6_MLD
293 /* This is to keep track of IPV6_JOIN_GROUP calls to drop the membership when
294 a socket is closed */
295 struct lwip_socket_multicast_mld6_pair {
296 /** the socket */
297 struct lwip_sock *sock;
298 /** the interface index */
299 u8_t if_idx;
300 /** the group address */
301 ip6_addr_t multi_addr;
302 };
303
304 static struct lwip_socket_multicast_mld6_pair socket_ipv6_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
305
306 static int lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
307 static void lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
308 static void lwip_socket_drop_registered_mld6_memberships(int s);
309 #endif /* LWIP_IPV6_MLD */
310
311 /** The global array of available sockets */
312 static struct lwip_sock sockets[NUM_SOCKETS];
313
314 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
315 #if LWIP_TCPIP_CORE_LOCKING
316 /* protect the select_cb_list using core lock */
317 #define LWIP_SOCKET_SELECT_DECL_PROTECT(lev)
318 #define LWIP_SOCKET_SELECT_PROTECT(lev) LOCK_TCPIP_CORE()
319 #define LWIP_SOCKET_SELECT_UNPROTECT(lev) UNLOCK_TCPIP_CORE()
320 #else /* LWIP_TCPIP_CORE_LOCKING */
321 /* protect the select_cb_list using SYS_LIGHTWEIGHT_PROT */
322 #define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev)
323 #define LWIP_SOCKET_SELECT_PROTECT(lev) SYS_ARCH_PROTECT(lev)
324 #define LWIP_SOCKET_SELECT_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev)
325 /** This counter is increased from lwip_select when the list is changed
326 and checked in select_check_waiters to see if it has changed. */
327 static volatile int select_cb_ctr;
328 #endif /* LWIP_TCPIP_CORE_LOCKING */
329 /** The global list of tasks waiting for select */
330 static struct lwip_select_cb *select_cb_list;
331 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
332
333 #define sock_set_errno(sk, e) do { \
334 const int sockerr = (e); \
335 set_errno(sockerr); \
336 } while (0)
337
338 /* Forward declaration of some functions */
339 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
340 static void event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len);
341 #define DEFAULT_SOCKET_EVENTCB event_callback
342 static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent);
343 #else
344 #define DEFAULT_SOCKET_EVENTCB NULL
345 #endif
346 #if !LWIP_TCPIP_CORE_LOCKING
347 static void lwip_getsockopt_callback(void *arg);
348 static void lwip_setsockopt_callback(void *arg);
349 #endif
350 static int lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen);
351 static int lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen);
352 static int free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn,
353 union lwip_sock_lastdata *lastdata);
354 static void free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata);
355
356 #if LWIP_IPV4 && LWIP_IPV6
357 static void
358 sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port)
359 {
360 if ((sockaddr->sa_family) == AF_INET6) {
361 SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6 *)(const void *)(sockaddr), ipaddr, *port);
362 ipaddr->type = IPADDR_TYPE_V6;
363 } else {
364 SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in *)(const void *)(sockaddr), ipaddr, *port);
365 ipaddr->type = IPADDR_TYPE_V4;
366 }
367 }
368 #endif /* LWIP_IPV4 && LWIP_IPV6 */
369
370 /** LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */
371 void
372 lwip_socket_thread_init(void)
373 {
374 netconn_thread_init();
375 }
376
377 /** LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */
378 void
379 lwip_socket_thread_cleanup(void)
380 {
381 netconn_thread_cleanup();
382 }
383
384 #if LWIP_NETCONN_FULLDUPLEX
385 /* Thread-safe increment of sock->fd_used, with overflow check */
386 static int
387 sock_inc_used(struct lwip_sock *sock)
388 {
389 int ret;
390 SYS_ARCH_DECL_PROTECT(lev);
391
392 LWIP_ASSERT("sock != NULL", sock != NULL);
393
394 SYS_ARCH_PROTECT(lev);
395 if (sock->fd_free_pending) {
396 /* prevent new usage of this socket if free is pending */
397 ret = 0;
398 } else {
399 ++sock->fd_used;
400 ret = 1;
401 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
402 }
403 SYS_ARCH_UNPROTECT(lev);
404 return ret;
405 }
406
407 /* Like sock_inc_used(), but called under SYS_ARCH_PROTECT lock. */
408 static int
409 sock_inc_used_locked(struct lwip_sock *sock)
410 {
411 LWIP_ASSERT("sock != NULL", sock != NULL);
412
413 if (sock->fd_free_pending) {
414 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
415 return 0;
416 }
417
418 ++sock->fd_used;
419 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
420 return 1;
421 }
422
423 /* In full-duplex mode, sock->fd_used != 0 prevents a socket descriptor from being
424 * released (and possibly reused) when used from more than one thread
425 * (e.g. read-while-write or close-while-write, etc)
426 * This function is called at the end of functions using (try)get_socket*().
427 */
428 static void
429 done_socket(struct lwip_sock *sock)
430 {
431 int freed = 0;
432 int is_tcp = 0;
433 struct netconn *conn = NULL;
434 union lwip_sock_lastdata lastdata;
435 SYS_ARCH_DECL_PROTECT(lev);
436 LWIP_ASSERT("sock != NULL", sock != NULL);
437
438 SYS_ARCH_PROTECT(lev);
439 LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
440 if (--sock->fd_used == 0) {
441 if (sock->fd_free_pending) {
442 /* free the socket */
443 sock->fd_used = 1;
444 is_tcp = sock->fd_free_pending & LWIP_SOCK_FD_FREE_TCP;
445 freed = free_socket_locked(sock, is_tcp, &conn, &lastdata);
446 }
447 }
448 SYS_ARCH_UNPROTECT(lev);
449
450 if (freed) {
451 free_socket_free_elements(is_tcp, conn, &lastdata);
452 }
453 }
454
455 #else /* LWIP_NETCONN_FULLDUPLEX */
456 #define sock_inc_used(sock) 1
457 #define sock_inc_used_locked(sock) 1
458 #define done_socket(sock)
459 #endif /* LWIP_NETCONN_FULLDUPLEX */
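/* Summary of the LWIP_NETCONN_FULLDUPLEX reference counting: get_socket(),
 * tryget_socket() and tryget_socket_unconn[_locked]() increment fd_used via
 * sock_inc_used*(), and each such call must be paired with done_socket().
 * lwip_close() does not free a descriptor that is still in use by another
 * thread; free_socket_locked() sets fd_free_pending instead, and the final
 * done_socket() performs the actual cleanup. Without LWIP_NETCONN_FULLDUPLEX
 * these helpers are no-ops. */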
460
461 /* Translate a socket 'int' into a pointer (only fails if the index is invalid) */
462 static struct lwip_sock *
463 tryget_socket_unconn_nouse(int fd)
464 {
465 int s = fd - LWIP_SOCKET_OFFSET;
466 if ((s < 0) || (s >= NUM_SOCKETS)) {
467 LWIP_DEBUGF(SOCKETS_DEBUG, ("tryget_socket_unconn(%d): invalid\n", fd));
468 return NULL;
469 }
470 return &sockets[s];
471 }
472
473 struct lwip_sock *
474 lwip_socket_dbg_get_socket(int fd)
475 {
476 return tryget_socket_unconn_nouse(fd);
477 }
478
479 /* Translate a socket 'int' into a pointer (only fails if the index is invalid) */
480 static struct lwip_sock *
481 tryget_socket_unconn(int fd)
482 {
483 struct lwip_sock *ret = tryget_socket_unconn_nouse(fd);
484 if (ret != NULL) {
485 if (!sock_inc_used(ret)) {
486 return NULL;
487 }
488 }
489 return ret;
490 }
491
492 /* Like tryget_socket_unconn(), but called under SYS_ARCH_PROTECT lock. */
493 static struct lwip_sock *
494 tryget_socket_unconn_locked(int fd)
495 {
496 struct lwip_sock *ret = tryget_socket_unconn_nouse(fd);
497 if (ret != NULL) {
498 if (!sock_inc_used_locked(ret)) {
499 return NULL;
500 }
501 }
502 return ret;
503 }
504
505 /**
506 * Same as get_socket but doesn't set errno
507 *
508 * @param fd externally used socket index
509 * @return struct lwip_sock for the socket or NULL if not found
510 */
511 static struct lwip_sock *
512 tryget_socket(int fd)
513 {
514 struct lwip_sock *sock = tryget_socket_unconn(fd);
515 if (sock != NULL) {
516 if (sock->conn) {
517 return sock;
518 }
519 done_socket(sock);
520 }
521 return NULL;
522 }
523
524 /**
525 * Map an externally used socket index to the internal socket representation.
526 *
527 * @param fd externally used socket index
528 * @return struct lwip_sock for the socket or NULL if not found
529 */
530 static struct lwip_sock *
531 get_socket(int fd)
532 {
533 struct lwip_sock *sock = tryget_socket(fd);
534 if (!sock) {
535 if ((fd < LWIP_SOCKET_OFFSET) || (fd >= (LWIP_SOCKET_OFFSET + NUM_SOCKETS))) {
536 LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): invalid\n", fd));
537 }
538 set_errno(EBADF);
539 return NULL;
540 }
541 return sock;
542 }
543
544 /**
545 * Allocate a new socket for a given netconn.
546 *
547 * @param newconn the netconn for which to allocate a socket
548 * @param accepted 1 if socket has been created by accept(),
549 * 0 if socket has been created by socket()
550 * @return the index of the new socket; -1 on error
551 */
552 static int
553 alloc_socket(struct netconn *newconn, int accepted)
554 {
555 int i;
556 SYS_ARCH_DECL_PROTECT(lev);
557 LWIP_UNUSED_ARG(accepted);
558
559 /* allocate a new socket identifier */
560 for (i = 0; i < NUM_SOCKETS; ++i) {
561 /* Protect socket array */
562 SYS_ARCH_PROTECT(lev);
563 if (!sockets[i].conn) {
564 #if LWIP_NETCONN_FULLDUPLEX
565 if (sockets[i].fd_used) {
566 SYS_ARCH_UNPROTECT(lev);
567 continue;
568 }
569 sockets[i].fd_used = 1;
570 sockets[i].fd_free_pending = 0;
571 #endif
572 sockets[i].conn = newconn;
573 /* The socket is not yet known to anyone, so no need to protect
574 after having marked it as used. */
575 SYS_ARCH_UNPROTECT(lev);
576 sockets[i].lastdata.pbuf = NULL;
577 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
578 LWIP_ASSERT("sockets[i].select_waiting == 0", sockets[i].select_waiting == 0);
579 sockets[i].rcvevent = 0;
580 /* TCP sendbuf is empty, but the socket is not yet writable until connected
581 * (unless it has been created by accept()). */
582 sockets[i].sendevent = (NETCONNTYPE_GROUP(newconn->type) == NETCONN_TCP ? (accepted != 0) : 1);
583 sockets[i].errevent = 0;
584 init_waitqueue_head(&sockets[i].wq);
585 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
586 return i + LWIP_SOCKET_OFFSET;
587 }
588 SYS_ARCH_UNPROTECT(lev);
589 }
590 return -1;
591 }
592
593 /** Free a socket (under lock)
594 *
595 * @param sock the socket to free
596 * @param is_tcp != 0 for TCP sockets, used to free lastdata
597 * @param conn the socket's netconn is stored here, must be freed externally
598 * @param lastdata lastdata is stored here, must be freed externally
599 */
600 static int
601 free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn,
602 union lwip_sock_lastdata *lastdata)
603 {
604 #if LWIP_NETCONN_FULLDUPLEX
605 LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
606 sock->fd_used--;
607 if (sock->fd_used > 0) {
608 sock->fd_free_pending = LWIP_SOCK_FD_FREE_FREE | (is_tcp ? LWIP_SOCK_FD_FREE_TCP : 0);
609 return 0;
610 }
611 #else /* LWIP_NETCONN_FULLDUPLEX */
612 LWIP_UNUSED_ARG(is_tcp);
613 #endif /* LWIP_NETCONN_FULLDUPLEX */
614
615 *lastdata = sock->lastdata;
616 sock->lastdata.pbuf = NULL;
617 *conn = sock->conn;
618 sock->conn = NULL;
619 return 1;
620 }
621
622 /** Free a socket's leftover members.
623 */
624 static void
625 free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata)
626 {
627 if (lastdata->pbuf != NULL) {
628 if (is_tcp) {
629 pbuf_free(lastdata->pbuf);
630 } else {
631 netbuf_delete(lastdata->netbuf);
632 }
633 }
634 if (conn != NULL) {
635 /* netconn_prepare_delete() has already been called, here we only free the conn */
636 netconn_delete(conn);
637 }
638 }
639
640 /** Free a socket. The socket's netconn must have been
641 * deleted before!
642 *
643 * @param sock the socket to free
644 * @param is_tcp != 0 for TCP sockets, used to free lastdata
645 */
646 static void
647 free_socket(struct lwip_sock *sock, int is_tcp)
648 {
649 int freed;
650 struct netconn *conn;
651 union lwip_sock_lastdata lastdata;
652 SYS_ARCH_DECL_PROTECT(lev);
653
654 /* Protect socket array */
655 SYS_ARCH_PROTECT(lev);
656
657 freed = free_socket_locked(sock, is_tcp, &conn, &lastdata);
658 SYS_ARCH_UNPROTECT(lev);
659 /* don't use 'sock' after this line, as another task might have allocated it */
660
661 if (freed) {
662 free_socket_free_elements(is_tcp, conn, &lastdata);
663 }
664 }
665
666 /* Below this, the well-known socket functions are implemented.
667 * Use google.com or opengroup.org to get a good description :-)
668 *
669 * Exceptions are documented!
670 */
671
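/* Minimal TCP server sketch using these functions (application code, not part
 * of this file; error handling mostly elided, identifiers illustrative only):
 *
 *   int listen_fd = lwip_socket(AF_INET, SOCK_STREAM, 0);
 *   struct sockaddr_in local;
 *   memset(&local, 0, sizeof(local));
 *   local.sin_family = AF_INET;
 *   local.sin_port = lwip_htons(7);
 *   local.sin_addr.s_addr = PP_HTONL(INADDR_ANY);
 *   lwip_bind(listen_fd, (struct sockaddr *)&local, sizeof(local));
 *   lwip_listen(listen_fd, 1);
 *   for (;;) {
 *     int conn_fd = lwip_accept(listen_fd, NULL, NULL);
 *     if (conn_fd >= 0) {
 *       ... lwip_recv()/lwip_send() on conn_fd, then lwip_close(conn_fd) ...
 *     }
 *   }
 */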
672 int
673 lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
674 {
675 struct lwip_sock *sock, *nsock;
676 struct netconn *newconn;
677 ip_addr_t naddr = {0};
678 u16_t port = 0;
679 int newsock;
680 err_t err;
681 int recvevent;
682 SYS_ARCH_DECL_PROTECT(lev);
683
684 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d)...\n", s));
685 sock = get_socket(s);
686 if (!sock) {
687 return -1;
688 }
689
690 /* wait for a new connection */
691 err = netconn_accept(sock->conn, &newconn);
692 if (err != ERR_OK) {
693 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_accept failed, err=%d\n", s, err));
694 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
695 set_errno(EOPNOTSUPP);
696 } else if (err == ERR_CLSD) {
697 set_errno(EINVAL);
698 } else {
699 set_errno(err_to_errno(err));
700 }
701 done_socket(sock);
702 return -1;
703 }
704 LWIP_ASSERT("newconn != NULL", newconn != NULL);
705
706 newsock = alloc_socket(newconn, 1);
707 if (newsock == -1) {
708 netconn_delete(newconn);
709 set_errno(ENFILE);
710 done_socket(sock);
711 return -1;
712 }
713 LWIP_ASSERT("invalid socket index", (newsock >= LWIP_SOCKET_OFFSET) && (newsock < NUM_SOCKETS + LWIP_SOCKET_OFFSET));
714 nsock = &sockets[newsock - LWIP_SOCKET_OFFSET];
715
716 /* Note that POSIX only requires us to check addr is non-NULL. addrlen must
717 * not be NULL if addr is valid.
718 */
719 if ((addr != NULL) && (addrlen != NULL)) {
720 union sockaddr_aligned tempaddr;
721 /* get the IP address and port of the remote host */
722 err = netconn_peer(newconn, &naddr, &port);
723 if (err != ERR_OK) {
724 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_peer failed, err=%d\n", s, err));
725 free_socket(nsock, 1);
726 set_errno(err_to_errno(err));
727 done_socket(sock);
728 return -1;
729 }
730
731 IPADDR_PORT_TO_SOCKADDR(&tempaddr, &naddr, port);
732 if (*addrlen > IPADDR_SOCKADDR_GET_LEN(&tempaddr)) {
733 *addrlen = IPADDR_SOCKADDR_GET_LEN(&tempaddr);
734 }
735 MEMCPY(addr, &tempaddr, *addrlen);
736 }
737
738 /* See event_callback: data may arrive immediately after the accept, before
739 * the server task has created the new socket. In that case event_callback
740 * decrements newconn->callback_arg.socket below -1, so nsock->rcvevent is
741 * >= 1 here and the missed RCVPLUS events are replayed below.
742 */
743 SYS_ARCH_PROTECT(lev);
744 recvevent = (s16_t)(-1 - newconn->callback_arg.socket);
745 newconn->callback_arg.socket = newsock;
746 SYS_ARCH_UNPROTECT(lev);
747
748 if (newconn->callback) {
749 LOCK_TCPIP_CORE();
750 while (recvevent > 0) {
751 recvevent--;
752 newconn->callback(newconn, NETCONN_EVT_RCVPLUS, 0);
753 }
754 UNLOCK_TCPIP_CORE();
755 }
756
757 if ((addr != NULL) && (addrlen != NULL)) {
758 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d addr=", s, newsock));
759 ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
760 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", port));
761 } else {
762 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d\n", s, newsock));
763 }
764
765 set_errno(0);
766 done_socket(sock);
767 done_socket(nsock);
768 return newsock;
769 }
770
771 int
772 lwip_bind(int s, const struct sockaddr *name, socklen_t namelen)
773 {
774 struct lwip_sock *sock;
775 ip_addr_t local_addr;
776 u16_t local_port;
777 err_t err;
778
779 sock = get_socket(s);
780 if (!sock) {
781 return -1;
782 }
783
784 if (!SOCK_ADDR_TYPE_MATCH(name, sock)) {
785 /* sockaddr does not match socket type (IPv4/IPv6) */
786 set_errno(err_to_errno(ERR_VAL));
787 done_socket(sock);
788 return -1;
789 }
790
791 /* check size, family and alignment of 'name' */
792 LWIP_ERROR("lwip_bind: invalid address", (IS_SOCK_ADDR_LEN_VALID(namelen) &&
793 IS_SOCK_ADDR_TYPE_VALID(name) && IS_SOCK_ADDR_ALIGNED(name)),
794 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
795 LWIP_UNUSED_ARG(namelen);
796
797 SOCKADDR_TO_IPADDR_PORT(name, &local_addr, local_port);
798 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d, addr=", s));
799 ip_addr_debug_print_val(SOCKETS_DEBUG, local_addr);
800 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", local_port));
801
802 #if LWIP_IPV4 && LWIP_IPV6
803 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
804 if (IP_IS_V6_VAL(local_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&local_addr))) {
805 unmap_ipv4_mapped_ipv6(ip_2_ip4(&local_addr), ip_2_ip6(&local_addr));
806 IP_SET_TYPE_VAL(local_addr, IPADDR_TYPE_V4);
807 }
808 #endif /* LWIP_IPV4 && LWIP_IPV6 */
809
810 err = netconn_bind(sock->conn, &local_addr, local_port);
811
812 if (err != ERR_OK) {
813 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) failed, err=%d\n", s, err));
814 set_errno(err_to_errno(err));
815 done_socket(sock);
816 return -1;
817 }
818
819 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) succeeded\n", s));
820 set_errno(0);
821 done_socket(sock);
822 return 0;
823 }
824
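/* Note on the LWIP_ENABLE_DISTRIBUTED_NET blocks in lwip_close(), lwip_connect(),
 * lwip_recvfrom(), lwip_sendto() and lwip_sendmsg(): when the feature is enabled,
 * the public function merely dispatches to either the distributed_net_*()
 * implementation or the corresponding lwip_*_internal() variant. The #if splices
 * the function headers so that the shared body belongs to lwip_*_internal() when
 * the feature is enabled and to the public function otherwise. */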
825 int
826 lwip_close(int s)
827 {
828 #if LWIP_ENABLE_DISTRIBUTED_NET
829 if (!is_distributed_net_enabled()) {
830 return lwip_close_internal(s);
831 }
832 return distributed_net_close(s);
833 }
834
835 int
836 lwip_close_internal(int s)
837 {
838 #endif
839 struct lwip_sock *sock;
840 int is_tcp = 0;
841 err_t err;
842
843 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_close(%d)\n", s));
844
845 sock = get_socket(s);
846 if (!sock) {
847 return -1;
848 }
849
850 if (sock->conn != NULL) {
851 is_tcp = NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP;
852 } else {
853 LWIP_ASSERT("sock->lastdata == NULL", sock->lastdata.pbuf == NULL);
854 }
855
856 #if LWIP_IGMP
857 /* drop all possibly joined IGMP memberships */
858 lwip_socket_drop_registered_memberships(s);
859 #endif /* LWIP_IGMP */
860 #if LWIP_IPV6_MLD
861 /* drop all possibly joined MLD6 memberships */
862 lwip_socket_drop_registered_mld6_memberships(s);
863 #endif /* LWIP_IPV6_MLD */
864
865 err = netconn_prepare_delete(sock->conn);
866 if (err != ERR_OK) {
867 set_errno(err_to_errno(err));
868 done_socket(sock);
869 return -1;
870 }
871
872 free_socket(sock, is_tcp);
873 set_errno(0);
874 return 0;
875 }
876
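/* Behavioural note (not enforced by the code below): on a non-blocking socket,
 * lwip_connect() typically returns -1 with errno set to EINPROGRESS while the
 * TCP handshake is still pending; completion can then be detected by waiting
 * for writability with lwip_select()/lwip_poll() and checking SO_ERROR via
 * lwip_getsockopt(). */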
877 int
878 lwip_connect(int s, const struct sockaddr *name, socklen_t namelen)
879 {
880 #if LWIP_ENABLE_DISTRIBUTED_NET
881 if (!is_distributed_net_enabled()) {
882 return lwip_connect_internal(s, name, namelen);
883 }
884 return distributed_net_connect(s, name, namelen);
885 }
886
887 int
888 lwip_connect_internal(int s, const struct sockaddr *name, socklen_t namelen)
889 {
890 #endif
891 struct lwip_sock *sock;
892 err_t err;
893
894 sock = get_socket(s);
895 if (!sock) {
896 return -1;
897 }
898
899 if (!SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock)) {
900 /* sockaddr does not match socket type (IPv4/IPv6) */
901 set_errno(err_to_errno(ERR_VAL));
902 done_socket(sock);
903 return -1;
904 }
905
906 LWIP_UNUSED_ARG(namelen);
907 if (name->sa_family == AF_UNSPEC) {
908 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, AF_UNSPEC)\n", s));
909 err = netconn_disconnect(sock->conn);
910 } else {
911 ip_addr_t remote_addr;
912 u16_t remote_port;
913
914 /* check size, family and alignment of 'name' */
915 LWIP_ERROR("lwip_connect: invalid address", IS_SOCK_ADDR_LEN_VALID(namelen) &&
916 IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) && IS_SOCK_ADDR_ALIGNED(name),
917 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
918
919 SOCKADDR_TO_IPADDR_PORT(name, &remote_addr, remote_port);
920 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, addr=", s));
921 ip_addr_debug_print_val(SOCKETS_DEBUG, remote_addr);
922 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", remote_port));
923
924 #if LWIP_IPV4 && LWIP_IPV6
925 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
926 if (IP_IS_V6_VAL(remote_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&remote_addr))) {
927 unmap_ipv4_mapped_ipv6(ip_2_ip4(&remote_addr), ip_2_ip6(&remote_addr));
928 IP_SET_TYPE_VAL(remote_addr, IPADDR_TYPE_V4);
929 }
930 #endif /* LWIP_IPV4 && LWIP_IPV6 */
931
932 err = netconn_connect(sock->conn, &remote_addr, remote_port);
933 }
934
935 if (err != ERR_OK) {
936 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) failed, err=%d\n", s, err));
937 set_errno(err_to_errno(err));
938 done_socket(sock);
939 return -1;
940 }
941
942 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) succeeded\n", s));
943 set_errno(0);
944 done_socket(sock);
945 return 0;
946 }
947
948 /**
949 * Set a socket into listen mode.
950 * The socket may not have been used for another connection previously.
951 *
952 * @param s the socket to set to listening mode
953 * @param backlog (ATTENTION: needs TCP_LISTEN_BACKLOG=1)
954 * @return 0 on success, non-zero on failure
955 */
956 int
957 lwip_listen(int s, int backlog)
958 {
959 struct lwip_sock *sock;
960 err_t err;
961
962 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d, backlog=%d)\n", s, backlog));
963
964 sock = get_socket(s);
965 if (!sock) {
966 return -1;
967 }
968
969 /* limit the "backlog" parameter to fit in an u8_t */
970 backlog = LWIP_MIN(LWIP_MAX(backlog, 0), 0xff);
971
972 err = netconn_listen_with_backlog(sock->conn, (u8_t)backlog);
973
974 if (err != ERR_OK) {
975 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d) failed, err=%d\n", s, err));
976 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
977 set_errno(EOPNOTSUPP);
978 } else {
979 set_errno(err_to_errno(err));
980 }
981 done_socket(sock);
982 return -1;
983 }
984
985 set_errno(0);
986 done_socket(sock);
987 return 0;
988 }
989
990 #if LWIP_TCP
991 /* Helper function to loop over receiving pbufs from netconn
992 * until "len" bytes are received or we're otherwise done.
993 * Keeps sock->lastdata for peeking or partly copying.
994 */
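/* Implementation note: the pbufs are fetched with NETCONN_NOAUTORCVD, so the
 * TCP receive window is only re-opened by the netconn_tcp_recvd() call at the
 * end, i.e. after the data has actually been copied to the application buffer.
 * With MSG_PEEK the pbuf stays in sock->lastdata and the window is not advanced. */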
995 static ssize_t
996 lwip_recv_tcp(struct lwip_sock *sock, void *mem, size_t len, int flags)
997 {
998 u8_t apiflags = NETCONN_NOAUTORCVD;
999 ssize_t recvd = 0;
1000 ssize_t recv_left = (len <= SSIZE_MAX) ? (ssize_t)len : SSIZE_MAX;
1001
1002 LWIP_ASSERT("no socket given", sock != NULL);
1003 LWIP_ASSERT("this should be checked internally", NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP);
1004
1005 if (flags & MSG_DONTWAIT) {
1006 apiflags |= NETCONN_DONTBLOCK;
1007 }
1008
1009 do {
1010 struct pbuf *p;
1011 err_t err;
1012 u16_t copylen;
1013
1014 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: top while sock->lastdata=%p\n", (void *)sock->lastdata.pbuf));
1015 /* Check if there is data left from the last recv operation. */
1016 if (sock->lastdata.pbuf) {
1017 p = sock->lastdata.pbuf;
1018 } else {
1019 /* No data was left from the previous operation, so we try to get
1020 some from the network. */
1021 err = netconn_recv_tcp_pbuf_flags(sock->conn, &p, apiflags);
1022 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: netconn_recv err=%d, pbuf=%p\n",
1023 err, (void *)p));
1024
1025 if (err != ERR_OK) {
1026 if (recvd > 0) {
1027 /* already received data, return that (this trusts in getting the same error from
1028 netconn layer again next time netconn_recv is called) */
1029 goto lwip_recv_tcp_done;
1030 }
1031 /* We should really do some error checking here. */
1032 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: p == NULL, error is \"%s\"!\n",
1033 lwip_strerr(err)));
1034 set_errno(err_to_errno(err));
1035 if (err == ERR_CLSD) {
1036 return 0;
1037 } else {
1038 return -1;
1039 }
1040 }
1041 LWIP_ASSERT("p != NULL", p != NULL);
1042 sock->lastdata.pbuf = p;
1043 }
1044
1045 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: buflen=%"U16_F" recv_left=%d off=%d\n",
1046 p->tot_len, (int)recv_left, (int)recvd));
1047
1048 if (recv_left > p->tot_len) {
1049 copylen = p->tot_len;
1050 } else {
1051 copylen = (u16_t)recv_left;
1052 }
1053 if (recvd > SSIZE_MAX - copylen) {
1054 /* overflow */
1055 copylen = (u16_t)(SSIZE_MAX - recvd);
1056 }
1057
1058 /* copy the contents of the received buffer into
1059 the supplied memory pointer mem */
1060 pbuf_copy_partial(p, (u8_t *)mem + recvd, copylen, 0);
1061
1062 recvd += copylen;
1063
1064 /* TCP combines multiple pbufs for one recv */
1065 LWIP_ASSERT("invalid copylen, len would underflow", recv_left >= copylen);
1066 recv_left -= copylen;
1067
1068 /* Unless we peek the incoming message... */
1069 if ((flags & MSG_PEEK) == 0) {
1070 /* ... check if there is data left in the pbuf */
1071 LWIP_ASSERT("invalid copylen", p->tot_len >= copylen);
1072 if (p->tot_len - copylen > 0) {
1073 /* If so, it should be saved in the sock structure for the next recv call.
1074 We store the pbuf but hide/free the consumed data: */
1075 sock->lastdata.pbuf = pbuf_free_header(p, copylen);
1076 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: lastdata now pbuf=%p\n", (void *)sock->lastdata.pbuf));
1077 } else {
1078 sock->lastdata.pbuf = NULL;
1079 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: deleting pbuf=%p\n", (void *)p));
1080 pbuf_free(p);
1081 }
1082 }
1083 /* once we have some data to return, only add more if we don't need to wait */
1084 apiflags |= NETCONN_DONTBLOCK | NETCONN_NOFIN;
1085 /* @todo: do we need to support peeking more than one pbuf? */
1086 } while ((recv_left > 0) && !(flags & MSG_PEEK));
1087 lwip_recv_tcp_done:
1088 if ((recvd > 0) && !(flags & MSG_PEEK)) {
1089 /* ensure window update after copying all data */
1090 netconn_tcp_recvd(sock->conn, (size_t)recvd);
1091 }
1092 set_errno(0);
1093 return recvd;
1094 }
1095 #endif
1096
1097 /* Convert a netbuf's address data to struct sockaddr */
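/* Returns 1 if the caller-supplied buffer was too small to hold the full
 * address (only *fromlen bytes are copied in that case), 0 otherwise.
 * Example: with *fromlen == 8 and an IPv4 peer, only the first 8 bytes of the
 * resulting sockaddr_in are stored and truncation is reported. */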
1098 static int
1099 lwip_sock_make_addr(struct netconn *conn, ip_addr_t *fromaddr, u16_t port,
1100 struct sockaddr *from, socklen_t *fromlen)
1101 {
1102 int truncated = 0;
1103 union sockaddr_aligned saddr;
1104
1105 LWIP_UNUSED_ARG(conn);
1106
1107 LWIP_ASSERT("fromaddr != NULL", fromaddr != NULL);
1108 LWIP_ASSERT("from != NULL", from != NULL);
1109 LWIP_ASSERT("fromlen != NULL", fromlen != NULL);
1110
1111 #if LWIP_IPV4 && LWIP_IPV6
1112 /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
1113 if (NETCONNTYPE_ISIPV6(netconn_type(conn)) && IP_IS_V4(fromaddr)) {
1114 ip4_2_ipv4_mapped_ipv6(ip_2_ip6(fromaddr), ip_2_ip4(fromaddr));
1115 IP_SET_TYPE(fromaddr, IPADDR_TYPE_V6);
1116 }
1117 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1118
1119 IPADDR_PORT_TO_SOCKADDR(&saddr, fromaddr, port);
1120 DF_NADDR(*fromaddr);
1121 if (*fromlen < IPADDR_SOCKADDR_GET_LEN(&saddr)) {
1122 truncated = 1;
1123 } else if (*fromlen > IPADDR_SOCKADDR_GET_LEN(&saddr)) {
1124 *fromlen = IPADDR_SOCKADDR_GET_LEN(&saddr);
1125 }
1126 MEMCPY(from, &saddr, *fromlen);
1127 return truncated;
1128 }
1129
1130 #if LWIP_TCP
1131 /* Helper function to get a tcp socket's remote address info */
1132 static int
1133 lwip_recv_tcp_from(struct lwip_sock *sock, struct sockaddr *from, socklen_t *fromlen, const char *dbg_fn, int dbg_s, ssize_t dbg_ret)
1134 {
1135 if (sock == NULL) {
1136 return 0;
1137 }
1138 LWIP_UNUSED_ARG(dbg_fn);
1139 LWIP_UNUSED_ARG(dbg_s);
1140 LWIP_UNUSED_ARG(dbg_ret);
1141
1142 #if !SOCKETS_DEBUG
1143 if (from && fromlen)
1144 #endif /* !SOCKETS_DEBUG */
1145 {
1146 /* get remote addr/port from tcp_pcb */
1147 u16_t port;
1148 ip_addr_t tmpaddr;
1149 err_t err = netconn_getaddr(sock->conn, &tmpaddr, &port, 0);
1150 LWIP_DEBUGF(SOCKETS_DEBUG, ("%s(%d): addr=", dbg_fn, dbg_s));
1151 ip_addr_debug_print_val(SOCKETS_DEBUG, tmpaddr);
1152 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", port, (int)dbg_ret));
1153 if (!err && from && fromlen) {
1154 return lwip_sock_make_addr(sock->conn, &tmpaddr, port, from, fromlen);
1155 }
1156 }
1157 return 0;
1158 }
1159 #endif
1160
1161 /* Helper function to receive a netbuf from a udp or raw netconn.
1162 * Keeps sock->lastdata for peeking.
1163 */
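/* On success, *datagram_len receives the full length of the datagram, while
 * the amount of data copied is limited by the iov buffers; callers compare
 * the two to detect truncation (lwip_recvmsg() sets MSG_TRUNC, lwip_recvfrom()
 * simply returns the shorter length). */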
1164 static err_t
1165 lwip_recvfrom_udp_raw(struct lwip_sock *sock, int flags, struct msghdr *msg, u16_t *datagram_len, int dbg_s)
1166 {
1167 struct netbuf *buf;
1168 u8_t apiflags;
1169 err_t err;
1170 u16_t buflen, copylen, copied;
1171 msg_iovlen_t i;
1172
1173 LWIP_UNUSED_ARG(dbg_s);
1174 LWIP_ERROR("lwip_recvfrom_udp_raw: invalid arguments", (msg->msg_iov != NULL) || (msg->msg_iovlen <= 0), return ERR_ARG;);
1175
1176 if (flags & MSG_DONTWAIT) {
1177 apiflags = NETCONN_DONTBLOCK;
1178 } else {
1179 apiflags = 0;
1180 }
1181
1182 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: top sock->lastdata=%p\n", (void *)sock->lastdata.netbuf));
1183 /* Check if there is data left from the last recv operation. */
1184 buf = sock->lastdata.netbuf;
1185 if (buf == NULL) {
1186 /* No data was left from the previous operation, so we try to get
1187 some from the network. */
1188 err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &buf, apiflags);
1189 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: netconn_recv err=%d, netbuf=%p\n",
1190 err, (void *)buf));
1191
1192 if (err != ERR_OK) {
1193 return err;
1194 }
1195 LWIP_ASSERT("buf != NULL", buf != NULL);
1196 sock->lastdata.netbuf = buf;
1197 }
1198 buflen = buf->p->tot_len;
1199 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw: buflen=%"U16_F"\n", buflen));
1200
1201 copied = 0;
1202 /* copy the pbuf payload into the iovs */
1203 for (i = 0; (i < msg->msg_iovlen) && (copied < buflen); i++) {
1204 u16_t len_left = (u16_t)(buflen - copied);
1205 if (msg->msg_iov[i].iov_len > len_left) {
1206 copylen = len_left;
1207 } else {
1208 copylen = (u16_t)msg->msg_iov[i].iov_len;
1209 }
1210
1211 /* copy the contents of the received buffer into
1212 the supplied memory buffer */
1213 pbuf_copy_partial(buf->p, (u8_t *)msg->msg_iov[i].iov_base, copylen, copied);
1214 copied = (u16_t)(copied + copylen);
1215 }
1216
1217 /* Check where the data came from. */
1218 #if !SOCKETS_DEBUG
1219 if (msg->msg_name && msg->msg_namelen)
1220 #endif /* !SOCKETS_DEBUG */
1221 {
1222 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw(%d): addr=", dbg_s));
1223 ip_addr_debug_print_val(SOCKETS_DEBUG, *netbuf_fromaddr(buf));
1224 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", netbuf_fromport(buf), copied));
1225 if (msg->msg_name && msg->msg_namelen) {
1226 lwip_sock_make_addr(sock->conn, netbuf_fromaddr(buf), netbuf_fromport(buf),
1227 (struct sockaddr *)msg->msg_name, &msg->msg_namelen);
1228 }
1229 }
1230
1231 /* Initialize flag output */
1232 msg->msg_flags = 0;
1233
1234 if (msg->msg_control) {
1235 u8_t wrote_msg = 0;
1236 #if LWIP_NETBUF_RECVINFO
1237 /* Check if packet info was recorded */
1238 if (buf->flags & NETBUF_FLAG_DESTADDR) {
1239 if (IP_IS_V4(&buf->toaddr)) {
1240 #if LWIP_IPV4
1241 if (msg->msg_controllen >= CMSG_SPACE(sizeof(struct in_pktinfo))) {
1242 struct cmsghdr *chdr = CMSG_FIRSTHDR(msg); /* This will always return a header!! */
1243 struct in_pktinfo *pkti = (struct in_pktinfo *)CMSG_DATA(chdr);
1244 chdr->cmsg_level = IPPROTO_IP;
1245 chdr->cmsg_type = IP_PKTINFO;
1246 chdr->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
1247 pkti->ipi_ifindex = buf->p->if_idx;
1248 inet_addr_from_ip4addr(&pkti->ipi_addr, ip_2_ip4(netbuf_destaddr(buf)));
1249 msg->msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo));
1250 wrote_msg = 1;
1251 } else {
1252 msg->msg_flags |= MSG_CTRUNC;
1253 }
1254 #endif /* LWIP_IPV4 */
1255 }
1256 }
1257 #endif /* LWIP_NETBUF_RECVINFO */
1258
1259 if (!wrote_msg) {
1260 msg->msg_controllen = 0;
1261 }
1262 }
1263
1264 /* If we don't peek the incoming message: zero lastdata pointer and free the netbuf */
1265 if ((flags & MSG_PEEK) == 0) {
1266 sock->lastdata.netbuf = NULL;
1267 netbuf_delete(buf);
1268 }
1269 if (datagram_len) {
1270 *datagram_len = buflen;
1271 }
1272 return ERR_OK;
1273 }
1274
1275 ssize_t
1276 lwip_recvfrom(int s, void *mem, size_t len, int flags,
1277 struct sockaddr *from, socklen_t *fromlen)
1278 {
1279 #if LWIP_ENABLE_DISTRIBUTED_NET && LWIP_USE_GET_HOST_BY_NAME_EXTERNAL
1280 if (!is_distributed_net_enabled()) {
1281 return lwip_recvfrom_internal(s, mem, len, flags, from, fromlen);
1282 }
1283 return distributed_net_recvfrom(s, mem, len, flags, from, fromlen);
1284 }
1285
1286 ssize_t
1287 lwip_recvfrom_internal(int s, void *mem, size_t len, int flags,
1288 struct sockaddr *from, socklen_t *fromlen)
1289 {
1290 #endif
1291 struct lwip_sock *sock;
1292 ssize_t ret;
1293
1294 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d, %p, %"SZT_F", 0x%x, ..)\n", s, mem, len, flags));
1295 sock = get_socket(s);
1296 if (!sock) {
1297 return -1;
1298 }
1299 #if LWIP_TCP
1300 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1301 ret = lwip_recv_tcp(sock, mem, len, flags);
1302 lwip_recv_tcp_from(sock, from, fromlen, "lwip_recvfrom", s, ret);
1303 done_socket(sock);
1304 return ret;
1305 } else
1306 #endif
1307 {
1308 u16_t datagram_len = 0;
1309 struct iovec vec;
1310 struct msghdr msg;
1311 err_t err;
1312 vec.iov_base = mem;
1313 vec.iov_len = len;
1314 msg.msg_control = NULL;
1315 msg.msg_controllen = 0;
1316 msg.msg_flags = 0;
1317 msg.msg_iov = &vec;
1318 msg.msg_iovlen = 1;
1319 msg.msg_name = from;
1320 msg.msg_namelen = (fromlen ? *fromlen : 0);
1321 err = lwip_recvfrom_udp_raw(sock, flags, &msg, &datagram_len, s);
1322 if (err != ERR_OK) {
1323 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
1324 s, lwip_strerr(err)));
1325 set_errno(err_to_errno(err));
1326 done_socket(sock);
1327 return -1;
1328 }
1329 ret = (ssize_t)LWIP_MIN(LWIP_MIN(len, datagram_len), SSIZE_MAX);
1330 if (fromlen) {
1331 *fromlen = msg.msg_namelen;
1332 }
1333 }
1334
1335 set_errno(0);
1336 done_socket(sock);
1337 return ret;
1338 }
1339
1340 ssize_t
1341 lwip_read(int s, void *mem, size_t len)
1342 {
1343 return lwip_recvfrom(s, mem, len, 0, NULL, NULL);
1344 }
1345
1346 ssize_t
1347 lwip_readv(int s, const struct iovec *iov, int iovcnt)
1348 {
1349 struct msghdr msg;
1350
1351 msg.msg_name = NULL;
1352 msg.msg_namelen = 0;
1353 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1354 Blame the opengroup standard for this inconsistency. */
1355 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1356 msg.msg_iovlen = iovcnt;
1357 msg.msg_control = NULL;
1358 msg.msg_controllen = 0;
1359 msg.msg_flags = 0;
1360 return lwip_recvmsg(s, &msg, 0);
1361 }
1362
1363 ssize_t
1364 lwip_recv(int s, void *mem, size_t len, int flags)
1365 {
1366 return lwip_recvfrom(s, mem, len, flags, NULL, NULL);
1367 }
1368
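/* Scatter-read usage sketch for lwip_recvmsg() (application code, not part of
 * this file; identifiers are illustrative only):
 *
 *   char hdr[4], body[128];
 *   struct iovec iov[2] = { { hdr, sizeof(hdr) }, { body, sizeof(body) } };
 *   struct msghdr mh;
 *   memset(&mh, 0, sizeof(mh));
 *   mh.msg_iov = iov;
 *   mh.msg_iovlen = 2;
 *   ssize_t n = lwip_recvmsg(fd, &mh, 0);
 *
 * For UDP/RAW sockets the return value is the full datagram length; MSG_TRUNC
 * is set in mh.msg_flags if the datagram did not fit into the buffers. */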
1369 ssize_t
1370 lwip_recvmsg(int s, struct msghdr *message, int flags)
1371 {
1372 struct lwip_sock *sock;
1373 msg_iovlen_t i;
1374 ssize_t buflen;
1375
1376 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg(%d, message=%p, flags=0x%x)\n", s, (void *)message, flags));
1377 LWIP_ERROR("lwip_recvmsg: invalid message pointer", message != NULL, return ERR_ARG;);
1378 LWIP_ERROR("lwip_recvmsg: unsupported flags", (flags & ~(MSG_PEEK|MSG_DONTWAIT)) == 0,
1379 set_errno(EOPNOTSUPP); return -1;);
1380
1381 if ((message->msg_iovlen <= 0) || (message->msg_iovlen > IOV_MAX)) {
1382 set_errno(EMSGSIZE);
1383 return -1;
1384 }
1385
1386 sock = get_socket(s);
1387 if (!sock) {
1388 return -1;
1389 }
1390
1391 /* check for valid vectors */
1392 buflen = 0;
1393 for (i = 0; i < message->msg_iovlen; i++) {
1394 if ((message->msg_iov[i].iov_base == NULL) || ((ssize_t)message->msg_iov[i].iov_len <= 0) ||
1395 ((size_t)(ssize_t)message->msg_iov[i].iov_len != message->msg_iov[i].iov_len) ||
1396 ((ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len) <= 0)) {
1397 set_errno(err_to_errno(ERR_VAL));
1398 done_socket(sock);
1399 return -1;
1400 }
1401 buflen = (ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len);
1402 }
1403
1404 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1405 #if LWIP_TCP
1406 int recv_flags = flags;
1407 message->msg_flags = 0;
1408 /* recv the data */
1409 buflen = 0;
1410 for (i = 0; i < message->msg_iovlen; i++) {
1411 /* try to receive into this vector's buffer */
1412 ssize_t recvd_local = lwip_recv_tcp(sock, message->msg_iov[i].iov_base, message->msg_iov[i].iov_len, recv_flags);
1413 if (recvd_local > 0) {
1414 /* sum up received bytes */
1415 buflen += recvd_local;
1416 }
1417 if ((recvd_local < 0) || (recvd_local < (int)message->msg_iov[i].iov_len) ||
1418 (flags & MSG_PEEK)) {
1419 /* returned prematurely (or peeking, which might actually be limited to the first iov) */
1420 if (buflen <= 0) {
1421 /* nothing received at all, propagate the error */
1422 buflen = recvd_local;
1423 }
1424 break;
1425 }
1426 /* pass MSG_DONTWAIT to lwip_recv_tcp() to prevent waiting for more data */
1427 recv_flags |= MSG_DONTWAIT;
1428 }
1429 if (buflen > 0) {
1430 /* reset socket error since we have received something */
1431 set_errno(0);
1432 }
1433 /* " If the socket is connected, the msg_name and msg_namelen members shall be ignored." */
1434 done_socket(sock);
1435 return buflen;
1436 #else /* LWIP_TCP */
1437 set_errno(err_to_errno(ERR_ARG));
1438 done_socket(sock);
1439 return -1;
1440 #endif /* LWIP_TCP */
1441 }
1442 /* else, UDP and RAW NETCONNs */
1443 #if LWIP_UDP || LWIP_RAW
1444 {
1445 u16_t datagram_len = 0;
1446 err_t err;
1447 err = lwip_recvfrom_udp_raw(sock, flags, message, &datagram_len, s);
1448 if (err != ERR_OK) {
1449 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
1450 s, lwip_strerr(err)));
1451 set_errno(err_to_errno(err));
1452 done_socket(sock);
1453 return -1;
1454 }
1455 if (datagram_len > buflen) {
1456 message->msg_flags |= MSG_TRUNC;
1457 }
1458
1459 set_errno(0);
1460 done_socket(sock);
1461 return (int)datagram_len;
1462 }
1463 #else /* LWIP_UDP || LWIP_RAW */
1464 set_errno(err_to_errno(ERR_ARG));
1465 done_socket(sock);
1466 return -1;
1467 #endif /* LWIP_UDP || LWIP_RAW */
1468 }
1469
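/* Note on lwip_send() below: for TCP, netconn_write_partly() may queue fewer
 * than 'size' bytes (e.g. with MSG_DONTWAIT or on a non-blocking socket); in
 * that case the number of bytes actually queued is returned and the caller is
 * expected to retry with the remainder. For UDP/RAW sockets the call is simply
 * forwarded to lwip_sendto() with a NULL destination address. */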
1470 ssize_t
1471 lwip_send(int s, const void *data, size_t size, int flags)
1472 {
1473 struct lwip_sock *sock;
1474 err_t err;
1475 u8_t write_flags;
1476 size_t written;
1477
1478 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d, data=%p, size=%"SZT_F", flags=0x%x)\n",
1479 s, data, size, flags));
1480
1481 sock = get_socket(s);
1482 if (!sock) {
1483 return -1;
1484 }
1485
1486 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
1487 #if (LWIP_UDP || LWIP_RAW)
1488 done_socket(sock);
1489 #if LWIP_ENABLE_DISTRIBUTED_NET && LWIP_USE_GET_HOST_BY_NAME_EXTERNAL
1490 return lwip_sendto_internal(s, data, size, flags, NULL, 0);
1491 #else
1492 return lwip_sendto(s, data, size, flags, NULL, 0);
1493 #endif
1494 #else /* (LWIP_UDP || LWIP_RAW) */
1495 set_errno(err_to_errno(ERR_ARG));
1496 done_socket(sock);
1497 return -1;
1498 #endif /* (LWIP_UDP || LWIP_RAW) */
1499 }
1500
1501 write_flags = (u8_t)(NETCONN_COPY |
1502 ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
1503 ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));
1504 written = 0;
1505 err = netconn_write_partly(sock->conn, data, size, write_flags, &written);
1506
1507 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d) err=%d written=%"SZT_F"\n", s, err, written));
1508 set_errno(err_to_errno(err));
1509 done_socket(sock);
1510 /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
1511 return (err == ERR_OK ? (ssize_t)written : -1);
1512 }
1513
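/* Note on lwip_sendmsg() below: for UDP/RAW sockets the msg_iov vectors are
 * gathered into a single netbuf: with LWIP_NETIF_TX_SINGLE_PBUF they are
 * copied into one flat pbuf, otherwise a PBUF_REF chain referencing the iov
 * buffers is built. The total datagram size must fit into 16 bits, otherwise
 * EMSGSIZE is returned. msg_control, msg_controllen and msg_flags are ignored. */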
1514 ssize_t
1515 lwip_sendmsg(int s, const struct msghdr *msg, int flags)
1516 {
1517 #if LWIP_ENABLE_DISTRIBUTED_NET && LWIP_USE_GET_HOST_BY_NAME_EXTERNAL && LWIP_DISTRIBUTED_NET_ENABLE_SENDMSG
1518 if (!is_distributed_net_enabled()) {
1519 return lwip_sendmsg_internal(s, msg, flags);
1520 }
1521 return distributed_net_sendmsg(s, msg, flags);
1522 }
1523
1524 ssize_t
1525 lwip_sendmsg_internal(int s, const struct msghdr *msg, int flags)
1526 {
1527 #endif
1528 struct lwip_sock *sock;
1529 #if LWIP_TCP
1530 u8_t write_flags;
1531 size_t written;
1532 #endif
1533 err_t err = ERR_OK;
1534
1535 sock = get_socket(s);
1536 if (!sock) {
1537 return -1;
1538 }
1539
1540 LWIP_ERROR("lwip_sendmsg: invalid msghdr", msg != NULL,
1541 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1542 LWIP_ERROR("lwip_sendmsg: invalid msghdr iov", msg->msg_iov != NULL,
1543 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1544 LWIP_ERROR("lwip_sendmsg: maximum iovs exceeded", (msg->msg_iovlen > 0) && (msg->msg_iovlen <= IOV_MAX),
1545 set_errno(EMSGSIZE); done_socket(sock); return -1;);
1546 LWIP_ERROR("lwip_sendmsg: unsupported flags", (flags & ~(MSG_DONTWAIT | MSG_MORE)) == 0,
1547 set_errno(EOPNOTSUPP); done_socket(sock); return -1;);
1548
1549 LWIP_UNUSED_ARG(msg->msg_control);
1550 LWIP_UNUSED_ARG(msg->msg_controllen);
1551 LWIP_UNUSED_ARG(msg->msg_flags);
1552
1553 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1554 #if LWIP_TCP
1555 write_flags = (u8_t)(NETCONN_COPY |
1556 ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
1557 ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));
1558
1559 written = 0;
1560 err = netconn_write_vectors_partly(sock->conn, (struct netvector *)msg->msg_iov, (u16_t)msg->msg_iovlen, write_flags, &written);
1561 set_errno(err_to_errno(err));
1562 done_socket(sock);
1563 /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
1564 return (err == ERR_OK ? (ssize_t)written : -1);
1565 #else /* LWIP_TCP */
1566 set_errno(err_to_errno(ERR_ARG));
1567 done_socket(sock);
1568 return -1;
1569 #endif /* LWIP_TCP */
1570 }
1571 /* else, UDP and RAW NETCONNs */
1572 #if LWIP_UDP || LWIP_RAW
1573 {
1574 struct netbuf chain_buf;
1575 msg_iovlen_t i;
1576 ssize_t size = 0;
1577
1578 LWIP_UNUSED_ARG(flags);
1579 LWIP_ERROR("lwip_sendmsg: invalid msghdr name", (((msg->msg_name == NULL) && (msg->msg_namelen == 0)) ||
1580 IS_SOCK_ADDR_LEN_VALID(msg->msg_namelen)),
1581 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1582
1583 /* initialize chain buffer with destination */
1584 memset(&chain_buf, 0, sizeof(struct netbuf));
1585 if (msg->msg_name) {
1586 u16_t remote_port;
1587 SOCKADDR_TO_IPADDR_PORT((const struct sockaddr *)msg->msg_name, &chain_buf.addr, remote_port);
1588 netbuf_fromport(&chain_buf) = remote_port;
1589 }
1590 #if LWIP_NETIF_TX_SINGLE_PBUF
1591 for (i = 0; i < msg->msg_iovlen; i++) {
1592 size += msg->msg_iov[i].iov_len;
1593 if ((msg->msg_iov[i].iov_len > INT_MAX) || (size < (int)msg->msg_iov[i].iov_len)) {
1594 /* overflow */
1595 goto sendmsg_emsgsize;
1596 }
1597 }
1598 if (size > 0xFFFF) {
1599 /* overflow */
1600 goto sendmsg_emsgsize;
1601 }
1602 /* Allocate a new netbuf and copy the data into it. */
1603 if (netbuf_alloc(&chain_buf, (u16_t)size) == NULL) {
1604 err = ERR_MEM;
1605 } else {
1606 /* flatten the IO vectors */
1607 size_t offset = 0;
1608 for (i = 0; i < msg->msg_iovlen; i++) {
1609 MEMCPY(&((u8_t *)chain_buf.p->payload)[offset], msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len);
1610 offset += msg->msg_iov[i].iov_len;
1611 }
1612 #if LWIP_CHECKSUM_ON_COPY
1613 {
1614 /* This can be improved by using LWIP_CHKSUM_COPY() and aggregating the checksum for each IO vector */
1615 u16_t chksum = ~inet_chksum_pbuf(chain_buf.p);
1616 netbuf_set_chksum(&chain_buf, chksum);
1617 }
1618 #endif /* LWIP_CHECKSUM_ON_COPY */
1619 err = ERR_OK;
1620 }
1621 #else /* LWIP_NETIF_TX_SINGLE_PBUF */
1622 /* create a chained netbuf from the IO vectors. NOTE: we assemble a pbuf chain
1623 manually to avoid having to allocate, chain, and delete a netbuf for each iov */
1624 for (i = 0; i < msg->msg_iovlen; i++) {
1625 struct pbuf *p;
1626 if (msg->msg_iov[i].iov_len > 0xFFFF) {
1627 /* overflow */
1628 goto sendmsg_emsgsize;
1629 }
1630 p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF);
1631 if (p == NULL) {
1632 err = ERR_MEM; /* let netbuf_delete() cleanup chain_buf */
1633 break;
1634 }
1635 p->payload = msg->msg_iov[i].iov_base;
1636 p->len = p->tot_len = (u16_t)msg->msg_iov[i].iov_len;
1637 /* netbuf empty, add new pbuf */
1638 if (chain_buf.p == NULL) {
1639 chain_buf.p = chain_buf.ptr = p;
1640 /* add pbuf to existing pbuf chain */
1641 } else {
1642 if (chain_buf.p->tot_len + p->len > 0xffff) {
1643 /* overflow */
1644 pbuf_free(p);
1645 goto sendmsg_emsgsize;
1646 }
1647 pbuf_cat(chain_buf.p, p);
1648 }
1649 }
1650 /* save size of total chain */
1651 if (err == ERR_OK) {
1652 size = netbuf_len(&chain_buf);
1653 }
1654 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
1655
1656 if (err == ERR_OK) {
1657 #if LWIP_IPV4 && LWIP_IPV6
1658 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
1659 if (IP_IS_V6_VAL(chain_buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&chain_buf.addr))) {
1660 unmap_ipv4_mapped_ipv6(ip_2_ip4(&chain_buf.addr), ip_2_ip6(&chain_buf.addr));
1661 IP_SET_TYPE_VAL(chain_buf.addr, IPADDR_TYPE_V4);
1662 }
1663 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1664
1665 /* send the data */
1666 err = netconn_send(sock->conn, &chain_buf);
1667 }
1668
1669    /* deallocate the buffer */
1670 netbuf_free(&chain_buf);
1671
1672 set_errno(err_to_errno(err));
1673 done_socket(sock);
1674 return (err == ERR_OK ? size : -1);
1675 sendmsg_emsgsize:
1676 set_errno(EMSGSIZE);
1677 netbuf_free(&chain_buf);
1678 done_socket(sock);
1679 return -1;
1680 }
1681 #else /* LWIP_UDP || LWIP_RAW */
1682 set_errno(err_to_errno(ERR_ARG));
1683 done_socket(sock);
1684 return -1;
1685 #endif /* LWIP_UDP || LWIP_RAW */
1686 }
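
/* Illustrative usage sketch (not part of the original sources): gathering one
 * UDP datagram from two buffers with lwip_sendmsg(). The payload strings and
 * the destination 192.0.2.1:7 are arbitrary example values; error handling
 * for socket creation is omitted for brevity.
 *
 *   struct sockaddr_in dst;
 *   struct iovec iov[2];
 *   struct msghdr mh;
 *   int fd = lwip_socket(AF_INET, SOCK_DGRAM, 0);
 *
 *   memset(&dst, 0, sizeof(dst));
 *   dst.sin_family = AF_INET;
 *   dst.sin_port = PP_HTONS(7);
 *   dst.sin_addr.s_addr = inet_addr("192.0.2.1");
 *
 *   iov[0].iov_base = "hello ";  iov[0].iov_len = 6;
 *   iov[1].iov_base = "world";   iov[1].iov_len = 5;
 *
 *   memset(&mh, 0, sizeof(mh));
 *   mh.msg_name = &dst;           // destination of the datagram
 *   mh.msg_namelen = sizeof(dst);
 *   mh.msg_iov = iov;             // both iovecs end up in a single datagram
 *   mh.msg_iovlen = 2;
 *
 *   if (lwip_sendmsg(fd, &mh, 0) < 0) {
 *     // errno has been set via set_errno(), e.g. EMSGSIZE if too large
 *   }
 */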
1687
1688 ssize_t
1689 lwip_sendto(int s, const void *data, size_t size, int flags,
1690 const struct sockaddr *to, socklen_t tolen)
1691 {
1692 #if LWIP_ENABLE_DISTRIBUTED_NET && LWIP_USE_GET_HOST_BY_NAME_EXTERNAL
1693 if (!is_distributed_net_enabled()) {
1694 return lwip_sendto_internal(s, data, size, flags, to, tolen);
1695 }
1696 return distributed_net_sendto(s, data, size, flags, to, tolen);
1697 }
1698
1699 ssize_t
1700 lwip_sendto_internal(int s, const void *data, size_t size, int flags,
1701 const struct sockaddr *to, socklen_t tolen)
1702 {
1703 #endif
1704 struct lwip_sock *sock;
1705 err_t err;
1706 u16_t short_size;
1707 u16_t remote_port;
1708 struct netbuf buf;
1709
1710 sock = get_socket(s);
1711 if (!sock) {
1712 return -1;
1713 }
1714
1715 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1716 #if LWIP_TCP
1717 done_socket(sock);
1718 return lwip_send(s, data, size, flags);
1719 #else /* LWIP_TCP */
1720 LWIP_UNUSED_ARG(flags);
1721 set_errno(err_to_errno(ERR_ARG));
1722 done_socket(sock);
1723 return -1;
1724 #endif /* LWIP_TCP */
1725 }
1726
1727 if (size > LWIP_MIN(0xFFFF, SSIZE_MAX)) {
1728 /* cannot fit into one datagram (at least for us) */
1729 set_errno(EMSGSIZE);
1730 done_socket(sock);
1731 return -1;
1732 }
1733 short_size = (u16_t)size;
1734 LWIP_ERROR("lwip_sendto: invalid address", (((to == NULL) && (tolen == 0)) ||
1735 (IS_SOCK_ADDR_LEN_VALID(tolen) &&
1736 ((to != NULL) && (IS_SOCK_ADDR_TYPE_VALID(to) && IS_SOCK_ADDR_ALIGNED(to))))),
1737 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1738 LWIP_UNUSED_ARG(tolen);
1739
1740 /* initialize a buffer */
1741 buf.p = buf.ptr = NULL;
1742 #if LWIP_CHECKSUM_ON_COPY
1743 buf.flags = 0;
1744 #endif /* LWIP_CHECKSUM_ON_COPY */
1745 if (to) {
1746 SOCKADDR_TO_IPADDR_PORT(to, &buf.addr, remote_port);
1747 } else {
1748 remote_port = 0;
1749 ip_addr_set_any(NETCONNTYPE_ISIPV6(netconn_type(sock->conn)), &buf.addr);
1750 }
1751 netbuf_fromport(&buf) = remote_port;
1752
1753
1754 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_sendto(%d, data=%p, short_size=%"U16_F", flags=0x%x to=",
1755 s, data, short_size, flags));
1756 ip_addr_debug_print_val(SOCKETS_DEBUG, buf.addr);
1757 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", remote_port));
1758
1759 /* make the buffer point to the data that should be sent */
1760 #if LWIP_NETIF_TX_SINGLE_PBUF
1761 /* Allocate a new netbuf and copy the data into it. */
1762 if (netbuf_alloc(&buf, short_size) == NULL) {
1763 err = ERR_MEM;
1764 } else {
1765 #if LWIP_CHECKSUM_ON_COPY
1766 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_RAW) {
1767 u16_t chksum = LWIP_CHKSUM_COPY(buf.p->payload, data, short_size);
1768 netbuf_set_chksum(&buf, chksum);
1769 } else
1770 #endif /* LWIP_CHECKSUM_ON_COPY */
1771 {
1772 MEMCPY(buf.p->payload, data, short_size);
1773 }
1774 err = ERR_OK;
1775 }
1776 #else /* LWIP_NETIF_TX_SINGLE_PBUF */
1777 err = netbuf_ref(&buf, data, short_size);
1778 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
1779 if (err == ERR_OK) {
1780 #if LWIP_IPV4 && LWIP_IPV6
1781 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
1782 if (IP_IS_V6_VAL(buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&buf.addr))) {
1783 unmap_ipv4_mapped_ipv6(ip_2_ip4(&buf.addr), ip_2_ip6(&buf.addr));
1784 IP_SET_TYPE_VAL(buf.addr, IPADDR_TYPE_V4);
1785 }
1786 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1787
1788 /* send the data */
1789 err = netconn_send(sock->conn, &buf);
1790 }
1791
1792   /* deallocate the buffer */
1793 netbuf_free(&buf);
1794
1795 set_errno(err_to_errno(err));
1796 done_socket(sock);
1797 return (err == ERR_OK ? short_size : -1);
1798 }
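
/* Illustrative usage sketch (not part of the original sources): sending a
 * single UDP datagram with lwip_sendto(). Destination 192.0.2.1:1234 is an
 * example value; note that the whole buffer must fit into one datagram or
 * the call fails with EMSGSIZE (see the size check above).
 *
 *   struct sockaddr_in dst;
 *   const char payload[] = "ping";
 *   int fd = lwip_socket(AF_INET, SOCK_DGRAM, 0);
 *
 *   memset(&dst, 0, sizeof(dst));
 *   dst.sin_family = AF_INET;
 *   dst.sin_port = PP_HTONS(1234);
 *   dst.sin_addr.s_addr = inet_addr("192.0.2.1");
 *
 *   if (lwip_sendto(fd, payload, sizeof(payload) - 1, 0,
 *                   (struct sockaddr *)&dst, sizeof(dst)) < 0) {
 *     // errno describes the failure
 *   }
 */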
1799
1800 int
1801 lwip_socket(int domain, int type, int protocol)
1802 {
1803 struct netconn *conn;
1804 int i;
1805
1806 LWIP_UNUSED_ARG(domain); /* @todo: check this */
1807
1808 /* create a netconn */
1809 switch (type) {
1810 case SOCK_RAW:
1811 conn = netconn_new_with_proto_and_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_RAW),
1812 (u8_t)protocol, DEFAULT_SOCKET_EVENTCB);
1813 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_RAW, %d) = ",
1814 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1815 break;
1816 case SOCK_DGRAM:
1817 conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain,
1818 ((protocol == IPPROTO_UDPLITE) ? NETCONN_UDPLITE : NETCONN_UDP)),
1819 DEFAULT_SOCKET_EVENTCB);
1820 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_DGRAM, %d) = ",
1821 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1822 #if LWIP_NETBUF_RECVINFO
1823 if (conn) {
1824 /* netconn layer enables pktinfo by default, sockets default to off */
1825 conn->flags &= ~NETCONN_FLAG_PKTINFO;
1826 }
1827 #endif /* LWIP_NETBUF_RECVINFO */
1828 break;
1829 case SOCK_STREAM:
1830 conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_TCP), DEFAULT_SOCKET_EVENTCB);
1831 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_STREAM, %d) = ",
1832 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1833 break;
1834 default:
1835 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%d, %d/UNKNOWN, %d) = -1\n",
1836 domain, type, protocol));
1837 set_errno(EINVAL);
1838 return -1;
1839 }
1840
1841 if (!conn) {
1842 LWIP_DEBUGF(SOCKETS_DEBUG, ("-1 / ENOBUFS (could not create netconn)\n"));
1843 set_errno(ENOBUFS);
1844 return -1;
1845 }
1846
1847 i = alloc_socket(conn, 0);
1848
1849 if (i == -1) {
1850 netconn_delete(conn);
1851 set_errno(ENFILE);
1852 return -1;
1853 }
1854 conn->callback_arg.socket = i;
1855 done_socket(&sockets[i - LWIP_SOCKET_OFFSET]);
1856 LWIP_DEBUGF(SOCKETS_DEBUG, ("%d\n", i));
1857 set_errno(0);
1858 return i;
1859 }
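
/* Illustrative sketch (not part of the original sources): how the (domain,
 * type, protocol) triple maps to the netconn flavour created above.
 *
 *   int tcp_fd  = lwip_socket(AF_INET, SOCK_STREAM, 0);               // NETCONN_TCP
 *   int udp_fd  = lwip_socket(AF_INET, SOCK_DGRAM,  0);               // NETCONN_UDP
 *   int lite_fd = lwip_socket(AF_INET, SOCK_DGRAM,  IPPROTO_UDPLITE); // NETCONN_UDPLITE
 *   int raw_fd  = lwip_socket(AF_INET, SOCK_RAW,    IPPROTO_ICMP);    // NETCONN_RAW
 *
 *   if (tcp_fd < 0) {
 *     // creation failed: errno is EINVAL, ENOBUFS or ENFILE (see above)
 *   }
 */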
1860
1861 ssize_t
1862 lwip_write(int s, const void *data, size_t size)
1863 {
1864 return lwip_send(s, data, size, 0);
1865 }
1866
1867 ssize_t
1868 lwip_writev(int s, const struct iovec *iov, int iovcnt)
1869 {
1870 struct msghdr msg;
1871
1872 msg.msg_name = NULL;
1873 msg.msg_namelen = 0;
1874 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1875 Blame the opengroup standard for this inconsistency. */
1876 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1877 msg.msg_iovlen = iovcnt;
1878 msg.msg_control = NULL;
1879 msg.msg_controllen = 0;
1880 msg.msg_flags = 0;
1881 return lwip_sendmsg(s, &msg, 0);
1882 }
1883
1884 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
1885 /* Add select_cb to select_cb_list. */
1886 static void
1887 lwip_link_select_cb(struct lwip_select_cb *select_cb)
1888 {
1889 LWIP_SOCKET_SELECT_DECL_PROTECT(lev);
1890
1891 /* Protect the select_cb_list */
1892 LWIP_SOCKET_SELECT_PROTECT(lev);
1893
1894 /* Put this select_cb on top of list */
1895 select_cb->next = select_cb_list;
1896 if (select_cb_list != NULL) {
1897 select_cb_list->prev = select_cb;
1898 }
1899 select_cb_list = select_cb;
1900 #if !LWIP_TCPIP_CORE_LOCKING
1901 /* Increasing this counter tells select_check_waiters that the list has changed. */
1902 select_cb_ctr++;
1903 #endif
1904
1905 /* Now we can safely unprotect */
1906 LWIP_SOCKET_SELECT_UNPROTECT(lev);
1907 }
1908
1909 /* Remove select_cb from select_cb_list. */
1910 static void
1911 lwip_unlink_select_cb(struct lwip_select_cb *select_cb)
1912 {
1913 LWIP_SOCKET_SELECT_DECL_PROTECT(lev);
1914
1915 /* Take us off the list */
1916 LWIP_SOCKET_SELECT_PROTECT(lev);
1917 if (select_cb->next != NULL) {
1918 select_cb->next->prev = select_cb->prev;
1919 }
1920 if (select_cb_list == select_cb) {
1921 LWIP_ASSERT("select_cb->prev == NULL", select_cb->prev == NULL);
1922 select_cb_list = select_cb->next;
1923 } else {
1924 LWIP_ASSERT("select_cb->prev != NULL", select_cb->prev != NULL);
1925 select_cb->prev->next = select_cb->next;
1926 }
1927 #if !LWIP_TCPIP_CORE_LOCKING
1928 /* Increasing this counter tells select_check_waiters that the list has changed. */
1929 select_cb_ctr++;
1930 #endif
1931 LWIP_SOCKET_SELECT_UNPROTECT(lev);
1932 }
1933 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
1934
1935 #if LWIP_SOCKET_SELECT
1936 /**
1937 * Go through the readset and writeset lists and see which socket of the sockets
1938 * set in the sets has events. On return, readset, writeset and exceptset have
1939 * the sockets enabled that had events.
1940 *
1941 * @param maxfdp1 the highest socket index in the sets
1942 * @param readset_in set of sockets to check for read events
1943 * @param writeset_in set of sockets to check for write events
1944 * @param exceptset_in set of sockets to check for error events
1945 * @param readset_out set of sockets that had read events
1946 * @param writeset_out set of sockets that had write events
1947  * @param exceptset_out set of sockets that had error events
1948 * @return number of sockets that had events (read/write/exception) (>= 0)
1949 */
1950 static int
1951 lwip_selscan(int maxfdp1, fd_set *readset_in, fd_set *writeset_in, fd_set *exceptset_in,
1952 fd_set *readset_out, fd_set *writeset_out, fd_set *exceptset_out)
1953 {
1954 int i, nready = 0;
1955 fd_set lreadset, lwriteset, lexceptset;
1956 struct lwip_sock *sock;
1957 SYS_ARCH_DECL_PROTECT(lev);
1958
1959 FD_ZERO(&lreadset);
1960 FD_ZERO(&lwriteset);
1961 FD_ZERO(&lexceptset);
1962
1963 /* Go through each socket in each list to count number of sockets which
1964 currently match */
1965 for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
1966 /* if this FD is not in the set, continue */
1967 if (!(readset_in && FD_ISSET(i, readset_in)) &&
1968 !(writeset_in && FD_ISSET(i, writeset_in)) &&
1969 !(exceptset_in && FD_ISSET(i, exceptset_in))) {
1970 continue;
1971 }
1972 /* First get the socket's status (protected)... */
1973 SYS_ARCH_PROTECT(lev);
1974 sock = tryget_socket_unconn_locked(i);
1975 if (sock != NULL) {
1976 void *lastdata = sock->lastdata.pbuf;
1977 s16_t rcvevent = sock->rcvevent;
1978 u16_t sendevent = sock->sendevent;
1979 u16_t errevent = sock->errevent;
1980 SYS_ARCH_UNPROTECT(lev);
1981
1982 /* ... then examine it: */
1983 /* See if netconn of this socket is ready for read */
1984 if (readset_in && FD_ISSET(i, readset_in) && ((lastdata != NULL) || (rcvevent > 0))) {
1985 FD_SET(i, &lreadset);
1986 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for reading\n", i));
1987 nready++;
1988 }
1989 /* See if netconn of this socket is ready for write */
1990 if (writeset_in && FD_ISSET(i, writeset_in) && (sendevent != 0)) {
1991 FD_SET(i, &lwriteset);
1992 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for writing\n", i));
1993 nready++;
1994 }
1995 /* See if netconn of this socket had an error */
1996 if (exceptset_in && FD_ISSET(i, exceptset_in) && (errevent != 0)) {
1997 FD_SET(i, &lexceptset);
1998 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for exception\n", i));
1999 nready++;
2000 }
2001 done_socket(sock);
2002 } else {
2003 SYS_ARCH_UNPROTECT(lev);
2004       /* not a valid open socket */
2005 return -1;
2006 }
2007 }
2008 /* copy local sets to the ones provided as arguments */
2009 *readset_out = lreadset;
2010 *writeset_out = lwriteset;
2011 *exceptset_out = lexceptset;
2012
2013 LWIP_ASSERT("nready >= 0", nready >= 0);
2014 return nready;
2015 }
2016
2017 #if LWIP_NETCONN_FULLDUPLEX
2018 /* Mark all of the set sockets in one of the three fdsets passed to select as used.
2019 * All sockets are marked (and later unmarked), whether they are open or not.
2020 * This is OK as lwip_selscan aborts select when non-open sockets are found.
2021 */
2022 static void
2023 lwip_select_inc_sockets_used_set(int maxfdp, fd_set *fdset, fd_set *used_sockets)
2024 {
2025 SYS_ARCH_DECL_PROTECT(lev);
2026 if (fdset) {
2027 int i;
2028 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
2029 /* if this FD is in the set, lock it (unless already done) */
2030 if (FD_ISSET(i, fdset) && !FD_ISSET(i, used_sockets)) {
2031 struct lwip_sock *sock;
2032 SYS_ARCH_PROTECT(lev);
2033 sock = tryget_socket_unconn_locked(i);
2034 if (sock != NULL) {
2035 /* leave the socket used until released by lwip_select_dec_sockets_used */
2036 FD_SET(i, used_sockets);
2037 }
2038 SYS_ARCH_UNPROTECT(lev);
2039 }
2040 }
2041 }
2042 }
2043
2044 /* Mark all sockets passed to select as used to prevent them from being freed
2045 * from other threads while select is running.
2046  * Marked sockets are added to 'used_sockets' to mark them only once and be able
2047 * to unmark them correctly.
2048 */
2049 static void
2050 lwip_select_inc_sockets_used(int maxfdp, fd_set *fdset1, fd_set *fdset2, fd_set *fdset3, fd_set *used_sockets)
2051 {
2052 FD_ZERO(used_sockets);
2053 lwip_select_inc_sockets_used_set(maxfdp, fdset1, used_sockets);
2054 lwip_select_inc_sockets_used_set(maxfdp, fdset2, used_sockets);
2055 lwip_select_inc_sockets_used_set(maxfdp, fdset3, used_sockets);
2056 }
2057
2058 /* Let go all sockets that were marked as used when starting select */
2059 static void
2060 lwip_select_dec_sockets_used(int maxfdp, fd_set *used_sockets)
2061 {
2062 int i;
2063 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
2064 /* if this FD is not in the set, continue */
2065 if (FD_ISSET(i, used_sockets)) {
2066 struct lwip_sock *sock = tryget_socket_unconn_nouse(i);
2067 LWIP_ASSERT("socket gone at the end of select", sock != NULL);
2068 if (sock != NULL) {
2069 done_socket(sock);
2070 }
2071 }
2072 }
2073 }
2074 #else /* LWIP_NETCONN_FULLDUPLEX */
2075 #define lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, used_sockets)
2076 #define lwip_select_dec_sockets_used(maxfdp1, used_sockets)
2077 #endif /* LWIP_NETCONN_FULLDUPLEX */
2078
2079 int
2080 lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset,
2081 struct timeval *timeout)
2082 {
2083 u32_t waitres = 0;
2084 int nready;
2085 fd_set lreadset, lwriteset, lexceptset;
2086 u32_t msectimeout;
2087 int i;
2088 int maxfdp2;
2089 #if LWIP_NETCONN_SEM_PER_THREAD
2090 int waited = 0;
2091 #endif
2092 #if LWIP_NETCONN_FULLDUPLEX
2093 fd_set used_sockets;
2094 #endif
2095 SYS_ARCH_DECL_PROTECT(lev);
2096
2097 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%"S32_F" tvusec=%"S32_F")\n",
2098 maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset,
2099 timeout ? (s32_t)timeout->tv_sec : (s32_t) - 1,
2100 timeout ? (s32_t)timeout->tv_usec : (s32_t) - 1));
2101
2102 if ((maxfdp1 < 0) || (maxfdp1 > LWIP_SELECT_MAXNFDS)) {
2103 set_errno(EINVAL);
2104 return -1;
2105 }
2106
2107 lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, &used_sockets);
2108
2109 /* Go through each socket in each list to count number of sockets which
2110 currently match */
2111 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2112
2113 if (nready < 0) {
2114 /* one of the sockets in one of the fd_sets was invalid */
2115 set_errno(EBADF);
2116 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2117 return -1;
2118 } else if (nready > 0) {
2119 /* one or more sockets are set, no need to wait */
2120 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
2121 } else {
2122 /* If we don't have any current events, then suspend if we are supposed to */
2123 if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) {
2124 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n"));
2125 /* This is OK as the local fdsets are empty and nready is zero,
2126 or we would have returned earlier. */
2127 } else {
2128 /* None ready: add our semaphore to list:
2129 We don't actually need any dynamic memory. Our entry on the
2130 list is only valid while we are in this function, so it's ok
2131 to use local variables (unless we're running in MPU compatible
2132 mode). */
2133 API_SELECT_CB_VAR_DECLARE(select_cb);
2134 API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(ENOMEM); lwip_select_dec_sockets_used(maxfdp1, &used_sockets); return -1);
2135 memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));
2136
2137 API_SELECT_CB_VAR_REF(select_cb).readset = readset;
2138 API_SELECT_CB_VAR_REF(select_cb).writeset = writeset;
2139 API_SELECT_CB_VAR_REF(select_cb).exceptset = exceptset;
2140 #if LWIP_NETCONN_SEM_PER_THREAD
2141 API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
2142 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2143 if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
2144 /* failed to create semaphore */
2145 set_errno(ENOMEM);
2146 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2147 API_SELECT_CB_VAR_FREE(select_cb);
2148 return -1;
2149 }
2150 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2151
2152 lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2153
2154 /* Increase select_waiting for each socket we are interested in */
2155 maxfdp2 = maxfdp1;
2156 for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
2157 if ((readset && FD_ISSET(i, readset)) ||
2158 (writeset && FD_ISSET(i, writeset)) ||
2159 (exceptset && FD_ISSET(i, exceptset))) {
2160 struct lwip_sock *sock;
2161 SYS_ARCH_PROTECT(lev);
2162 sock = tryget_socket_unconn_locked(i);
2163 if (sock != NULL) {
2164 sock->select_waiting++;
2165 if (sock->select_waiting == 0) {
2166 /* overflow - too many threads waiting */
2167 sock->select_waiting--;
2168 nready = -1;
2169 maxfdp2 = i;
2170 SYS_ARCH_UNPROTECT(lev);
2171 done_socket(sock);
2172 set_errno(EBUSY);
2173 break;
2174 }
2175 SYS_ARCH_UNPROTECT(lev);
2176 done_socket(sock);
2177 } else {
2178 /* Not a valid socket */
2179 nready = -1;
2180 maxfdp2 = i;
2181 SYS_ARCH_UNPROTECT(lev);
2182 set_errno(EBADF);
2183 break;
2184 }
2185 }
2186 }
2187
2188 if (nready >= 0) {
2189 /* Call lwip_selscan again: there could have been events between
2190 the last scan (without us on the list) and putting us on the list! */
2191 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2192 if (nready < 0) {
2193 set_errno(EBADF);
2194 } else if (!nready) {
2195 /* Still none ready, just wait to be woken */
2196 if (timeout == NULL) {
2197 /* Wait forever */
2198 msectimeout = 0;
2199 } else {
2200 long msecs_long = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500) / 1000));
2201 if (msecs_long <= 0) {
2202 /* Wait 1ms at least (0 means wait forever) */
2203 msectimeout = 1;
2204 } else {
2205 msectimeout = (u32_t)msecs_long;
2206 }
2207 }
2208
2209 waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
2210 #if LWIP_NETCONN_SEM_PER_THREAD
2211 waited = 1;
2212 #endif
2213 }
2214 }
2215
2216 /* Decrease select_waiting for each socket we are interested in */
2217 for (i = LWIP_SOCKET_OFFSET; i < maxfdp2; i++) {
2218 if ((readset && FD_ISSET(i, readset)) ||
2219 (writeset && FD_ISSET(i, writeset)) ||
2220 (exceptset && FD_ISSET(i, exceptset))) {
2221 struct lwip_sock *sock;
2222 SYS_ARCH_PROTECT(lev);
2223 sock = tryget_socket_unconn_nouse(i);
2224 LWIP_ASSERT("socket gone at the end of select", sock != NULL);
2225 if (sock != NULL) {
2226 /* for now, handle select_waiting==0... */
2227 LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
2228 if (sock->select_waiting > 0) {
2229 sock->select_waiting--;
2230 }
2231 SYS_ARCH_UNPROTECT(lev);
2232 } else {
2233 SYS_ARCH_UNPROTECT(lev);
2234 /* Not a valid socket */
2235 nready = -1;
2236 set_errno(EBADF);
2237 }
2238 }
2239 }
2240
2241 lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2242
2243 #if LWIP_NETCONN_SEM_PER_THREAD
2244 if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
2245 /* don't leave the thread-local semaphore signalled */
2246 sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
2247 }
2248 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2249 sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
2250 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2251 API_SELECT_CB_VAR_FREE(select_cb);
2252
2253 if (nready < 0) {
2254 /* This happens when a socket got closed while waiting */
2255 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2256 return -1;
2257 }
2258
2259 if (waitres == SYS_ARCH_TIMEOUT) {
2260 /* Timeout */
2261 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n"));
2262 /* This is OK as the local fdsets are empty and nready is zero,
2263 or we would have returned earlier. */
2264 } else {
2265 /* See what's set now after waiting */
2266 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2267 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
2268 if (nready < 0) {
2269 set_errno(EBADF);
2270 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2271 return -1;
2272 }
2273 }
2274 }
2275 }
2276
2277 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2278 set_errno(0);
2279 if (readset) {
2280 *readset = lreadset;
2281 }
2282 if (writeset) {
2283 *writeset = lwriteset;
2284 }
2285 if (exceptset) {
2286 *exceptset = lexceptset;
2287 }
2288 return nready;
2289 }
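
/* Illustrative usage sketch (not part of the original sources): waiting up to
 * one second for a single socket 'fd' (an already created lwIP descriptor,
 * assumed here) to become readable.
 *
 *   fd_set rset;
 *   struct timeval tv;
 *   int nready;
 *
 *   FD_ZERO(&rset);
 *   FD_SET(fd, &rset);
 *   tv.tv_sec = 1;
 *   tv.tv_usec = 0;
 *
 *   // maxfdp1 is the highest descriptor plus one, as with BSD select()
 *   nready = lwip_select(fd + 1, &rset, NULL, NULL, &tv);
 *   if (nready > 0 && FD_ISSET(fd, &rset)) {
 *     // a recv() on fd will not block now
 *   } else if (nready == 0) {
 *     // timeout expired
 *   } else {
 *     // error, e.g. EBADF if a descriptor in one of the sets was not open
 *   }
 */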
2290 #endif /* LWIP_SOCKET_SELECT */
2291
2292 #if LWIP_SOCKET_POLL
2293 /** Options for the lwip_pollscan function. */
2294 enum lwip_pollscan_opts
2295 {
2296 /** Clear revents in each struct pollfd. */
2297 LWIP_POLLSCAN_CLEAR = 1,
2298
2299 /** Increment select_waiting in each struct lwip_sock. */
2300 LWIP_POLLSCAN_INC_WAIT = 2,
2301
2302 /** Decrement select_waiting in each struct lwip_sock. */
2303 LWIP_POLLSCAN_DEC_WAIT = 4
2304 };
2305
2306 /**
2307 * Update revents in each struct pollfd.
2308 * Optionally update select_waiting in struct lwip_sock.
2309 *
2310 * @param fds array of structures to update
2311 * @param nfds number of structures in fds
2312 * @param opts what to update and how
2313 * @return number of structures that have revents != 0
2314 */
2315 static int
2316 lwip_pollscan(struct pollfd *fds, nfds_t nfds, enum lwip_pollscan_opts opts)
2317 {
2318 int nready = 0;
2319 nfds_t fdi;
2320 struct lwip_sock *sock;
2321 SYS_ARCH_DECL_PROTECT(lev);
2322
2323 /* Go through each struct pollfd in the array. */
2324 for (fdi = 0; fdi < nfds; fdi++) {
2325 if ((opts & LWIP_POLLSCAN_CLEAR) != 0) {
2326 fds[fdi].revents = 0;
2327 }
2328
2329 /* Negative fd means the caller wants us to ignore this struct.
2330 POLLNVAL means we already detected that the fd is invalid;
2331 if another thread has since opened a new socket with that fd,
2332 we must not use that socket. */
2333 if (fds[fdi].fd >= 0 && (fds[fdi].revents & POLLNVAL) == 0) {
2334 /* First get the socket's status (protected)... */
2335 SYS_ARCH_PROTECT(lev);
2336 sock = tryget_socket_unconn_locked(fds[fdi].fd);
2337 if (sock != NULL) {
2338 void* lastdata = sock->lastdata.pbuf;
2339 s16_t rcvevent = sock->rcvevent;
2340 u16_t sendevent = sock->sendevent;
2341 u16_t errevent = sock->errevent;
2342
2343 if ((opts & LWIP_POLLSCAN_INC_WAIT) != 0) {
2344 sock->select_waiting++;
2345 if (sock->select_waiting == 0) {
2346 /* overflow - too many threads waiting */
2347 sock->select_waiting--;
2348 nready = -1;
2349 SYS_ARCH_UNPROTECT(lev);
2350 done_socket(sock);
2351 break;
2352 }
2353 } else if ((opts & LWIP_POLLSCAN_DEC_WAIT) != 0) {
2354 /* for now, handle select_waiting==0... */
2355 LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
2356 if (sock->select_waiting > 0) {
2357 sock->select_waiting--;
2358 }
2359 }
2360 SYS_ARCH_UNPROTECT(lev);
2361 done_socket(sock);
2362
2363 /* ... then examine it: */
2364 /* See if netconn of this socket is ready for read */
2365 if ((fds[fdi].events & POLLIN) != 0 && ((lastdata != NULL) || (rcvevent > 0))) {
2366 fds[fdi].revents |= POLLIN;
2367 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for reading\n", fds[fdi].fd));
2368 }
2369 /* See if netconn of this socket is ready for write */
2370 if ((fds[fdi].events & POLLOUT) != 0 && (sendevent != 0)) {
2371 fds[fdi].revents |= POLLOUT;
2372 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for writing\n", fds[fdi].fd));
2373 }
2374 /* See if netconn of this socket had an error */
2375 if (errevent != 0) {
2376 /* POLLERR is output only. */
2377 fds[fdi].revents |= POLLERR;
2378 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for exception\n", fds[fdi].fd));
2379 }
2380 } else {
2381 /* Not a valid socket */
2382 SYS_ARCH_UNPROTECT(lev);
2383 /* POLLNVAL is output only. */
2384 fds[fdi].revents |= POLLNVAL;
2385 return -1;
2386 }
2387 }
2388
2389 /* Will return the number of structures that have events,
2390 not the number of events. */
2391 if (fds[fdi].revents != 0) {
2392 nready++;
2393 }
2394 }
2395
2396 LWIP_ASSERT("nready >= 0", nready >= 0);
2397 return nready;
2398 }
2399
2400 #if LWIP_NETCONN_FULLDUPLEX
2401 /* Mark all sockets as used.
2402 *
2403 * All sockets are marked (and later unmarked), whether they are open or not.
2404  * This is OK as lwip_pollscan aborts the poll when non-open sockets are found.
2405 */
2406 static void
2407 lwip_poll_inc_sockets_used(struct pollfd *fds, nfds_t nfds)
2408 {
2409 nfds_t fdi;
2410
2411 if(fds) {
2412 /* Go through each struct pollfd in the array. */
2413 for (fdi = 0; fdi < nfds; fdi++) {
2414 /* Increase the reference counter */
2415 tryget_socket_unconn(fds[fdi].fd);
2416 }
2417 }
2418 }
2419
2420 /* Let go all sockets that were marked as used when starting poll */
2421 static void
2422 lwip_poll_dec_sockets_used(struct pollfd *fds, nfds_t nfds)
2423 {
2424 nfds_t fdi;
2425
2426 if(fds) {
2427 /* Go through each struct pollfd in the array. */
2428 for (fdi = 0; fdi < nfds; fdi++) {
2429 struct lwip_sock *sock = tryget_socket_unconn_nouse(fds[fdi].fd);
2430 if (sock != NULL) {
2431 done_socket(sock);
2432 }
2433 }
2434 }
2435 }
2436 #else /* LWIP_NETCONN_FULLDUPLEX */
2437 #define lwip_poll_inc_sockets_used(fds, nfds)
2438 #define lwip_poll_dec_sockets_used(fds, nfds)
2439 #endif /* LWIP_NETCONN_FULLDUPLEX */
2440
2441 int
2442 lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout)
2443 {
2444 u32_t waitres = 0;
2445 int nready;
2446 u32_t msectimeout;
2447 #if LWIP_NETCONN_SEM_PER_THREAD
2448 int waited = 0;
2449 #endif
2450
2451 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll(%p, %d, %d)\n",
2452 (void*)fds, (int)nfds, timeout));
2453 LWIP_ERROR("lwip_poll: invalid fds", ((fds != NULL && nfds > 0) || (fds == NULL && nfds == 0)),
2454 set_errno(EINVAL); return -1;);
2455
2456 lwip_poll_inc_sockets_used(fds, nfds);
2457
2458 /* Go through each struct pollfd to count number of structures
2459 which currently match */
2460 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_CLEAR);
2461
2462 if (nready < 0) {
2463 lwip_poll_dec_sockets_used(fds, nfds);
2464 return -1;
2465 }
2466
2467 /* If we don't have any current events, then suspend if we are supposed to */
2468 if (!nready) {
2469 API_SELECT_CB_VAR_DECLARE(select_cb);
2470
2471 if (timeout == 0) {
2472 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: no timeout, returning 0\n"));
2473 goto return_success;
2474 }
2475 API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(EAGAIN); lwip_poll_dec_sockets_used(fds, nfds); return -1);
2476 memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));
2477
2478 /* None ready: add our semaphore to list:
2479 We don't actually need any dynamic memory. Our entry on the
2480 list is only valid while we are in this function, so it's ok
2481 to use local variables. */
2482
2483 API_SELECT_CB_VAR_REF(select_cb).poll_fds = fds;
2484 API_SELECT_CB_VAR_REF(select_cb).poll_nfds = nfds;
2485 #if LWIP_NETCONN_SEM_PER_THREAD
2486 API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
2487 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2488 if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
2489 /* failed to create semaphore */
2490 set_errno(EAGAIN);
2491 lwip_poll_dec_sockets_used(fds, nfds);
2492 API_SELECT_CB_VAR_FREE(select_cb);
2493 return -1;
2494 }
2495 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2496
2497 lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2498
2499 /* Increase select_waiting for each socket we are interested in.
2500 Also, check for events again: there could have been events between
2501 the last scan (without us on the list) and putting us on the list! */
2502 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_INC_WAIT);
2503
2504 if (!nready) {
2505 /* Still none ready, just wait to be woken */
2506 if (timeout < 0) {
2507 /* Wait forever */
2508 msectimeout = 0;
2509 } else {
2510 /* timeout == 0 would have been handled earlier. */
2511 LWIP_ASSERT("timeout > 0", timeout > 0);
2512 msectimeout = timeout;
2513 }
2514 waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
2515 #if LWIP_NETCONN_SEM_PER_THREAD
2516 waited = 1;
2517 #endif
2518 }
2519
2520 /* Decrease select_waiting for each socket we are interested in,
2521 and check which events occurred while we waited. */
2522 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_DEC_WAIT);
2523
2524 lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2525
2526 #if LWIP_NETCONN_SEM_PER_THREAD
2527 if (select_cb.sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
2528 /* don't leave the thread-local semaphore signalled */
2529 sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
2530 }
2531 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2532 sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
2533 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2534 API_SELECT_CB_VAR_FREE(select_cb);
2535
2536 if (nready < 0) {
2537 /* This happens when a socket got closed while waiting */
2538 lwip_poll_dec_sockets_used(fds, nfds);
2539 return -1;
2540 }
2541
2542 if (waitres == SYS_ARCH_TIMEOUT) {
2543 /* Timeout */
2544 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: timeout expired\n"));
2545 goto return_success;
2546 }
2547 }
2548
2549 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: nready=%d\n", nready));
2550 return_success:
2551 lwip_poll_dec_sockets_used(fds, nfds);
2552 set_errno(0);
2553 return nready;
2554 }
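
/* Illustrative usage sketch (not part of the original sources): polling one
 * descriptor 'fd' (an already created lwIP socket, assumed here) for
 * readability with a 500 ms timeout.
 *
 *   struct pollfd pfd;
 *   int nready;
 *
 *   pfd.fd = fd;
 *   pfd.events = POLLIN;
 *   pfd.revents = 0;
 *
 *   nready = lwip_poll(&pfd, 1, 500);   // timeout in ms; < 0 waits forever
 *   if (nready > 0) {
 *     if (pfd.revents & POLLIN) {
 *       // readable without blocking
 *     }
 *     if (pfd.revents & (POLLERR | POLLNVAL)) {
 *       // socket error or invalid descriptor
 *     }
 *   }
 */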
2555
2556 /**
2557 * Check whether event_callback should wake up a thread waiting in
2558 * lwip_poll.
2559 */
2560 static int
2561 lwip_poll_should_wake(const struct lwip_select_cb *scb, int fd, int has_recvevent, int has_sendevent, int has_errevent)
2562 {
2563 nfds_t fdi;
2564 for (fdi = 0; fdi < scb->poll_nfds; fdi++) {
2565 const struct pollfd *pollfd = &scb->poll_fds[fdi];
2566 if (pollfd->fd == fd) {
2567 /* Do not update pollfd->revents right here;
2568 that would be a data race because lwip_pollscan
2569 accesses revents without protecting. */
2570 if (has_recvevent && (pollfd->events & POLLIN) != 0) {
2571 return 1;
2572 }
2573 if (has_sendevent && (pollfd->events & POLLOUT) != 0) {
2574 return 1;
2575 }
2576 if (has_errevent) {
2577 /* POLLERR is output only. */
2578 return 1;
2579 }
2580 }
2581 }
2582 return 0;
2583 }
2584 #endif /* LWIP_SOCKET_POLL */
2585
2586 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
2587 /**
2588 * Callback registered in the netconn layer for each socket-netconn.
2589 * Processes recvevent (data available) and wakes up tasks waiting for select.
2590 *
2591 * @note for LWIP_TCPIP_CORE_LOCKING any caller of this function
2592 * must have the core lock held when signaling the following events
2593  * as they might cause select_cb_list to be checked:
2594 * NETCONN_EVT_RCVPLUS
2595 * NETCONN_EVT_SENDPLUS
2596 * NETCONN_EVT_ERROR
2597 * This requirement will be asserted in select_check_waiters()
2598 */
2599 static void
2600 event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len)
2601 {
2602 int s, check_waiters;
2603 struct lwip_sock *sock;
2604 SYS_ARCH_DECL_PROTECT(lev);
2605
2606 LWIP_UNUSED_ARG(len);
2607
2608 /* Get socket */
2609 if (conn) {
2610 s = conn->callback_arg.socket;
2611 if (s < 0) {
2612 /* Data comes in right away after an accept, even though
2613 * the server task might not have created a new socket yet.
2614 * Just count down (or up) if that's the case and we
2615 * will use the data later. Note that only receive events
2616 * can happen before the new socket is set up. */
2617 SYS_ARCH_PROTECT(lev);
2618 if (conn->callback_arg.socket < 0) {
2619 if (evt == NETCONN_EVT_RCVPLUS) {
2620 /* conn->socket is -1 on initialization
2621 lwip_accept adjusts sock->recvevent if conn->socket < -1 */
2622 conn->callback_arg.socket--;
2623 }
2624 SYS_ARCH_UNPROTECT(lev);
2625 return;
2626 }
2627 s = conn->callback_arg.socket;
2628 SYS_ARCH_UNPROTECT(lev);
2629 }
2630
2631 sock = get_socket(s);
2632 if (!sock) {
2633 return;
2634 }
2635 } else {
2636 return;
2637 }
2638
2639 check_waiters = 1;
2640 SYS_ARCH_PROTECT(lev);
2641 /* Set event as required */
2642 switch (evt) {
2643 case NETCONN_EVT_RCVPLUS:
2644 sock->rcvevent++;
2645 if (sock->rcvevent > 1) {
2646 check_waiters = 0;
2647 }
2648 break;
2649 case NETCONN_EVT_RCVMINUS:
2650 sock->rcvevent--;
2651 check_waiters = 0;
2652 break;
2653 case NETCONN_EVT_SENDPLUS:
2654 if (sock->sendevent) {
2655 check_waiters = 0;
2656 }
2657 sock->sendevent = 1;
2658 break;
2659 case NETCONN_EVT_SENDMINUS:
2660 sock->sendevent = 0;
2661 check_waiters = 0;
2662 break;
2663 case NETCONN_EVT_ERROR:
2664 sock->errevent = 1;
2665 break;
2666 default:
2667 LWIP_ASSERT("unknown event", 0);
2668 break;
2669 }
2670
2671 if (sock->select_waiting && check_waiters) {
2672 /* Save which events are active */
2673 int has_recvevent, has_sendevent, has_errevent;
2674 has_recvevent = sock->rcvevent > 0;
2675 has_sendevent = sock->sendevent != 0;
2676 has_errevent = sock->errevent != 0;
2677 SYS_ARCH_UNPROTECT(lev);
2678 /* Check any select calls waiting on this socket */
2679 select_check_waiters(s, has_recvevent, has_sendevent, has_errevent);
2680 } else {
2681 SYS_ARCH_UNPROTECT(lev);
2682 }
2683 poll_check_waiters(s, check_waiters);
2684 done_socket(sock);
2685 }
2686
2687 /**
2688 * Check if any select waiters are waiting on this socket and its events
2689 *
2690 * @note on synchronization of select_cb_list:
2691 * LWIP_TCPIP_CORE_LOCKING: the select_cb_list must only be accessed while holding
2692 * the core lock. We do a single pass through the list and signal any waiters.
2693 * Core lock should already be held when calling here!!!!
2694  *
2695 * !LWIP_TCPIP_CORE_LOCKING: we use SYS_ARCH_PROTECT but unlock on each iteration
2696 * of the loop, thus creating a possibility where a thread could modify the
2697 * select_cb_list during our UNPROTECT/PROTECT. We use a generational counter to
2698 * detect this change and restart the list walk. The list is expected to be small
2699 */
2700 static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent)
2701 {
2702 struct lwip_select_cb *scb;
2703 #if !LWIP_TCPIP_CORE_LOCKING
2704 int last_select_cb_ctr;
2705 SYS_ARCH_DECL_PROTECT(lev);
2706 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2707
2708 LWIP_ASSERT_CORE_LOCKED();
2709
2710 #if !LWIP_TCPIP_CORE_LOCKING
2711 SYS_ARCH_PROTECT(lev);
2712 again:
2713 /* remember the state of select_cb_list to detect changes */
2714 last_select_cb_ctr = select_cb_ctr;
2715 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2716 for (scb = select_cb_list; scb != NULL; scb = scb->next) {
2717 if (scb->sem_signalled == 0) {
2718 /* semaphore not signalled yet */
2719 int do_signal = 0;
2720 #if LWIP_SOCKET_POLL
2721 if (scb->poll_fds != NULL) {
2722 do_signal = lwip_poll_should_wake(scb, s, has_recvevent, has_sendevent, has_errevent);
2723 }
2724 #endif /* LWIP_SOCKET_POLL */
2725 #if LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL
2726 else
2727 #endif /* LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL */
2728 #if LWIP_SOCKET_SELECT
2729 {
2730 /* Test this select call for our socket */
2731 if (has_recvevent) {
2732 if (scb->readset && FD_ISSET(s, scb->readset)) {
2733 do_signal = 1;
2734 }
2735 }
2736 if (has_sendevent) {
2737 if (!do_signal && scb->writeset && FD_ISSET(s, scb->writeset)) {
2738 do_signal = 1;
2739 }
2740 }
2741 if (has_errevent) {
2742 if (!do_signal && scb->exceptset && FD_ISSET(s, scb->exceptset)) {
2743 do_signal = 1;
2744 }
2745 }
2746 }
2747 #endif /* LWIP_SOCKET_SELECT */
2748 if (do_signal) {
2749 scb->sem_signalled = 1;
2750 /* For !LWIP_TCPIP_CORE_LOCKING, we don't call SYS_ARCH_UNPROTECT() before signaling
2751 the semaphore, as this might lead to the select thread taking itself off the list,
2752 invalidating the semaphore. */
2753 sys_sem_signal(SELECT_SEM_PTR(scb->sem));
2754 }
2755 }
2756 #if LWIP_TCPIP_CORE_LOCKING
2757 }
2758 #else
2759 /* unlock interrupts with each step */
2760 SYS_ARCH_UNPROTECT(lev);
2761 /* this makes sure interrupt protection time is short */
2762 SYS_ARCH_PROTECT(lev);
2763 if (last_select_cb_ctr != select_cb_ctr) {
2764 /* someone has changed select_cb_list, restart at the beginning */
2765 goto again;
2766 }
2767 /* remember the state of select_cb_list to detect changes */
2768 last_select_cb_ctr = select_cb_ctr;
2769 }
2770 SYS_ARCH_UNPROTECT(lev);
2771 #endif
2772 }
2773 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
2774
2775 /**
2776 * Close one end of a full-duplex connection.
2777 */
2778 int
2779 lwip_shutdown(int s, int how)
2780 {
2781 struct lwip_sock *sock;
2782 err_t err;
2783 u8_t shut_rx = 0, shut_tx = 0;
2784
2785 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_shutdown(%d, how=%d)\n", s, how));
2786
2787 sock = get_socket(s);
2788 if (!sock) {
2789 return -1;
2790 }
2791
2792 if (sock->conn != NULL) {
2793 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
2794 set_errno(EOPNOTSUPP);
2795 done_socket(sock);
2796 return -1;
2797 }
2798 } else {
2799 set_errno(ENOTCONN);
2800 done_socket(sock);
2801 return -1;
2802 }
2803
2804 if (how == SHUT_RD) {
2805 shut_rx = 1;
2806 } else if (how == SHUT_WR) {
2807 shut_tx = 1;
2808 } else if (how == SHUT_RDWR) {
2809 shut_rx = 1;
2810 shut_tx = 1;
2811 } else {
2812 set_errno(EINVAL);
2813 done_socket(sock);
2814 return -1;
2815 }
2816 err = netconn_shutdown(sock->conn, shut_rx, shut_tx);
2817
2818 set_errno(err_to_errno(err));
2819 done_socket(sock);
2820 return (err == ERR_OK ? 0 : -1);
2821 }
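
/* Illustrative usage sketch (not part of the original sources): half-closing
 * a connected TCP socket once the request 'req'/'req_len' (assumed names) has
 * been written, then draining the peer's answer until EOF. Note that only TCP
 * sockets are supported by lwip_shutdown() (EOPNOTSUPP otherwise).
 *
 *   lwip_send(fd, req, req_len, 0);
 *   if (lwip_shutdown(fd, SHUT_WR) == 0) {
 *     char buf[128];
 *     ssize_t n;
 *     while ((n = lwip_recv(fd, buf, sizeof(buf), 0)) > 0) {
 *       // consume the remaining response; n == 0 means the peer closed too
 *     }
 *   }
 *   lwip_close(fd);
 */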
2822
2823 static int
2824 lwip_getaddrname(int s, struct sockaddr *name, socklen_t *namelen, u8_t local)
2825 {
2826 struct lwip_sock *sock;
2827 union sockaddr_aligned saddr;
2828 ip_addr_t naddr;
2829 u16_t port;
2830 err_t err;
2831
2832 sock = get_socket(s);
2833 if (!sock) {
2834 return -1;
2835 }
2836
2837 /* get the IP address and port */
2838 err = netconn_getaddr(sock->conn, &naddr, &port, local);
2839 if (err != ERR_OK) {
2840 set_errno(err_to_errno(err));
2841 done_socket(sock);
2842 return -1;
2843 }
2844
2845 #if LWIP_IPV4 && LWIP_IPV6
2846 /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
2847 if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) &&
2848 IP_IS_V4_VAL(naddr)) {
2849 ip4_2_ipv4_mapped_ipv6(ip_2_ip6(&naddr), ip_2_ip4(&naddr));
2850 IP_SET_TYPE_VAL(naddr, IPADDR_TYPE_V6);
2851 }
2852 #endif /* LWIP_IPV4 && LWIP_IPV6 */
2853
2854 IPADDR_PORT_TO_SOCKADDR(&saddr, &naddr, port);
2855
2856 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getaddrname(%d, addr=", s));
2857 ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
2858 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", port));
2859
2860 if (*namelen > IPADDR_SOCKADDR_GET_LEN(&saddr)) {
2861 *namelen = IPADDR_SOCKADDR_GET_LEN(&saddr);
2862 }
2863 MEMCPY(name, &saddr, *namelen);
2864
2865 set_errno(0);
2866 done_socket(sock);
2867 return 0;
2868 }
2869
2870 int
2871 lwip_getpeername(int s, struct sockaddr *name, socklen_t *namelen)
2872 {
2873 return lwip_getaddrname(s, name, namelen, 0);
2874 }
2875
2876 int
2877 lwip_getsockname(int s, struct sockaddr *name, socklen_t *namelen)
2878 {
2879 return lwip_getaddrname(s, name, namelen, 1);
2880 }
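
/* Illustrative usage sketch (not part of the original sources): reading back
 * the local address that was chosen for a socket, e.g. after binding to
 * port 0 (ephemeral port). 'fd' is an already bound lwIP socket.
 *
 *   struct sockaddr_in local;
 *   socklen_t len = sizeof(local);
 *
 *   if (lwip_getsockname(fd, (struct sockaddr *)&local, &len) == 0) {
 *     u16_t port = lwip_ntohs(local.sin_port);   // assigned local port
 *     // lwip_getpeername() works the same way for the remote end
 *   }
 */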
2881
2882 int
2883 lwip_getsockopt(int s, int level, int optname, void *optval, socklen_t *optlen)
2884 {
2885 int err;
2886 struct lwip_sock *sock = get_socket(s);
2887 #if !LWIP_TCPIP_CORE_LOCKING
2888 err_t cberr;
2889 LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
2890 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2891
2892 if (!sock) {
2893 return -1;
2894 }
2895
2896 if ((NULL == optval) || (NULL == optlen)) {
2897 set_errno(EFAULT);
2898 done_socket(sock);
2899 return -1;
2900 }
2901
2902 #if LWIP_TCPIP_CORE_LOCKING
2903 /* core-locking can just call the -impl function */
2904 LOCK_TCPIP_CORE();
2905 err = lwip_getsockopt_impl(s, level, optname, optval, optlen);
2906 UNLOCK_TCPIP_CORE();
2907
2908 #else /* LWIP_TCPIP_CORE_LOCKING */
2909
2910 #if LWIP_MPU_COMPATIBLE
2911 /* MPU_COMPATIBLE copies the optval data, so check for max size here */
2912 if (*optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
2913 set_errno(ENOBUFS);
2914 done_socket(sock);
2915 return -1;
2916 }
2917 #endif /* LWIP_MPU_COMPATIBLE */
2918
2919 LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
2920 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
2921 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
2922 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
2923 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = *optlen;
2924 #if !LWIP_MPU_COMPATIBLE
2925 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.p = optval;
2926 #endif /* !LWIP_MPU_COMPATIBLE */
2927 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
2928 #if LWIP_NETCONN_SEM_PER_THREAD
2929 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
2930 #else
2931 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
2932 #endif
2933 cberr = tcpip_callback(lwip_getsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
2934 if (cberr != ERR_OK) {
2935 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
2936 set_errno(err_to_errno(cberr));
2937 done_socket(sock);
2938 return -1;
2939 }
2940 sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);
2941
2942 /* write back optlen and optval */
2943 *optlen = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen;
2944 #if LWIP_MPU_COMPATIBLE
2945 MEMCPY(optval, LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval,
2946 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen);
2947 #endif /* LWIP_MPU_COMPATIBLE */
2948
2949 /* maybe lwip_getsockopt_impl has changed err */
2950 err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
2951 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
2952 #endif /* LWIP_TCPIP_CORE_LOCKING */
2953
2954 set_errno(err);
2955 done_socket(sock);
2956 return err ? -1 : 0;
2957 }
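
/* Illustrative usage sketch (not part of the original sources): fetching the
 * pending error of a socket 'fd', e.g. after a non-blocking connect has
 * signalled writability via select()/poll().
 *
 *   int so_error = 0;
 *   socklen_t optlen = sizeof(so_error);
 *
 *   if (lwip_getsockopt(fd, SOL_SOCKET, SO_ERROR, &so_error, &optlen) == 0) {
 *     if (so_error != 0) {
 *       // the asynchronous operation failed; so_error holds the errno value
 *     }
 *   }
 */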
2958
2959 #if !LWIP_TCPIP_CORE_LOCKING
2960 /** lwip_getsockopt_callback: only used without CORE_LOCKING
2961 * to get into the tcpip_thread
2962 */
2963 static void
2964 lwip_getsockopt_callback(void *arg)
2965 {
2966 struct lwip_setgetsockopt_data *data;
2967 LWIP_ASSERT("arg != NULL", arg != NULL);
2968 data = (struct lwip_setgetsockopt_data *)arg;
2969
2970 data->err = lwip_getsockopt_impl(data->s, data->level, data->optname,
2971 #if LWIP_MPU_COMPATIBLE
2972 data->optval,
2973 #else /* LWIP_MPU_COMPATIBLE */
2974 data->optval.p,
2975 #endif /* LWIP_MPU_COMPATIBLE */
2976 &data->optlen);
2977
2978 sys_sem_signal((sys_sem_t *)(data->completed_sem));
2979 }
2980 #endif /* LWIP_TCPIP_CORE_LOCKING */
2981
2982 static int
2983 lwip_sockopt_to_ipopt(int optname)
2984 {
2985 /* Map SO_* values to our internal SOF_* values
2986 * We should not rely on #defines in socket.h
2987 * being in sync with ip.h.
2988 */
2989 switch (optname) {
2990 case SO_BROADCAST:
2991 return SOF_BROADCAST;
2992 case SO_KEEPALIVE:
2993 return SOF_KEEPALIVE;
2994 case SO_REUSEADDR:
2995 return SOF_REUSEADDR;
2996 default:
2997 LWIP_ASSERT("Unknown socket option", 0);
2998 return 0;
2999 }
3000 }
3001
3002 #if LWIP_IPV6 && LWIP_RAW
3003 static void
3004 lwip_getsockopt_impl_ipv6_checksum(int s, struct lwip_sock* sock, void* optval)
3005 {
3006 if (sock->conn->pcb.raw->chksum_reqd == 0) {
3007 *(int*)optval = -1;
3008 }
3009 else {
3010 *(int*)optval = sock->conn->pcb.raw->chksum_offset;
3011 }
3012 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM) = %d\n",
3013 s, (*(int*)optval)));
3014 }
3015
3016 static int
3017 lwip_setsockopt_impl_ipv6_checksum(int s, struct lwip_sock* sock, const void* optval, socklen_t optlen)
3018 {
3019 /* It should not be possible to disable the checksum generation with ICMPv6
3020 * as per RFC 3542 chapter 3.1 */
3021 if (sock->conn->pcb.raw->protocol == IPPROTO_ICMPV6) {
3022 done_socket(sock);
3023 return EINVAL;
3024 }
3025
3026 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_RAW);
3027 if (*(const int*)optval < 0) {
3028 sock->conn->pcb.raw->chksum_reqd = 0;
3029 }
3030 else if (*(const int*)optval & 1) {
3031 /* Per RFC3542, odd offsets are not allowed */
3032 done_socket(sock);
3033 return EINVAL;
3034 }
3035 else {
3036 sock->conn->pcb.raw->chksum_reqd = 1;
3037 sock->conn->pcb.raw->chksum_offset = (u16_t) * (const int*)optval;
3038 }
3039 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM, ..) -> %d\n",
3040 s, sock->conn->pcb.raw->chksum_reqd));
3041 return 0;
3042 }
3043 #endif
3044
3045 /** lwip_getsockopt_impl: the actual implementation of getsockopt:
3046 * same argument as lwip_getsockopt, either called directly or through callback
3047 */
3048 static int
3049 lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen)
3050 {
3051 int err = 0;
3052 struct lwip_sock *sock = tryget_socket(s);
3053 if (!sock) {
3054 return EBADF;
3055 }
3056
3057 #ifdef LWIP_HOOK_SOCKETS_GETSOCKOPT
3058 if (LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) {
3059 done_socket(sock);
3060 return err;
3061 }
3062 #endif
3063
3064 switch (level) {
3065
3066 /* Level: SOL_SOCKET */
3067 case SOL_SOCKET:
3068 switch (optname) {
3069
3070 #if LWIP_TCP
3071 case SO_ACCEPTCONN:
3072 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3073 if (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_TCP) {
3074 done_socket(sock);
3075 return ENOPROTOOPT;
3076 }
3077 if ((sock->conn->pcb.tcp != NULL) && (sock->conn->pcb.tcp->state == LISTEN)) {
3078 *(int *)optval = 1;
3079 } else {
3080 *(int *)optval = 0;
3081 }
3082 break;
3083 #endif /* LWIP_TCP */
3084
3085 /* The option flags */
3086 case SO_BROADCAST:
3087 case SO_KEEPALIVE:
3088 #if SO_REUSE
3089 case SO_REUSEADDR:
3090 #endif /* SO_REUSE */
3091 if ((optname == SO_BROADCAST) &&
3092 (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
3093 done_socket(sock);
3094 return ENOPROTOOPT;
3095 }
3096
3097 optname = lwip_sockopt_to_ipopt(optname);
3098
3099 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3100 *(int *)optval = ip_get_option(sock->conn->pcb.ip, optname);
3101 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, optname=0x%x, ..) = %s\n",
3102 s, optname, (*(int *)optval ? "on" : "off")));
3103 break;
3104
3105 case SO_TYPE:
3106 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3107 switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
3108 case NETCONN_RAW:
3109 *(int *)optval = SOCK_RAW;
3110 break;
3111 case NETCONN_TCP:
3112 *(int *)optval = SOCK_STREAM;
3113 break;
3114 case NETCONN_UDP:
3115 *(int *)optval = SOCK_DGRAM;
3116 break;
3117 default: /* unrecognized socket type */
3118 *(int *)optval = netconn_type(sock->conn);
3119 LWIP_DEBUGF(SOCKETS_DEBUG,
3120 ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE): unrecognized socket type %d\n",
3121 s, *(int *)optval));
3122 } /* switch (netconn_type(sock->conn)) */
3123 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE) = %d\n",
3124 s, *(int *)optval));
3125 break;
3126
3127 case SO_ERROR:
3128 LWIP_SOCKOPT_CHECK_OPTLEN(sock, *optlen, int);
3129 *(int *)optval = err_to_errno(netconn_err(sock->conn));
3130 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_ERROR) = %d\n",
3131 s, *(int *)optval));
3132 break;
3133
3134 #if LWIP_SO_SNDTIMEO
3135 case SO_SNDTIMEO:
3136 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3137 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_sendtimeout(sock->conn));
3138 break;
3139 #endif /* LWIP_SO_SNDTIMEO */
3140 #if LWIP_SO_RCVTIMEO
3141 case SO_RCVTIMEO:
3142 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3143 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_recvtimeout(sock->conn));
3144 break;
3145 #endif /* LWIP_SO_RCVTIMEO */
3146 #if LWIP_SO_RCVBUF
3147 case SO_RCVBUF:
3148 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3149 *(int *)optval = netconn_get_recvbufsize(sock->conn);
3150 break;
3151 #endif /* LWIP_SO_RCVBUF */
3152 #if LWIP_SO_LINGER
3153 case SO_LINGER: {
3154 s16_t conn_linger;
3155 struct linger *linger = (struct linger *)optval;
3156 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, struct linger);
3157 conn_linger = sock->conn->linger;
3158 if (conn_linger >= 0) {
3159 linger->l_onoff = 1;
3160 linger->l_linger = (int)conn_linger;
3161 } else {
3162 linger->l_onoff = 0;
3163 linger->l_linger = 0;
3164 }
3165 }
3166 break;
3167 #endif /* LWIP_SO_LINGER */
3168 #if LWIP_UDP
3169 case SO_NO_CHECK:
3170 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_UDP);
3171 #if LWIP_UDPLITE
3172 if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) {
3173 /* this flag is only available for UDP, not for UDP lite */
3174 done_socket(sock);
3175 return EAFNOSUPPORT;
3176 }
3177 #endif /* LWIP_UDPLITE */
3178 *(int *)optval = udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM) ? 1 : 0;
3179 break;
3180 #endif /* LWIP_UDP*/
3181 default:
3182 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
3183 s, optname));
3184 err = ENOPROTOOPT;
3185 break;
3186 } /* switch (optname) */
3187 break;
3188
3189 /* Level: IPPROTO_IP */
3190 case IPPROTO_IP:
3191 switch (optname) {
3192 case IP_TTL:
3193 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3194 *(int *)optval = sock->conn->pcb.ip->ttl;
3195 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TTL) = %d\n",
3196 s, *(int *)optval));
3197 break;
3198 case IP_TOS:
3199 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3200 *(int *)optval = sock->conn->pcb.ip->tos;
3201 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TOS) = %d\n",
3202 s, *(int *)optval));
3203 break;
3204 #if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
3205 case IP_MULTICAST_TTL:
3206 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3207 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3208 done_socket(sock);
3209 return ENOPROTOOPT;
3210 }
3211 *(u8_t *)optval = udp_get_multicast_ttl(sock->conn->pcb.udp);
3212 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_TTL) = %d\n",
3213 s, *(int *)optval));
3214 break;
3215 case IP_MULTICAST_IF:
3216 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, struct in_addr);
3217 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3218 done_socket(sock);
3219 return ENOPROTOOPT;
3220 }
3221 inet_addr_from_ip4addr((struct in_addr *)optval, udp_get_multicast_netif_addr(sock->conn->pcb.udp));
3222 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_IF) = 0x%"X32_F"\n",
3223 s, *(u32_t *)optval));
3224 break;
3225 case IP_MULTICAST_LOOP:
3226 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3227 if ((sock->conn->pcb.udp->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) {
3228 *(u8_t *)optval = 1;
3229 } else {
3230 *(u8_t *)optval = 0;
3231 }
3232 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_LOOP) = %d\n",
3233 s, *(int *)optval));
3234 break;
3235 #endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
3236 default:
3237 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
3238 s, optname));
3239 err = ENOPROTOOPT;
3240 break;
3241 } /* switch (optname) */
3242 break;
3243
3244 #if LWIP_TCP
3245 /* Level: IPPROTO_TCP */
3246 case IPPROTO_TCP:
3247     /* Special case: all IPPROTO_TCP options take an int */
3248 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_TCP);
3249 if (sock->conn->pcb.tcp->state == LISTEN) {
3250 done_socket(sock);
3251 return EINVAL;
3252 }
3253 switch (optname) {
3254 case TCP_NODELAY:
3255 *(int *)optval = tcp_nagle_disabled(sock->conn->pcb.tcp);
3256 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_NODELAY) = %s\n",
3257 s, (*(int *)optval) ? "on" : "off") );
3258 break;
3259 case TCP_KEEPALIVE:
3260 *(int *)optval = (int)sock->conn->pcb.tcp->keep_idle;
3261 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) = %d\n",
3262 s, *(int *)optval));
3263 break;
3264
3265 #if LWIP_TCP_KEEPALIVE
3266 case TCP_KEEPIDLE:
3267 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_idle / 1000);
3268 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) = %d\n",
3269 s, *(int *)optval));
3270 break;
3271 case TCP_KEEPINTVL:
3272 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_intvl / 1000);
3273 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) = %d\n",
3274 s, *(int *)optval));
3275 break;
3276 case TCP_KEEPCNT:
3277 *(int *)optval = (int)sock->conn->pcb.tcp->keep_cnt;
3278 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) = %d\n",
3279 s, *(int *)optval));
3280 break;
3281 #endif /* LWIP_TCP_KEEPALIVE */
3282 default:
3283 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
3284 s, optname));
3285 err = ENOPROTOOPT;
3286 break;
3287 } /* switch (optname) */
3288 break;
3289 #endif /* LWIP_TCP */
3290
3291 #if LWIP_IPV6
3292 /* Level: IPPROTO_IPV6 */
3293 case IPPROTO_IPV6:
3294 switch (optname) {
3295 #if LWIP_IPV6 && LWIP_RAW
3296 case IPV6_CHECKSUM:
3297 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_RAW);
3298 lwip_getsockopt_impl_ipv6_checksum(s, sock, optval);
3299 break;
3300 #endif /* LWIP_IPV6 && LWIP_RAW */
3301 case IPV6_V6ONLY:
3302 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3303 *(int *)optval = (netconn_get_ipv6only(sock->conn) ? 1 : 0);
3304 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY) = %d\n",
3305 s, *(int *)optval));
3306 break;
3307 default:
3308 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
3309 s, optname));
3310 err = ENOPROTOOPT;
3311 break;
3312 } /* switch (optname) */
3313 break;
3314 #endif /* LWIP_IPV6 */
3315
3316 #if LWIP_UDP && LWIP_UDPLITE
3317 /* Level: IPPROTO_UDPLITE */
3318 case IPPROTO_UDPLITE:
3319 /* Special case: all IPPROTO_UDPLITE options take an int */
3320 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3321 /* If this is not a UDP lite socket, ignore any options. */
3322 if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
3323 done_socket(sock);
3324 return ENOPROTOOPT;
3325 }
3326 switch (optname) {
3327 case UDPLITE_SEND_CSCOV:
3328 *(int *)optval = sock->conn->pcb.udp->chksum_len_tx;
3329 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) = %d\n",
3330 s, (*(int *)optval)) );
3331 break;
3332 case UDPLITE_RECV_CSCOV:
3333 *(int *)optval = sock->conn->pcb.udp->chksum_len_rx;
3334 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) = %d\n",
3335 s, (*(int *)optval)) );
3336 break;
3337 default:
3338 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
3339 s, optname));
3340 err = ENOPROTOOPT;
3341 break;
3342 } /* switch (optname) */
3343 break;
3344 #endif /* LWIP_UDP && LWIP_UDPLITE */
3345 /* Level: IPPROTO_RAW */
3346 case IPPROTO_RAW:
3347 switch (optname) {
3348 #if LWIP_IPV6 && LWIP_RAW
3349 case IPV6_CHECKSUM:
3350 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_RAW);
3351 lwip_getsockopt_impl_ipv6_checksum(s, sock, optval);
3352 break;
3353 #endif /* LWIP_IPV6 && LWIP_RAW */
3354 default:
3355 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
3356 s, optname));
3357 err = ENOPROTOOPT;
3358 break;
3359 } /* switch (optname) */
3360 break;
3361 default:
3362 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
3363 s, level, optname));
3364 err = ENOPROTOOPT;
3365 break;
3366 } /* switch (level) */
3367
3368 done_socket(sock);
3369 return err;
3370 }
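/*
 * Usage sketch (illustrative only, not compiled as part of this file): the
 * options handled above are read back by applications through the public
 * lwip_getsockopt() wrapper, e.g. (assuming 's' is an existing lwIP socket):
 *
 *   int ttl = 0;
 *   socklen_t len = sizeof(ttl);
 *   if (lwip_getsockopt(s, IPPROTO_IP, IP_TTL, &ttl, &len) == 0) {
 *     // ttl now holds the current TTL of the underlying PCB
 *   }
 */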
3371
3372 int
3373 lwip_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen)
3374 {
3375 int err = 0;
3376 struct lwip_sock *sock = get_socket(s);
3377 #if !LWIP_TCPIP_CORE_LOCKING
3378 err_t cberr;
3379 LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
3380 #endif /* !LWIP_TCPIP_CORE_LOCKING */
3381
3382 if (!sock) {
3383 return -1;
3384 }
3385
3386 if (NULL == optval) {
3387 set_errno(EFAULT);
3388 done_socket(sock);
3389 return -1;
3390 }
3391
3392 #if LWIP_TCPIP_CORE_LOCKING
3393 /* core-locking can just call the -impl function */
3394 LOCK_TCPIP_CORE();
3395 err = lwip_setsockopt_impl(s, level, optname, optval, optlen);
3396 UNLOCK_TCPIP_CORE();
3397 #if LWIP_LOWPOWER
3398 tcpip_send_msg_na(LOW_NON_BLOCK);
3399 #endif
3400
3401 #else /* LWIP_TCPIP_CORE_LOCKING */
3402
3403 #if LWIP_MPU_COMPATIBLE
3404 /* MPU_COMPATIBLE copies the optval data, so check for max size here */
3405 if (optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
3406 set_errno(ENOBUFS);
3407 done_socket(sock);
3408 return -1;
3409 }
3410 #endif /* LWIP_MPU_COMPATIBLE */
3411
3412 LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
3413 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
3414 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
3415 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
3416 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = optlen;
3417 #if LWIP_MPU_COMPATIBLE
3418 MEMCPY(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, optval, optlen);
3419 #else /* LWIP_MPU_COMPATIBLE */
3420 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.pc = (const void *)optval;
3421 #endif /* LWIP_MPU_COMPATIBLE */
3422 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
3423 #if LWIP_NETCONN_SEM_PER_THREAD
3424 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
3425 #else
3426 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
3427 #endif
3428 cberr = tcpip_callback(lwip_setsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
3429 if (cberr != ERR_OK) {
3430 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
3431 set_errno(err_to_errno(cberr));
3432 done_socket(sock);
3433 return -1;
3434 }
3435 sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);
3436
3437 /* maybe lwip_setsockopt_impl has changed err */
3438 err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
3439 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
3440 #endif /* LWIP_TCPIP_CORE_LOCKING */
3441
3442 set_errno(err);
3443 done_socket(sock);
3444 return err ? -1 : 0;
3445 }
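/*
 * Usage sketch (illustrative only, not compiled as part of this file): an
 * application sets options through this wrapper; the value is validated and
 * applied by lwip_setsockopt_impl() in the tcpip context, e.g. (assuming 's'
 * is an existing TCP socket):
 *
 *   int nodelay = 1;
 *   if (lwip_setsockopt(s, IPPROTO_TCP, TCP_NODELAY,
 *                       &nodelay, sizeof(nodelay)) < 0) {
 *     // errno has been set via set_errno() above
 *   }
 */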
3446
3447 #if !LWIP_TCPIP_CORE_LOCKING
3448 /** lwip_setsockopt_callback: only used without CORE_LOCKING
3449 * to get into the tcpip_thread
3450 */
3451 static void
3452 lwip_setsockopt_callback(void *arg)
3453 {
3454 struct lwip_setgetsockopt_data *data;
3455 LWIP_ASSERT("arg != NULL", arg != NULL);
3456 data = (struct lwip_setgetsockopt_data *)arg;
3457
3458 data->err = lwip_setsockopt_impl(data->s, data->level, data->optname,
3459 #if LWIP_MPU_COMPATIBLE
3460 data->optval,
3461 #else /* LWIP_MPU_COMPATIBLE */
3462 data->optval.pc,
3463 #endif /* LWIP_MPU_COMPATIBLE */
3464 data->optlen);
3465
3466 sys_sem_signal((sys_sem_t *)(data->completed_sem));
3467 }
3468 #endif /* LWIP_TCPIP_CORE_LOCKING */
3469
3470 /** lwip_setsockopt_impl: the actual implementation of setsockopt:
3471 * same argument as lwip_setsockopt, either called directly or through callback
3472 */
3473 static int
3474 lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen)
3475 {
3476 int err = 0;
3477 struct lwip_sock *sock = tryget_socket(s);
3478 if (!sock) {
3479 return EBADF;
3480 }
3481
3482 #ifdef LWIP_HOOK_SOCKETS_SETSOCKOPT
3483 if (LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) {
3484 done_socket(sock);
3485 return err;
3486 }
3487 #endif
3488
3489 switch (level) {
3490
3491 /* Level: SOL_SOCKET */
3492 case SOL_SOCKET:
3493 switch (optname) {
3494
3495 /* SO_ACCEPTCONN is get-only */
3496
3497 /* The option flags */
3498 case SO_BROADCAST:
3499 case SO_KEEPALIVE:
3500 #if SO_REUSE
3501 case SO_REUSEADDR:
3502 #endif /* SO_REUSE */
3503 if ((optname == SO_BROADCAST) &&
3504 (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
3505 done_socket(sock);
3506 return ENOPROTOOPT;
3507 }
3508
3509 optname = lwip_sockopt_to_ipopt(optname);
3510
3511 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3512 if (*(const int *)optval) {
3513 ip_set_option(sock->conn->pcb.ip, optname);
3514 } else {
3515 ip_reset_option(sock->conn->pcb.ip, optname);
3516 }
3517 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, optname=0x%x, ..) -> %s\n",
3518 s, optname, (*(const int *)optval ? "on" : "off")));
3519 break;
3520
3521 /* SO_TYPE is get-only */
3522 /* SO_ERROR is get-only */
3523
3524 #if LWIP_SO_SNDTIMEO
3525 case SO_SNDTIMEO: {
3526 long ms_long;
3527 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3528 ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
3529 if (ms_long < 0) {
3530 done_socket(sock);
3531 return EINVAL;
3532 }
3533 netconn_set_sendtimeout(sock->conn, ms_long);
3534 break;
3535 }
3536 #endif /* LWIP_SO_SNDTIMEO */
3537 #if LWIP_SO_RCVTIMEO
3538 case SO_RCVTIMEO: {
3539 long ms_long;
3540 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3541 ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
3542 if (ms_long < 0) {
3543 done_socket(sock);
3544 return EINVAL;
3545 }
3546 netconn_set_recvtimeout(sock->conn, (u32_t)ms_long);
3547 break;
3548 }
3549 #endif /* LWIP_SO_RCVTIMEO */
3550 #if LWIP_SO_RCVBUF
3551 case SO_RCVBUF:
3552 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, int);
3553 netconn_set_recvbufsize(sock->conn, *(const int *)optval);
3554 break;
3555 #endif /* LWIP_SO_RCVBUF */
3556 #if LWIP_SO_LINGER
3557 case SO_LINGER: {
3558 const struct linger *linger = (const struct linger *)optval;
3559 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct linger);
3560 if (linger->l_onoff) {
3561 int lingersec = linger->l_linger;
3562 if (lingersec < 0) {
3563 done_socket(sock);
3564 return EINVAL;
3565 }
3566 if (lingersec > 0xFFFF) {
3567 lingersec = 0xFFFF;
3568 }
3569 sock->conn->linger = (s16_t)lingersec;
3570 } else {
3571 sock->conn->linger = -1;
3572 }
3573 }
3574 break;
3575 #endif /* LWIP_SO_LINGER */
3576 #if LWIP_UDP
3577 case SO_NO_CHECK:
3578 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
3579 #if LWIP_UDPLITE
3580 if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) {
3581 /* this flag is only available for UDP, not for UDP lite */
3582 done_socket(sock);
3583 return EAFNOSUPPORT;
3584 }
3585 #endif /* LWIP_UDPLITE */
3586 if (*(const int *)optval) {
3587 udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
3588 } else {
3589 udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
3590 }
3591 break;
3592 #endif /* LWIP_UDP */
3593 case SO_BINDTODEVICE: {
3594 const struct ifreq *iface;
3595 struct netif *n = NULL;
3596
3597 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct ifreq);
3598
3599 iface = (const struct ifreq *)optval;
3600 if (iface->ifr_name[0] != 0) {
3601 n = netif_find(iface->ifr_name);
3602 if (n == NULL) {
3603 done_socket(sock);
3604 return ENODEV;
3605 }
3606 }
3607
3608 switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
3609 #if LWIP_TCP
3610 case NETCONN_TCP:
3611 tcp_bind_netif(sock->conn->pcb.tcp, n);
3612 break;
3613 #endif
3614 #if LWIP_UDP
3615 case NETCONN_UDP:
3616 udp_bind_netif(sock->conn->pcb.udp, n);
3617 break;
3618 #endif
3619 #if LWIP_RAW
3620 case NETCONN_RAW:
3621 raw_bind_netif(sock->conn->pcb.raw, n);
3622 break;
3623 #endif
3624 default:
3625 LWIP_ASSERT("Unhandled netconn type in SO_BINDTODEVICE", 0);
3626 break;
3627 }
3628 }
3629 break;
3630 default:
3631 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
3632 s, optname));
3633 err = ENOPROTOOPT;
3634 break;
3635 } /* switch (optname) */
3636 break;
3637
3638 /* Level: IPPROTO_IP */
3639 case IPPROTO_IP:
3640 switch (optname) {
3641 case IP_TTL:
3642 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3643 sock->conn->pcb.ip->ttl = (u8_t)(*(const int *)optval);
3644 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TTL, ..) -> %d\n",
3645 s, sock->conn->pcb.ip->ttl));
3646 break;
3647 case IP_TOS:
3648 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3649 sock->conn->pcb.ip->tos = (u8_t)(*(const int *)optval);
3650 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TOS, ..)-> %d\n",
3651 s, sock->conn->pcb.ip->tos));
3652 break;
3653 #if LWIP_NETBUF_RECVINFO
3654 case IP_PKTINFO:
3655 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
3656 if (*(const int *)optval) {
3657 sock->conn->flags |= NETCONN_FLAG_PKTINFO;
3658 } else {
3659 sock->conn->flags &= ~NETCONN_FLAG_PKTINFO;
3660 }
3661 break;
3662 #endif /* LWIP_NETBUF_RECVINFO */
3663 #if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
3664 case IP_MULTICAST_TTL:
3665 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3666 udp_set_multicast_ttl(sock->conn->pcb.udp, (u8_t)(*(const u8_t *)optval));
3667 break;
3668 case IP_MULTICAST_IF: {
3669 ip4_addr_t if_addr;
3670 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct in_addr, NETCONN_UDP);
3671 inet_addr_to_ip4addr(&if_addr, (const struct in_addr *)optval);
3672 udp_set_multicast_netif_addr(sock->conn->pcb.udp, &if_addr);
3673 }
3674 break;
3675 case IP_MULTICAST_LOOP:
3676 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3677 if (*(const u8_t *)optval) {
3678 udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
3679 } else {
3680 udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
3681 }
3682 break;
3683 #endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
3684 #if LWIP_IGMP
3685 case IP_ADD_MEMBERSHIP:
3686 case IP_DROP_MEMBERSHIP: {
3687 /* These options are only valid on UDP sockets; ENOPROTOOPT is returned otherwise. */
3688 err_t igmp_err;
3689 const struct ip_mreq *imr = (const struct ip_mreq *)optval;
3690 ip4_addr_t if_addr;
3691 ip4_addr_t multi_addr;
3692 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ip_mreq, NETCONN_UDP);
3693 inet_addr_to_ip4addr(&if_addr, &imr->imr_interface);
3694 inet_addr_to_ip4addr(&multi_addr, &imr->imr_multiaddr);
3695 if (optname == IP_ADD_MEMBERSHIP) {
3696 if (!lwip_socket_register_membership(s, &if_addr, &multi_addr)) {
3697 /* cannot track membership (out of memory) */
3698 err = ENOMEM;
3699 igmp_err = ERR_OK;
3700 } else {
3701 igmp_err = igmp_joingroup(&if_addr, &multi_addr);
3702 }
3703 } else {
3704 igmp_err = igmp_leavegroup(&if_addr, &multi_addr);
3705 lwip_socket_unregister_membership(s, &if_addr, &multi_addr);
3706 }
3707 if (igmp_err != ERR_OK) {
3708 err = EADDRNOTAVAIL;
3709 }
3710 }
3711 break;
3712 #endif /* LWIP_IGMP */
3713 default:
3714 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
3715 s, optname));
3716 err = ENOPROTOOPT;
3717 break;
3718 } /* switch (optname) */
3719 break;
3720
3721 #if LWIP_TCP
3722 /* Level: IPPROTO_TCP */
3723 case IPPROTO_TCP:
3724 /* Special case: all IPPROTO_TCP options take an int */
3725 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP);
3726 if (sock->conn->pcb.tcp->state == LISTEN) {
3727 done_socket(sock);
3728 return EINVAL;
3729 }
3730 switch (optname) {
3731 case TCP_NODELAY:
3732 if (*(const int *)optval) {
3733 tcp_nagle_disable(sock->conn->pcb.tcp);
3734 } else {
3735 tcp_nagle_enable(sock->conn->pcb.tcp);
3736 }
3737 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_NODELAY) -> %s\n",
3738 s, (*(const int *)optval) ? "on" : "off") );
3739 break;
3740 case TCP_KEEPALIVE:
3741 sock->conn->pcb.tcp->keep_idle = (u32_t)(*(const int *)optval);
3742 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) -> %"U32_F"\n",
3743 s, sock->conn->pcb.tcp->keep_idle));
3744 break;
3745
3746 #if LWIP_TCP_KEEPALIVE
3747 case TCP_KEEPIDLE:
3748 sock->conn->pcb.tcp->keep_idle = 1000 * (u32_t)(*(const int *)optval);
3749 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) -> %"U32_F"\n",
3750 s, sock->conn->pcb.tcp->keep_idle));
3751 break;
3752 case TCP_KEEPINTVL:
3753 sock->conn->pcb.tcp->keep_intvl = 1000 * (u32_t)(*(const int *)optval);
3754 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) -> %"U32_F"\n",
3755 s, sock->conn->pcb.tcp->keep_intvl));
3756 break;
3757 case TCP_KEEPCNT:
3758 sock->conn->pcb.tcp->keep_cnt = (u32_t)(*(const int *)optval);
3759 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) -> %"U32_F"\n",
3760 s, sock->conn->pcb.tcp->keep_cnt));
3761 break;
3762 #endif /* LWIP_TCP_KEEPALIVE */
3763 default:
3764 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
3765 s, optname));
3766 err = ENOPROTOOPT;
3767 break;
3768 } /* switch (optname) */
3769 break;
3770 #endif /* LWIP_TCP */
3771
3772 #if LWIP_IPV6
3773 /* Level: IPPROTO_IPV6 */
3774 case IPPROTO_IPV6:
3775 switch (optname) {
3776 #if LWIP_IPV6 && LWIP_RAW
3777 case IPV6_CHECKSUM:
3778 err = lwip_setsockopt_impl_ipv6_checksum(s, sock, optval, optlen);
3779 if (err) {
3780 return err;
3781 }
3782 break;
3783 #endif /* LWIP_IPV6 && LWIP_RAW */
3784 case IPV6_V6ONLY:
3785 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3786 if (*(const int *)optval) {
3787 netconn_set_ipv6only(sock->conn, 1);
3788 } else {
3789 netconn_set_ipv6only(sock->conn, 0);
3790 }
3791 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY, ..) -> %d\n",
3792 s, (netconn_get_ipv6only(sock->conn) ? 1 : 0)));
3793 break;
3794 #if LWIP_IPV6_MLD
3795 case IPV6_JOIN_GROUP:
3796 case IPV6_LEAVE_GROUP: {
3797 /* These options are only valid on UDP sockets; ENOPROTOOPT is returned otherwise. */
3798 err_t mld6_err;
3799 struct netif *netif;
3800 ip6_addr_t multi_addr;
3801 const struct ipv6_mreq *imr = (const struct ipv6_mreq *)optval;
3802 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ipv6_mreq, NETCONN_UDP);
3803 inet6_addr_to_ip6addr(&multi_addr, &imr->ipv6mr_multiaddr);
3804 LWIP_ASSERT("Invalid netif index", imr->ipv6mr_interface <= 0xFFu);
3805 #ifdef LOSCFG_NET_CONTAINER
3806 netif = netif_get_by_index((u8_t)imr->ipv6mr_interface, get_net_group_from_ippcb(sock->conn->pcb.ip));
3807 #else
3808 netif = netif_get_by_index((u8_t)imr->ipv6mr_interface);
3809 #endif
3810 if (netif == NULL) {
3811 err = EADDRNOTAVAIL;
3812 break;
3813 }
3814
3815 if (optname == IPV6_JOIN_GROUP) {
3816 if (!lwip_socket_register_mld6_membership(s, imr->ipv6mr_interface, &multi_addr)) {
3817 /* cannot track membership (out of memory) */
3818 err = ENOMEM;
3819 mld6_err = ERR_OK;
3820 } else {
3821 mld6_err = mld6_joingroup_netif(netif, &multi_addr);
3822 }
3823 } else {
3824 mld6_err = mld6_leavegroup_netif(netif, &multi_addr);
3825 lwip_socket_unregister_mld6_membership(s, imr->ipv6mr_interface, &multi_addr);
3826 }
3827 if (mld6_err != ERR_OK) {
3828 err = EADDRNOTAVAIL;
3829 }
3830 }
3831 break;
3832 #endif /* LWIP_IPV6_MLD */
3833 default:
3834 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
3835 s, optname));
3836 err = ENOPROTOOPT;
3837 break;
3838 } /* switch (optname) */
3839 break;
3840 #endif /* LWIP_IPV6 */
3841
3842 #if LWIP_UDP && LWIP_UDPLITE
3843 /* Level: IPPROTO_UDPLITE */
3844 case IPPROTO_UDPLITE:
3845 /* Special case: all IPPROTO_UDPLITE options take an int */
3846 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3847 /* If this is not a UDP lite socket, ignore any options. */
3848 if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
3849 done_socket(sock);
3850 return ENOPROTOOPT;
3851 }
3852 switch (optname) {
3853 case UDPLITE_SEND_CSCOV:
3854 if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
3855 /* don't allow illegal values! */
3856 sock->conn->pcb.udp->chksum_len_tx = 8;
3857 } else {
3858 sock->conn->pcb.udp->chksum_len_tx = (u16_t)*(const int *)optval;
3859 }
3860 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) -> %d\n",
3861 s, (*(const int *)optval)) );
3862 break;
3863 case UDPLITE_RECV_CSCOV:
3864 if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
3865 /* don't allow illegal values! */
3866 sock->conn->pcb.udp->chksum_len_rx = 8;
3867 } else {
3868 sock->conn->pcb.udp->chksum_len_rx = (u16_t)*(const int *)optval;
3869 }
3870 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) -> %d\n",
3871 s, (*(const int *)optval)) );
3872 break;
3873 default:
3874 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
3875 s, optname));
3876 err = ENOPROTOOPT;
3877 break;
3878 } /* switch (optname) */
3879 break;
3880 #endif /* LWIP_UDP && LWIP_UDPLITE */
3881 /* Level: IPPROTO_RAW */
3882 case IPPROTO_RAW:
3883 switch (optname) {
3884 #if LWIP_IPV6 && LWIP_RAW
3885 case IPV6_CHECKSUM:
3886 err = lwip_setsockopt_impl_ipv6_checksum(s, sock, optval, optlen);
3887 if (err) {
3888 return err;
3889 }
3890 break;
3891 #endif /* LWIP_IPV6 && LWIP_RAW */
3892 default:
3893 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
3894 s, optname));
3895 err = ENOPROTOOPT;
3896 break;
3897 } /* switch (optname) */
3898 break;
3899 default:
3900 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
3901 s, level, optname));
3902 err = ENOPROTOOPT;
3903 break;
3904 } /* switch (level) */
3905
3906 done_socket(sock);
3907 return err;
3908 }
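/*
 * Usage sketch (illustrative only, not compiled as part of this file): the
 * SO_BINDTODEVICE branch above binds the PCB to a single netif; 's' and the
 * interface name "en0" are examples only and must match an existing socket
 * and netif:
 *
 *   struct ifreq ifr;
 *   memset(&ifr, 0, sizeof(ifr));
 *   strncpy(ifr.ifr_name, "en0", sizeof(ifr.ifr_name) - 1);
 *   if (lwip_setsockopt(s, SOL_SOCKET, SO_BINDTODEVICE, &ifr, sizeof(ifr)) < 0) {
 *     // ENODEV if no netif with that name exists
 *   }
 */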
3909
3910 int
3911 lwip_ioctl(int s, long cmd, void *argp)
3912 {
3913 struct lwip_sock *sock = get_socket(s);
3914 u8_t val;
3915 #if LWIP_SO_RCVBUF
3916 int recv_avail;
3917 #endif /* LWIP_SO_RCVBUF */
3918
3919 if (!sock) {
3920 return -1;
3921 }
3922
3923 switch (cmd) {
3924 #if LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE
3925 case FIONREAD:
3926 if (!argp) {
3927 set_errno(EINVAL);
3928 done_socket(sock);
3929 return -1;
3930 }
3931 #if LWIP_FIONREAD_LINUXMODE
3932 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
3933 struct netbuf *nb;
3934 if (sock->lastdata.netbuf) {
3935 nb = sock->lastdata.netbuf;
3936 *((int *)argp) = nb->p->tot_len;
3937 } else {
3938 struct netbuf *rxbuf;
3939 err_t err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &rxbuf, NETCONN_DONTBLOCK);
3940 if (err != ERR_OK) {
3941 *((int *)argp) = 0;
3942 } else {
3943 sock->lastdata.netbuf = rxbuf;
3944 *((int *)argp) = rxbuf->p->tot_len;
3945 }
3946 }
3947 done_socket(sock);
3948 return 0;
3949 }
3950 #endif /* LWIP_FIONREAD_LINUXMODE */
3951
3952 #if LWIP_SO_RCVBUF
3953 /* we come here if either LWIP_FIONREAD_LINUXMODE==0 or this is a TCP socket */
3954 SYS_ARCH_GET(sock->conn->recv_avail, recv_avail);
3955 if (recv_avail < 0) {
3956 recv_avail = 0;
3957 }
3958
3959 /* Check if there is data left from the last recv operation. /maq 041215 */
3960 if (sock->lastdata.netbuf) {
3961 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
3962 recv_avail += sock->lastdata.pbuf->tot_len;
3963 } else {
3964 recv_avail += sock->lastdata.netbuf->p->tot_len;
3965 }
3966 }
3967 *((int *)argp) = recv_avail;
3968
3969 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONREAD, %p) = %"U16_F"\n", s, argp, *((u16_t *)argp)));
3970 set_errno(0);
3971 done_socket(sock);
3972 return 0;
3973 #else /* LWIP_SO_RCVBUF */
3974 break;
3975 #endif /* LWIP_SO_RCVBUF */
3976 #endif /* LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE */
3977
3978 case (long)FIONBIO:
3979 val = 0;
3980 if (argp && *(int *)argp) {
3981 val = 1;
3982 }
3983 netconn_set_nonblocking(sock->conn, val);
3984 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONBIO, %d)\n", s, val));
3985 set_errno(0);
3986 done_socket(sock);
3987 return 0;
3988
3989 default:
3990 IOCTL_CMD_CASE_HANDLER();
3991 break;
3992 } /* switch (cmd) */
3993 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, UNIMPL: 0x%lx, %p)\n", s, cmd, argp));
3994 set_errno(ENOSYS); /* not yet implemented */
3995 done_socket(sock);
3996 return -1;
3997 }
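/*
 * Usage sketch (illustrative only, not compiled as part of this file): the
 * two commands handled above follow the usual BSD semantics, e.g. (assuming
 * 's' is an existing lwIP socket):
 *
 *   int on = 1;
 *   lwip_ioctl(s, FIONBIO, &on);        // put the socket into non-blocking mode
 *
 *   int pending = 0;
 *   lwip_ioctl(s, FIONREAD, &pending);  // bytes available (if FIONREAD is enabled)
 */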
3998
3999 /** A minimal implementation of fcntl.
4000 * Currently only the commands F_GETFL and F_SETFL are implemented.
4001 * The flag O_NONBLOCK and the access modes are supported for F_GETFL;
4002 * only the flag O_NONBLOCK is implemented for F_SETFL.
4003 */
4004 int
4005 lwip_fcntl(int s, int cmd, int val)
4006 {
4007 struct lwip_sock *sock = get_socket(s);
4008 int ret = -1;
4009 int op_mode = 0;
4010
4011 if (!sock) {
4012 return -1;
4013 }
4014
4015 switch (cmd) {
4016 case F_GETFL:
4017 ret = netconn_is_nonblocking(sock->conn) ? O_NONBLOCK : 0;
4018 set_errno(0);
4019
4020 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
4021 #if LWIP_TCPIP_CORE_LOCKING
4022 LOCK_TCPIP_CORE();
4023 #else
4024 SYS_ARCH_DECL_PROTECT(lev);
4025 /* the proper thing to do here would be to get into the tcpip_thread,
4026 but locking should be OK as well since we only *read* some flags */
4027 SYS_ARCH_PROTECT(lev);
4028 #endif
4029 #if LWIP_TCP
4030 if (sock->conn->pcb.tcp) {
4031 if (!(sock->conn->pcb.tcp->flags & TF_RXCLOSED)) {
4032 op_mode |= O_RDONLY;
4033 }
4034 if (!(sock->conn->pcb.tcp->flags & TF_FIN)) {
4035 op_mode |= O_WRONLY;
4036 }
4037 }
4038 #endif
4039 #if LWIP_TCPIP_CORE_LOCKING
4040 UNLOCK_TCPIP_CORE();
4041 #else
4042 SYS_ARCH_UNPROTECT(lev);
4043 #endif
4044 } else {
4045 op_mode |= O_RDWR;
4046 }
4047
4048 /* ensure O_RDWR for (O_RDONLY|O_WRONLY) != O_RDWR cases */
4049 ret |= (op_mode == (O_RDONLY | O_WRONLY)) ? O_RDWR : op_mode;
4050
4051 break;
4052 case F_SETFL:
4053 /* Bits corresponding to the file access mode and the file creation flags [..] that are set in arg shall be ignored */
4054 val &= ~(O_RDONLY | O_WRONLY | O_RDWR);
4055 if ((val & ~O_NONBLOCK) == 0) {
4056 /* only O_NONBLOCK, all other bits are zero */
4057 netconn_set_nonblocking(sock->conn, val & O_NONBLOCK);
4058 ret = 0;
4059 set_errno(0);
4060 } else {
4061 set_errno(ENOSYS); /* not yet implemented */
4062 }
4063 break;
4064 default:
4065 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_fcntl(%d, UNIMPL: %d, %d)\n", s, cmd, val));
4066 set_errno(ENOSYS); /* not yet implemented */
4067 break;
4068 }
4069 done_socket(sock);
4070 return ret;
4071 }
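/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * toggling O_NONBLOCK with the two supported commands (assuming 's' is an
 * existing lwIP socket):
 *
 *   int flags = lwip_fcntl(s, F_GETFL, 0);
 *   if (flags >= 0) {
 *     (void)lwip_fcntl(s, F_SETFL, flags | O_NONBLOCK);
 *   }
 */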
4072
4073 #if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
4074 int
4075 fcntl(int s, int cmd, ...)
4076 {
4077 va_list ap;
4078 int val;
4079
4080 va_start(ap, cmd);
4081 val = va_arg(ap, int);
4082 va_end(ap);
4083 return lwip_fcntl(s, cmd, val);
4084 }
4085 #endif
4086
4087 const char *
4088 lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size)
4089 {
4090 const char *ret = NULL;
4091 int size_int = (int)size;
4092 if (size_int < 0) {
4093 set_errno(ENOSPC);
4094 return NULL;
4095 }
4096 switch (af) {
4097 #if LWIP_IPV4
4098 case AF_INET:
4099 ret = ip4addr_ntoa_r((const ip4_addr_t *)src, dst, size_int);
4100 if (ret == NULL) {
4101 set_errno(ENOSPC);
4102 }
4103 break;
4104 #endif
4105 #if LWIP_IPV6
4106 case AF_INET6:
4107 ret = ip6addr_ntoa_r((const ip6_addr_t *)src, dst, size_int);
4108 if (ret == NULL) {
4109 set_errno(ENOSPC);
4110 }
4111 break;
4112 #endif
4113 default:
4114 set_errno(EAFNOSUPPORT);
4115 break;
4116 }
4117 return ret;
4118 }
4119
4120 int
4121 lwip_inet_pton(int af, const char *src, void *dst)
4122 {
4123 int err;
4124 switch (af) {
4125 #if LWIP_IPV4
4126 case AF_INET:
4127 err = ip4addr_aton(src, (ip4_addr_t *)dst);
4128 break;
4129 #endif
4130 #if LWIP_IPV6
4131 case AF_INET6: {
4132 /* convert into temporary variable since ip6_addr_t might be larger
4133 than in6_addr when scopes are enabled */
4134 ip6_addr_t addr;
4135 err = ip6addr_aton(src, &addr);
4136 if (err) {
4137 memcpy(dst, &addr.addr, sizeof(addr.addr));
4138 }
4139 break;
4140 }
4141 #endif
4142 default:
4143 err = -1;
4144 set_errno(EAFNOSUPPORT);
4145 break;
4146 }
4147 return err;
4148 }
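/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * round-tripping an IPv4 address through the two helpers above:
 *
 *   struct in_addr ia;
 *   char buf[16]; // >= INET_ADDRSTRLEN
 *   if (lwip_inet_pton(AF_INET, "192.0.2.1", &ia) == 1 &&
 *       lwip_inet_ntop(AF_INET, &ia, buf, sizeof(buf)) != NULL) {
 *     // buf now holds "192.0.2.1"
 *   }
 */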
4149
4150 #if LWIP_IGMP
4151 /** Register a new IGMP membership. On socket close, the membership is dropped automatically.
4152 *
4153 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4154 *
4155 * @return 1 on success, 0 on failure
4156 */
4157 static int
4158 lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
4159 {
4160 struct lwip_sock *sock = get_socket(s);
4161 int i;
4162
4163 if (!sock) {
4164 return 0;
4165 }
4166
4167 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4168 if (socket_ipv4_multicast_memberships[i].sock == NULL) {
4169 socket_ipv4_multicast_memberships[i].sock = sock;
4170 ip4_addr_copy(socket_ipv4_multicast_memberships[i].if_addr, *if_addr);
4171 ip4_addr_copy(socket_ipv4_multicast_memberships[i].multi_addr, *multi_addr);
4172 done_socket(sock);
4173 return 1;
4174 }
4175 }
4176 done_socket(sock);
4177 return 0;
4178 }
4179
4180 /** Unregister a previously registered membership. This prevents dropping the membership
4181 * on socket close.
4182 *
4183 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4184 */
4185 static void
4186 lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
4187 {
4188 struct lwip_sock *sock = get_socket(s);
4189 int i;
4190
4191 if (!sock) {
4192 return;
4193 }
4194
4195 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4196 if ((socket_ipv4_multicast_memberships[i].sock == sock) &&
4197 ip4_addr_eq(&socket_ipv4_multicast_memberships[i].if_addr, if_addr) &&
4198 ip4_addr_eq(&socket_ipv4_multicast_memberships[i].multi_addr, multi_addr)) {
4199 socket_ipv4_multicast_memberships[i].sock = NULL;
4200 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
4201 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
4202 break;
4203 }
4204 }
4205 done_socket(sock);
4206 }
4207
4208 /** Drop all memberships of a socket that were not dropped explicitly via setsockopt.
4209 *
4210 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
4211 */
4212 static void
4213 lwip_socket_drop_registered_memberships(int s)
4214 {
4215 struct lwip_sock *sock = get_socket(s);
4216 int i;
4217
4218 if (!sock) {
4219 return;
4220 }
4221
4222 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4223 if (socket_ipv4_multicast_memberships[i].sock == sock) {
4224 ip_addr_t multi_addr, if_addr;
4225 ip_addr_copy_from_ip4(multi_addr, socket_ipv4_multicast_memberships[i].multi_addr);
4226 ip_addr_copy_from_ip4(if_addr, socket_ipv4_multicast_memberships[i].if_addr);
4227 socket_ipv4_multicast_memberships[i].sock = NULL;
4228 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
4229 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
4230
4231 netconn_join_leave_group(sock->conn, &multi_addr, &if_addr, NETCONN_LEAVE);
4232 }
4233 }
4234 done_socket(sock);
4235 }
4236 #endif /* LWIP_IGMP */
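/*
 * Usage sketch (illustrative only, not compiled as part of this file): the
 * IGMP helpers above track memberships created via setsockopt() so they can
 * be dropped automatically on close. The group address below is an example
 * only (assuming 's' is a UDP socket):
 *
 *   struct ip_mreq mreq;
 *   mreq.imr_multiaddr.s_addr = inet_addr("224.0.1.1");
 *   mreq.imr_interface.s_addr = INADDR_ANY;
 *   lwip_setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 */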
4237
4238 #if LWIP_IPV6_MLD
4239 /** Register a new MLD6 membership. On socket close, the membership is dropped automatically.
4240 *
4241 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4242 *
4243 * @return 1 on success, 0 on failure
4244 */
4245 static int
4246 lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
4247 {
4248 struct lwip_sock *sock = get_socket(s);
4249 int i;
4250
4251 if (!sock) {
4252 return 0;
4253 }
4254
4255 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4256 if (socket_ipv6_multicast_memberships[i].sock == NULL) {
4257 socket_ipv6_multicast_memberships[i].sock = sock;
4258 socket_ipv6_multicast_memberships[i].if_idx = (u8_t)if_idx;
4259 ip6_addr_copy(socket_ipv6_multicast_memberships[i].multi_addr, *multi_addr);
4260 done_socket(sock);
4261 return 1;
4262 }
4263 }
4264 done_socket(sock);
4265 return 0;
4266 }
4267
4268 /** Unregister a previously registered MLD6 membership. This prevents dropping the membership
4269 * on socket close.
4270 *
4271 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4272 */
4273 static void
4274 lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
4275 {
4276 struct lwip_sock *sock = get_socket(s);
4277 int i;
4278
4279 if (!sock) {
4280 return;
4281 }
4282
4283 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4284 if ((socket_ipv6_multicast_memberships[i].sock == sock) &&
4285 (socket_ipv6_multicast_memberships[i].if_idx == if_idx) &&
4286 ip6_addr_eq(&socket_ipv6_multicast_memberships[i].multi_addr, multi_addr)) {
4287 socket_ipv6_multicast_memberships[i].sock = NULL;
4288 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4289 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4290 break;
4291 }
4292 }
4293 done_socket(sock);
4294 }
4295
4296 /** Drop all MLD6 memberships of a socket that were not dropped explicitly via setsockopt.
4297 *
4298 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
4299 */
4300 static void
4301 lwip_socket_drop_registered_mld6_memberships(int s)
4302 {
4303 struct lwip_sock *sock = get_socket(s);
4304 int i;
4305
4306 if (!sock) {
4307 return;
4308 }
4309
4310 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4311 if (socket_ipv6_multicast_memberships[i].sock == sock) {
4312 ip_addr_t multi_addr;
4313 u8_t if_idx;
4314
4315 ip_addr_copy_from_ip6(multi_addr, socket_ipv6_multicast_memberships[i].multi_addr);
4316 if_idx = socket_ipv6_multicast_memberships[i].if_idx;
4317
4318 socket_ipv6_multicast_memberships[i].sock = NULL;
4319 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4320 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4321
4322 netconn_join_leave_group_netif(sock->conn, &multi_addr, if_idx, NETCONN_LEAVE);
4323 }
4324 }
4325 done_socket(sock);
4326 }
4327 #endif /* LWIP_IPV6_MLD */
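/*
 * Usage sketch (illustrative only, not compiled as part of this file): the
 * MLD6 helpers above back the IPV6_JOIN_GROUP handling in
 * lwip_setsockopt_impl(); the netif index 1 and the group address are
 * examples only (assuming 's' is an IPv6 UDP socket):
 *
 *   struct ipv6_mreq mreq6;
 *   memset(&mreq6, 0, sizeof(mreq6));
 *   lwip_inet_pton(AF_INET6, "ff02::fb", &mreq6.ipv6mr_multiaddr);
 *   mreq6.ipv6mr_interface = 1;
 *   lwip_setsockopt(s, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mreq6, sizeof(mreq6));
 */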
4328
4329 #endif /* LWIP_SOCKET */
4330