1 /*
2 * libiio - Library for interfacing industrial I/O (IIO) devices
3 *
4 * Copyright (C) 2014-2015 Analog Devices, Inc.
5 * Author: Paul Cercueil <paul.cercueil@analog.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * */
18
19 #include "iio-config.h"
20 #include "iio-private.h"
21 #include "iio-lock.h"
22 #include "iiod-client.h"
23
24 #include <errno.h>
25 #include <fcntl.h>
26 #include <stdbool.h>
27 #include <string.h>
28 #include <sys/types.h>
29 #include <time.h>
30
31 #ifdef _WIN32
32 #include <winsock2.h>
33 #include <ws2tcpip.h>
34 #define close(s) closesocket(s)
35
36 /* winsock2.h defines ERROR, we don't want that */
37 #undef ERROR
38
39 #else /* _WIN32 */
40 #include <arpa/inet.h>
41 #include <netdb.h>
42 #include <netinet/in.h>
43 #include <netinet/tcp.h>
44 #include <net/if.h>
45 #include <sys/mman.h>
46 #include <poll.h>
47 #include <sys/socket.h>
48 #include <unistd.h>
49 #endif /* _WIN32 */
50
51 #ifdef HAVE_AVAHI
52 #include <avahi-client/client.h>
53 #include <avahi-common/error.h>
54 #include <avahi-client/lookup.h>
55 #include <avahi-common/simple-watch.h>
56 #endif
57
58 #include "debug.h"
59
60 #define DEFAULT_TIMEOUT_MS 5000
61
62 #define _STRINGIFY(x) #x
63 #define STRINGIFY(x) _STRINGIFY(x)
64
65 #define IIOD_PORT 30431
66 #define IIOD_PORT_STR STRINGIFY(IIOD_PORT)
67
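/*
 * Per-connection I/O state. Cancellation works by waking up any thread
 * blocked on the socket: on Windows through a dedicated WSAEVENT, and on
 * other platforms through an eventfd or a pipe polled alongside the socket.
 */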
68 struct iio_network_io_context {
69 int fd;
70
71 /* Only buffer IO contexts can be cancelled. */
72 bool cancellable;
73 bool cancelled;
74 #if defined(_WIN32)
75 WSAEVENT events[2];
76 #elif defined(WITH_NETWORK_EVENTFD)
77 int cancel_fd[1]; /* eventfd */
78 #else
79 int cancel_fd[2]; /* pipe */
80 #endif
81 unsigned int timeout_ms;
82 };
83
84 struct iio_context_pdata {
85 struct iio_network_io_context io_ctx;
86 struct addrinfo *addrinfo;
87 struct iio_mutex *lock;
88 struct iiod_client *iiod_client;
89 bool msg_trunc_supported;
90 };
91
92 struct iio_device_pdata {
93 struct iio_network_io_context io_ctx;
94 #ifdef WITH_NETWORK_GET_BUFFER
95 int memfd;
96 void *mmap_addr;
97 size_t mmap_len;
98 #endif
99 bool wait_for_err_code, is_cyclic, is_tx;
100 struct iio_mutex *lock;
101 };
102
103 #ifdef _WIN32
104
105 static int set_blocking_mode(int s, bool blocking)
106 {
107 unsigned long nonblock;
108 int ret;
109
110 nonblock = blocking ? 0 : 1;
111
112 ret = ioctlsocket(s, FIONBIO, &nonblock);
113 if (ret == SOCKET_ERROR) {
114 ret = -WSAGetLastError();
115 return ret;
116 }
117
118 return 0;
119 }
120
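/* events[0] is bound to the socket (FD_READ/FD_WRITE/FD_CLOSE), while
 * events[1] is only ever signalled by do_cancel() to abort a blocking wait. */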
121 static int setup_cancel(struct iio_network_io_context *io_ctx)
122 {
123 io_ctx->events[0] = WSACreateEvent();
124 if (io_ctx->events[0] == WSA_INVALID_EVENT)
125 return -ENOMEM; /* Pretty much the only error that can happen */
126
127 io_ctx->events[1] = WSACreateEvent();
128 if (io_ctx->events[1] == WSA_INVALID_EVENT) {
129 WSACloseEvent(io_ctx->events[0]);
130 return -ENOMEM;
131 }
132
133 return 0;
134 }
135
136 static void cleanup_cancel(struct iio_network_io_context *io_ctx)
137 {
138 WSACloseEvent(io_ctx->events[0]);
139 WSACloseEvent(io_ctx->events[1]);
140 }
141
142 static void do_cancel(struct iio_network_io_context *io_ctx)
143 {
144 WSASetEvent(io_ctx->events[1]);
145 }
146
147 static int wait_cancellable(struct iio_network_io_context *io_ctx, bool read)
148 {
149 long wsa_events = FD_CLOSE;
150 DWORD ret;
151
152 if (!io_ctx->cancellable)
153 return 0;
154
155 if (read)
156 wsa_events |= FD_READ;
157 else
158 wsa_events |= FD_WRITE;
159
160 WSAEventSelect(io_ctx->fd, NULL, 0);
161 WSAResetEvent(io_ctx->events[0]);
162 WSAEventSelect(io_ctx->fd, io_ctx->events[0], wsa_events);
163
164 ret = WSAWaitForMultipleEvents(2, io_ctx->events, FALSE,
165 WSA_INFINITE, FALSE);
166
167 if (ret == WSA_WAIT_EVENT_0 + 1)
168 return -EBADF;
169
170 return 0;
171 }
172
173 static int network_get_error(void)
174 {
175 return -WSAGetLastError();
176 }
177
178 static bool network_should_retry(int err)
179 {
180 return err == -WSAEWOULDBLOCK || err == -WSAETIMEDOUT;
181 }
182
183 static bool network_is_interrupted(int err)
184 {
185 return false;
186 }
187
188 static bool network_connect_in_progress(int err)
189 {
190 return err == -WSAEWOULDBLOCK;
191 }
192
193 #define NETWORK_ERR_TIMEOUT WSAETIMEDOUT
194
195 #else
196
197 static int set_blocking_mode(int fd, bool blocking)
198 {
199 int ret = fcntl(fd, F_GETFL, 0);
200 if (ret < 0)
201 return -errno;
202
203 if (blocking)
204 ret &= ~O_NONBLOCK;
205 else
206 ret |= O_NONBLOCK;
207
208 ret = fcntl(fd, F_SETFL, ret);
209 return ret < 0 ? -errno : 0;
210 }
211
212 #include <poll.h>
213
214 #if defined(WITH_NETWORK_EVENTFD)
215
216 #include <sys/eventfd.h>
217
218 static int create_cancel_fd(struct iio_network_io_context *io_ctx)
219 {
220 io_ctx->cancel_fd[0] = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
221 if (io_ctx->cancel_fd[0] < 0)
222 return -errno;
223 return 0;
224 }
225
226 static void cleanup_cancel(struct iio_network_io_context *io_ctx)
227 {
228 close(io_ctx->cancel_fd[0]);
229 }
230
231 #define CANCEL_WR_FD 0
232
233 #else
234
235 static int create_cancel_fd(struct iio_network_io_context *io_ctx)
236 {
237 int ret;
238
239 #ifdef HAS_PIPE2
240 ret = pipe2(io_ctx->cancel_fd, O_CLOEXEC | O_NONBLOCK);
241 if (ret < 0 && errno != ENOSYS) /* If ENOSYS, fall back to pipe() */
242 return -errno;
else if (!ret)
return 0;
243 #endif
244 ret = pipe(io_ctx->cancel_fd);
245 if (ret < 0)
246 return -errno;
247 ret = set_blocking_mode(io_ctx->cancel_fd[0], false);
248 if (ret < 0)
249 goto err_close;
250 ret = set_blocking_mode(io_ctx->cancel_fd[1], false);
251 if (ret < 0)
252 goto err_close;
253
254 return 0;
255 err_close:
256 close(io_ctx->cancel_fd[0]);
257 close(io_ctx->cancel_fd[1]);
258 return ret;
259 }
260
261 static void cleanup_cancel(struct iio_network_io_context *io_ctx)
262 {
263 close(io_ctx->cancel_fd[0]);
264 close(io_ctx->cancel_fd[1]);
265 }
266
267 #define CANCEL_WR_FD 1
268
269 #endif
270
271 static int setup_cancel(struct iio_network_io_context *io_ctx)
272 {
273 int ret;
274
275 ret = set_blocking_mode(io_ctx->fd, false);
276 if (ret)
277 return ret;
278
279 return create_cancel_fd(io_ctx);
280 }
281
282 static void do_cancel(struct iio_network_io_context *io_ctx)
283 {
284 uint64_t event = 1;
285 int ret;
286
287 ret = write(io_ctx->cancel_fd[CANCEL_WR_FD], &event, sizeof(event));
288 if (ret == -1) {
289 /* If this happens, something has gone very seriously wrong */
290 char err_str[1024];
291 iio_strerror(errno, err_str, sizeof(err_str));
292 ERROR("Unable to signal cancellation event: %s\n", err_str);
293 }
294 }
295
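/* Poll the socket together with the cancellation fd. Returns 0 once the
 * socket is ready, -EBADF if the transfer was cancelled, -EPIPE on timeout,
 * or a negative errno code if poll() itself failed. */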
296 static int wait_cancellable(struct iio_network_io_context *io_ctx, bool read)
297 {
298 struct pollfd pfd[2];
299 int ret;
300
301 if (!io_ctx->cancellable)
302 return 0;
303
304 memset(pfd, 0, sizeof(pfd));
305
306 pfd[0].fd = io_ctx->fd;
307 if (read)
308 pfd[0].events = POLLIN;
309 else
310 pfd[0].events = POLLOUT;
311 pfd[1].fd = io_ctx->cancel_fd[0];
312 pfd[1].events = POLLIN;
313
314 do {
315 int timeout_ms;
316
317 if (io_ctx->timeout_ms > 0)
318 timeout_ms = (int) io_ctx->timeout_ms;
319 else
320 timeout_ms = -1;
321
322 do {
323 ret = poll(pfd, 2, timeout_ms);
324 } while (ret == -1 && errno == EINTR);
325
326 if (ret == -1)
327 return -errno;
328 if (!ret)
329 return -EPIPE;
330
331 if (pfd[1].revents & POLLIN)
332 return -EBADF;
333 } while (!(pfd[0].revents & (pfd[0].events | POLLERR | POLLHUP)));
334
335 return 0;
336 }
337
338 static int network_get_error(void)
339 {
340 return -errno;
341 }
342
343 static bool network_should_retry(int err)
344 {
345 return err == -EAGAIN;
346 }
347
348 static bool network_is_interrupted(int err)
349 {
350 return err == -EINTR;
351 }
352
353 static bool network_connect_in_progress(int err)
354 {
355 return err == -EINPROGRESS;
356 }
357
358 #define NETWORK_ERR_TIMEOUT ETIMEDOUT
359
360 #endif
361
362 #ifdef HAVE_AVAHI
363 struct avahi_discovery_data {
364 AvahiSimplePoll *poll;
365 AvahiAddress *address;
366 uint16_t *port;
367 bool found, resolved;
368 };
369
370 static void __avahi_resolver_cb(AvahiServiceResolver *resolver,
371 __notused AvahiIfIndex iface, __notused AvahiProtocol proto,
372 __notused AvahiResolverEvent event, __notused const char *name,
373 __notused const char *type, __notused const char *domain,
374 __notused const char *host_name, const AvahiAddress *address,
375 uint16_t port, __notused AvahiStringList *txt,
376 __notused AvahiLookupResultFlags flags, void *d)
377 {
378 struct avahi_discovery_data *ddata = (struct avahi_discovery_data *) d;
379
380 memcpy(ddata->address, address, sizeof(*address));
381 *ddata->port = port;
382 ddata->resolved = true;
383 avahi_service_resolver_free(resolver);
384 }
385
386 static void __avahi_browser_cb(AvahiServiceBrowser *browser,
387 AvahiIfIndex iface, AvahiProtocol proto,
388 AvahiBrowserEvent event, const char *name,
389 const char *type, const char *domain,
390 __notused AvahiLookupResultFlags flags, void *d)
391 {
392 struct avahi_discovery_data *ddata = (struct avahi_discovery_data *) d;
393 struct AvahiClient *client = avahi_service_browser_get_client(browser);
394
395 switch (event) {
396 default:
397 case AVAHI_BROWSER_NEW:
398 ddata->found = !!avahi_service_resolver_new(client, iface,
399 proto, name, type, domain,
400 AVAHI_PROTO_UNSPEC, 0,
401 __avahi_resolver_cb, d);
402 break;
403 case AVAHI_BROWSER_ALL_FOR_NOW:
404 if (ddata->found) {
405 while (!ddata->resolved) {
406 struct timespec ts;
407 ts.tv_sec = 0;
408 ts.tv_nsec = 4000000;
409 nanosleep(&ts, NULL);
410 }
411 }
412 /* fall-through */
413 case AVAHI_BROWSER_FAILURE:
414 avahi_simple_poll_quit(ddata->poll);
415 /* fall-through */
416 case AVAHI_BROWSER_CACHE_EXHAUSTED:
417 break;
418 }
419 }
420
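/* Browse for "_iio._tcp" services and resolve the first one found: the
 * browser callback starts an asynchronous resolver, and the resolver
 * callback fills in the address/port and marks the discovery as resolved. */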
421 static int discover_host(AvahiAddress *addr, uint16_t *port)
422 {
423 struct avahi_discovery_data ddata;
424 int ret = 0;
425 AvahiClient *client;
426 AvahiServiceBrowser *browser;
427 AvahiSimplePoll *poll = avahi_simple_poll_new();
428 if (!poll)
429 return -ENOMEM;
430
431 client = avahi_client_new(avahi_simple_poll_get(poll),
432 0, NULL, NULL, &ret);
433 if (!client) {
434 ERROR("Unable to start ZeroConf client: %s\n",
435 avahi_strerror(ret));
436 goto err_free_poll;
437 }
438
439 memset(&ddata, 0, sizeof(ddata));
440 ddata.poll = poll;
441 ddata.address = addr;
442 ddata.port = port;
443
444 browser = avahi_service_browser_new(client,
445 AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC,
446 "_iio._tcp", NULL, 0, __avahi_browser_cb, &ddata);
447 if (!browser) {
448 ret = avahi_client_errno(client);
449 ERROR("Unable to create ZeroConf browser: %s\n",
450 avahi_strerror(ret));
451 goto err_free_client;
452 }
453
454 DEBUG("Trying to discover host\n");
455 avahi_simple_poll_loop(poll);
456
457 if (!ddata.found)
458 ret = ENXIO;
459
460 avahi_service_browser_free(browser);
461 err_free_client:
462 avahi_client_free(client);
463 err_free_poll:
464 avahi_simple_poll_free(poll);
465 return -ret; /* we want a negative error code */
466 }
467 #endif /* HAVE_AVAHI */
468
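/* recv() wrapper that honours cancellation and retries on transient errors.
 * Cancellable contexts loop until data arrives or the transfer is cancelled;
 * non-cancellable ones report -EPIPE when the socket times out. */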
469 static ssize_t network_recv(struct iio_network_io_context *io_ctx,
470 void *data, size_t len, int flags)
471 {
472 ssize_t ret;
473 int err;
474
475 while (1) {
476 ret = wait_cancellable(io_ctx, true);
477 if (ret < 0)
478 return ret;
479
480 ret = recv(io_ctx->fd, data, (int) len, flags);
481 if (ret == 0)
482 return -EPIPE;
483 else if (ret > 0)
484 break;
485
486 err = network_get_error();
487 if (network_should_retry(err)) {
488 if (io_ctx->cancellable)
489 continue;
490 else
491 return -EPIPE;
492 } else if (!network_is_interrupted(err)) {
493 return (ssize_t) err;
494 }
495 }
496 return ret;
497 }
498
499 static ssize_t network_send(struct iio_network_io_context *io_ctx,
500 const void *data, size_t len, int flags)
501 {
502 ssize_t ret;
503 int err;
504
505 while (1) {
506 ret = wait_cancellable(io_ctx, false);
507 if (ret < 0)
508 return ret;
509
510 ret = send(io_ctx->fd, data, (int) len, flags);
511 if (ret == 0)
512 return -EPIPE;
513 else if (ret > 0)
514 break;
515
516 err = network_get_error();
517 if (network_should_retry(err)) {
518 if (io_ctx->cancellable)
519 continue;
520 else
521 return -EPIPE;
522 } else if (!network_is_interrupted(err)) {
523 return (ssize_t) err;
524 }
525 }
526
527 return ret;
528 }
529
530 static ssize_t write_all(struct iio_network_io_context *io_ctx,
531 const void *src, size_t len)
532 {
533 uintptr_t ptr = (uintptr_t) src;
534 while (len) {
535 ssize_t ret = network_send(io_ctx, (const void *) ptr, len, 0);
536 if (ret < 0)
537 return ret;
538 ptr += ret;
539 len -= ret;
540 }
541 return (ssize_t)(ptr - (uintptr_t) src);
542 }
543
544 static ssize_t write_command(struct iio_network_io_context *io_ctx,
545 const char *cmd)
546 {
547 ssize_t ret;
548
549 DEBUG("Writing command: %s\n", cmd);
550 ret = write_all(io_ctx, cmd, strlen(cmd));
551 if (ret < 0) {
552 char buf[1024];
553 iio_strerror(-ret, buf, sizeof(buf));
554 ERROR("Unable to send command: %s\n", buf);
555 }
556 return ret;
557 }
558
559 static void network_cancel(const struct iio_device *dev)
560 {
561 struct iio_device_pdata *ppdata = dev->pdata;
562
563 do_cancel(&ppdata->io_ctx);
564
565 ppdata->io_ctx.cancelled = true;
566 }
567
568 #ifndef _WIN32
569
570 /* Use SOCK_CLOEXEC if available */
571 #ifndef SOCK_CLOEXEC
572 #define SOCK_CLOEXEC 0
573 #endif
574
575 static int do_create_socket(const struct addrinfo *addrinfo)
576 {
577 int fd;
578
579 fd = socket(addrinfo->ai_family, addrinfo->ai_socktype | SOCK_CLOEXEC, 0);
580 if (fd < 0)
581 return -errno;
582
583 return fd;
584 }
585
586 static int set_socket_timeout(int fd, unsigned int timeout)
587 {
588 struct timeval tv;
589
590 tv.tv_sec = timeout / 1000;
591 tv.tv_usec = (timeout % 1000) * 1000;
592 if (setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)) < 0 ||
593 setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO,
594 &tv, sizeof(tv)) < 0)
595 return -errno;
596 else
597 return 0;
598 }
599 #else
600
601 /* Use WSA_FLAG_NO_HANDLE_INHERIT if available */
602 #ifndef WSA_FLAG_NO_HANDLE_INHERIT
603 #define WSA_FLAG_NO_HANDLE_INHERIT 0
604 #endif
605
606 static int do_create_socket(const struct addrinfo *addrinfo)
607 {
608 SOCKET s;
609
610 s = WSASocketW(addrinfo->ai_family, addrinfo->ai_socktype, 0, NULL, 0,
611 WSA_FLAG_NO_HANDLE_INHERIT | WSA_FLAG_OVERLAPPED);
612 if (s == INVALID_SOCKET)
613 return -WSAGetLastError();
614
615 return (int) s;
616 }
617
618 static int set_socket_timeout(int fd, unsigned int timeout)
619 {
620 if (setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO,
621 (const char *) &timeout, sizeof(timeout)) < 0 ||
622 setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO,
623 (const char *) &timeout, sizeof(timeout)) < 0)
624 return -WSAGetLastError();
625 else
626 return 0;
627 }
628 #endif /* !_WIN32 */
629
630 /* The purpose of this function is to provide a version of connect()
631 * that does not ignore timeouts... */
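/* The socket is switched to non-blocking mode, connect() is issued, and
 * completion is then awaited with poll()/select() so the caller's timeout
 * applies; SO_ERROR is checked afterwards for the actual result. */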
632 static int do_connect(int fd, const struct addrinfo *addrinfo,
633 unsigned int timeout)
634 {
635 int ret, error;
636 socklen_t len;
637 #ifdef _WIN32
638 struct timeval tv;
639 struct timeval *ptv;
640 fd_set set;
641 #else
642 struct pollfd pfd;
643 #endif
644
645 ret = set_blocking_mode(fd, false);
646 if (ret < 0)
647 return ret;
648
649 ret = connect(fd, addrinfo->ai_addr, (int) addrinfo->ai_addrlen);
650 if (ret < 0) {
651 ret = network_get_error();
652 if (!network_connect_in_progress(ret))
653 return ret;
654 }
655
656 #ifdef _WIN32
657 FD_ZERO(&set);
658 FD_SET(fd, &set);
659
660 if (timeout != 0) {
661 tv.tv_sec = timeout / 1000;
662 tv.tv_usec = (timeout % 1000) * 1000;
663 ptv = &tv;
664 } else {
665 ptv = NULL;
666 }
667
668 ret = select(fd + 1, NULL, &set, &set, ptv);
669 #else
670 pfd.fd = fd;
671 pfd.events = POLLOUT | POLLERR;
672 pfd.revents = 0;
673
674 do {
675 ret = poll(&pfd, 1, timeout);
676 } while (ret == -1 && errno == EINTR);
677 #endif
678
679 if (ret < 0)
680 return network_get_error();
681
682 if (ret == 0)
683 return -NETWORK_ERR_TIMEOUT;
684
685 /* Verify that we don't have an error */
686 len = sizeof(error);
687 ret = getsockopt(fd, SOL_SOCKET, SO_ERROR, (char *)&error, &len);
688 if (ret < 0)
689 return network_get_error();
690
691 if (error)
692 return -error;
693
694 ret = set_blocking_mode(fd, true);
695 if (ret < 0)
696 return ret;
697
698 return 0;
699 }
700
701 static int create_socket(const struct addrinfo *addrinfo, unsigned int timeout)
702 {
703 int ret, fd, yes = 1;
704
705 fd = do_create_socket(addrinfo);
706 if (fd < 0)
707 return fd;
708
709 ret = do_connect(fd, addrinfo, timeout);
710 if (ret < 0) {
711 close(fd);
712 return ret;
713 }
714
715 set_socket_timeout(fd, timeout);
716 if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY,
717 (const char *) &yes, sizeof(yes)) < 0) {
718 ret = -errno;
719 close(fd);
720 return ret;
721 }
722
723 return fd;
724 }
725
726 static int network_open(const struct iio_device *dev,
727 size_t samples_count, bool cyclic)
728 {
729 struct iio_context_pdata *pdata = dev->ctx->pdata;
730 struct iio_device_pdata *ppdata = dev->pdata;
731 int ret = -EBUSY;
732
733 iio_mutex_lock(ppdata->lock);
734 if (ppdata->io_ctx.fd >= 0)
735 goto out_mutex_unlock;
736
737 ret = create_socket(pdata->addrinfo, DEFAULT_TIMEOUT_MS);
738 if (ret < 0)
739 goto out_mutex_unlock;
740
741 ppdata->io_ctx.fd = ret;
742 ppdata->io_ctx.cancelled = false;
743 ppdata->io_ctx.cancellable = false;
744 ppdata->io_ctx.timeout_ms = DEFAULT_TIMEOUT_MS;
745
746 ret = iiod_client_open_unlocked(pdata->iiod_client,
747 &ppdata->io_ctx, dev, samples_count, cyclic);
748 if (ret < 0)
749 goto err_close_socket;
750
751 ret = setup_cancel(&ppdata->io_ctx);
752 if (ret < 0)
753 goto err_close_socket;
754
755 set_socket_timeout(ppdata->io_ctx.fd, pdata->io_ctx.timeout_ms);
756
757 ppdata->io_ctx.timeout_ms = pdata->io_ctx.timeout_ms;
758 ppdata->io_ctx.cancellable = true;
759 ppdata->is_tx = iio_device_is_tx(dev);
760 ppdata->is_cyclic = cyclic;
761 ppdata->wait_for_err_code = false;
762 #ifdef WITH_NETWORK_GET_BUFFER
763 ppdata->mmap_len = samples_count * iio_device_get_sample_size(dev);
764 #endif
765
766 iio_mutex_unlock(ppdata->lock);
767
768 return 0;
769
770 err_close_socket:
771 close(ppdata->io_ctx.fd);
772 ppdata->io_ctx.fd = -1;
773 out_mutex_unlock:
774 iio_mutex_unlock(ppdata->lock);
775 return ret;
776 }
777
778 static int network_close(const struct iio_device *dev)
779 {
780 struct iio_device_pdata *pdata = dev->pdata;
781 int ret = -EBADF;
782
783 iio_mutex_lock(pdata->lock);
784
785 if (pdata->io_ctx.fd >= 0) {
786 if (!pdata->io_ctx.cancelled) {
787 ret = iiod_client_close_unlocked(
788 dev->ctx->pdata->iiod_client,
789 &pdata->io_ctx, dev);
790
791 write_command(&pdata->io_ctx, "\r\nEXIT\r\n");
792 } else {
793 ret = 0;
794 }
795
796 cleanup_cancel(&pdata->io_ctx);
797 close(pdata->io_ctx.fd);
798 pdata->io_ctx.fd = -1;
799 }
800
801 #ifdef WITH_NETWORK_GET_BUFFER
802 if (pdata->memfd >= 0)
803 close(pdata->memfd);
804 pdata->memfd = -1;
805
806 if (pdata->mmap_addr) {
807 munmap(pdata->mmap_addr, pdata->mmap_len);
808 pdata->mmap_addr = NULL;
809 }
810 #endif
811
812 iio_mutex_unlock(pdata->lock);
813 return ret;
814 }
815
816 static ssize_t network_read(const struct iio_device *dev, void *dst, size_t len,
817 uint32_t *mask, size_t words)
818 {
819 struct iio_device_pdata *pdata = dev->pdata;
820 ssize_t ret;
821
822 iio_mutex_lock(pdata->lock);
823 ret = iiod_client_read_unlocked(dev->ctx->pdata->iiod_client,
824 &pdata->io_ctx, dev, dst, len, mask, words);
825 iio_mutex_unlock(pdata->lock);
826
827 return ret;
828 }
829
830 static ssize_t network_write(const struct iio_device *dev,
831 const void *src, size_t len)
832 {
833 struct iio_device_pdata *pdata = dev->pdata;
834 ssize_t ret;
835
836 iio_mutex_lock(pdata->lock);
837 ret = iiod_client_write_unlocked(dev->ctx->pdata->iiod_client,
838 &pdata->io_ctx, dev, src, len);
839 iio_mutex_unlock(pdata->lock);
840
841 return ret;
842 }
843
844 #ifdef WITH_NETWORK_GET_BUFFER
845
846 static ssize_t read_all(struct iio_network_io_context *io_ctx,
847 void *dst, size_t len)
848 {
849 uintptr_t ptr = (uintptr_t) dst;
850 while (len) {
851 ssize_t ret = network_recv(io_ctx, (void *) ptr, len, 0);
852 if (ret < 0)
853 return ret;
854 ptr += ret;
855 len -= ret;
856 }
857 return (ssize_t)(ptr - (uintptr_t) dst);
858 }
859
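/* Read an ASCII integer from the socket one byte at a time, stopping at the
 * first '\n' or '.' once at least one other character has been read. */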
860 static int read_integer(struct iio_network_io_context *io_ctx, long *val)
861 {
862 unsigned int i;
863 char buf[1024], *ptr;
864 ssize_t ret;
865 bool found = false;
866
867 for (i = 0; i < sizeof(buf) - 1; i++) {
868 ret = read_all(io_ctx, buf + i, 1);
869 if (ret < 0)
870 return (int) ret;
871
872 /* Skip any leading newline characters. Stop at the next '\n',
873 * or at a '.' so that only the integer part of a float is parsed */
874 if (buf[i] != '\n' && buf[i] != '.')
875 found = true;
876 else if (found)
877 break;
878 }
879
880 buf[i] = '\0';
881 ret = (ssize_t) strtol(buf, &ptr, 10);
882 if (ptr == buf)
883 return -EINVAL;
884 *val = (long) ret;
885 return 0;
886 }
887
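/* Parse the reply to a READBUF command: a byte count, followed (when the
 * count is positive and a mask buffer is given) by the channel mask printed
 * as 8-character hexadecimal words, highest word first, and a newline. */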
888 static ssize_t network_read_mask(struct iio_network_io_context *io_ctx,
889 uint32_t *mask, size_t words)
890 {
891 long read_len;
892 ssize_t ret;
893
894 ret = read_integer(io_ctx, &read_len);
895 if (ret < 0)
896 return ret;
897
898 if (read_len > 0 && mask) {
899 size_t i;
900 char buf[9];
901
902 buf[8] = '\0';
903 DEBUG("Reading mask\n");
904
905 for (i = words; i > 0; i--) {
906 ret = read_all(io_ctx, buf, 8);
907 if (ret < 0)
908 return ret;
909
910 sscanf(buf, "%08x", &mask[i - 1]);
911 DEBUG("mask[%lu] = 0x%x\n",
912 (unsigned long)(i - 1), mask[i - 1]);
913 }
914 }
915
916 if (read_len > 0) {
917 char c;
918 ssize_t nb = read_all(io_ctx, &c, 1);
919 if (nb > 0 && c != '\n')
920 read_len = -EIO;
921 }
922
923 return (ssize_t) read_len;
924 }
925
926 static ssize_t read_error_code(struct iio_network_io_context *io_ctx)
927 {
928 /*
929 * The server returns two integer codes.
930 * The first one is returned right after the WRITEBUF command is issued,
931 * and corresponds to the error code returned when the server attempted
932 * to open the device.
933 * If it is zero, a second code is returned later, which (if positive)
934 * corresponds to the number of bytes written.
935 *
936 * To speed things up, we delay error reporting: the data is sent out
937 * without reading the error code that the server gives us, because
938 * waiting for the answer would take too much time. If an error occurred,
939 * it will be reported by the next call to iio_buffer_push().
940 */
941
942 unsigned int i;
943 long resp = 0;
944
945 for (i = 0; i < 2; i++) {
946 ssize_t ret = read_integer(io_ctx, &resp);
947 if (ret < 0)
948 return ret;
949 if (resp < 0)
950 return (ssize_t) resp;
951 }
952
953 return (ssize_t) resp;
954 }
955
956 static ssize_t write_rwbuf_command(const struct iio_device *dev,
957 const char *cmd)
958 {
959 struct iio_device_pdata *pdata = dev->pdata;
960
961 if (pdata->wait_for_err_code) {
962 ssize_t ret = read_error_code(&pdata->io_ctx);
963
964 pdata->wait_for_err_code = false;
965 if (ret < 0)
966 return ret;
967 }
968
969 return write_command(&pdata->io_ctx, cmd);
970 }
971
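/* Zero-copy transfer between the socket and the memfd: data is spliced from
 * the input fd into an anonymous pipe, then from the pipe into the output fd,
 * until 'len' bytes have gone through both ends of the pipe. */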
972 static ssize_t network_do_splice(struct iio_device_pdata *pdata, size_t len,
973 bool read)
974 {
975 int pipefd[2];
976 int fd_in, fd_out;
977 ssize_t ret, read_len = len, write_len = 0;
978
979 ret = (ssize_t) pipe2(pipefd, O_CLOEXEC);
980 if (ret < 0)
981 return -errno;
982
983 if (read) {
984 fd_in = pdata->io_ctx.fd;
985 fd_out = pdata->memfd;
986 } else {
987 fd_in = pdata->memfd;
988 fd_out = pdata->io_ctx.fd;
989 }
990
991 do {
992 ret = wait_cancellable(&pdata->io_ctx, read);
993 if (ret < 0)
994 goto err_close_pipe;
995
996 if (read_len) {
997 /*
998 * SPLICE_F_NONBLOCK is just here to avoid a deadlock when
999 * splicing from a socket. As the socket is not in
1000 * non-blocking mode, it should never return -EAGAIN.
1001 * TODO(pcercuei): Find out why it locks up otherwise...
1002 */
1003 ret = splice(fd_in, NULL, pipefd[1], NULL, read_len,
1004 SPLICE_F_MOVE | SPLICE_F_NONBLOCK);
1005 if (!ret)
1006 ret = -EIO;
1007 if (ret < 0 && errno != EAGAIN) {
1008 ret = -errno;
1009 goto err_close_pipe;
1010 } else if (ret > 0) {
1011 write_len += ret;
1012 read_len -= ret;
1013 }
1014 }
1015
1016 if (write_len) {
1017 ret = splice(pipefd[0], NULL, fd_out, NULL, write_len,
1018 SPLICE_F_MOVE | SPLICE_F_NONBLOCK);
1019 if (!ret)
1020 ret = -EIO;
1021 if (ret < 0 && errno != EAGAIN) {
1022 ret = -errno;
1023 goto err_close_pipe;
1024 } else if (ret > 0) {
1025 write_len -= ret;
1026 }
1027 }
1028
1029 } while (write_len || read_len);
1030
1031 err_close_pipe:
1032 close(pipefd[0]);
1033 close(pipefd[1]);
1034 return ret < 0 ? ret : len;
1035 }
1036
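/* High-speed buffer path: samples are staged in an unlinked temporary file
 * (O_TMPFILE) that is spliced to/from the socket and then mmap()ed into the
 * application's address space. Any TX data already queued in the previous
 * buffer is flushed with a WRITEBUF command before the new one is prepared. */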
1037 static ssize_t network_get_buffer(const struct iio_device *dev,
1038 void **addr_ptr, size_t bytes_used,
1039 uint32_t *mask, size_t words)
1040 {
1041 struct iio_device_pdata *pdata = dev->pdata;
1042 ssize_t ret, read = 0;
1043 int memfd;
1044
1045 if (pdata->is_cyclic)
1046 return -ENOSYS;
1047
1048 /* We check early that the temporary file can be created, so that we can
1049 * return -ENOSYS in case it fails, which will indicate that the
1050 * high-speed interface is not available.
1051 *
1052 * O_TMPFILE -> Linux 3.11.
1053 * TODO: use memfd_create (Linux 3.17) */
1054 memfd = open(P_tmpdir, O_RDWR | O_TMPFILE | O_EXCL | O_CLOEXEC, S_IRWXU);
1055 if (memfd < 0)
1056 return -ENOSYS;
1057
1058 if (!addr_ptr || words != (dev->nb_channels + 31) / 32) {
1059 close(memfd);
1060 return -EINVAL;
1061 }
1062
1063 if (pdata->mmap_addr)
1064 munmap(pdata->mmap_addr, pdata->mmap_len);
1065
1066 if (pdata->mmap_addr && pdata->is_tx) {
1067 char buf[1024];
1068
1069 iio_snprintf(buf, sizeof(buf), "WRITEBUF %s %lu\r\n",
1070 dev->id, (unsigned long) bytes_used);
1071
1072 iio_mutex_lock(pdata->lock);
1073
1074 ret = write_rwbuf_command(dev, buf);
1075 if (ret < 0)
1076 goto err_close_memfd;
1077
1078 ret = network_do_splice(pdata, bytes_used, false);
1079 if (ret < 0)
1080 goto err_close_memfd;
1081
1082 pdata->wait_for_err_code = true;
1083 iio_mutex_unlock(pdata->lock);
1084 }
1085
1086 if (pdata->memfd >= 0)
1087 close(pdata->memfd);
1088
1089 pdata->memfd = memfd;
1090
1091 ret = (ssize_t) ftruncate(pdata->memfd, pdata->mmap_len);
1092 if (ret < 0) {
1093 ret = -errno;
1094 ERROR("Unable to truncate temp file: %zi\n", -ret);
1095 return ret;
1096 }
1097
1098 if (!pdata->is_tx) {
1099 char buf[1024];
1100 size_t len = pdata->mmap_len;
1101
1102 iio_snprintf(buf, sizeof(buf), "READBUF %s %lu\r\n",
1103 dev->id, (unsigned long) len);
1104
1105 iio_mutex_lock(pdata->lock);
1106 ret = write_rwbuf_command(dev, buf);
1107 if (ret < 0)
1108 goto err_unlock;
1109
1110 do {
1111 ret = network_read_mask(&pdata->io_ctx, mask, words);
1112 if (!ret)
1113 break;
1114 if (ret < 0)
1115 goto err_unlock;
1116
1117 mask = NULL; /* We read the mask only once */
1118
1119 ret = network_do_splice(pdata, ret, true);
1120 if (ret < 0)
1121 goto err_unlock;
1122
1123 read += ret;
1124 len -= ret;
1125 } while (len);
1126
1127 iio_mutex_unlock(pdata->lock);
1128 }
1129
1130 pdata->mmap_addr = mmap(NULL, pdata->mmap_len,
1131 PROT_READ | PROT_WRITE, MAP_SHARED, pdata->memfd, 0);
1132 if (pdata->mmap_addr == MAP_FAILED) {
1133 pdata->mmap_addr = NULL;
1134 ret = -errno;
1135 ERROR("Unable to mmap: %zi\n", -ret);
1136 return ret;
1137 }
1138
1139 *addr_ptr = pdata->mmap_addr;
1140 return read ? read : (ssize_t) bytes_used;
1141
1142 err_close_memfd:
1143 close(memfd);
1144 err_unlock:
1145 iio_mutex_unlock(pdata->lock);
1146 return ret;
1147 }
1148 #endif
1149
1150 static ssize_t network_read_dev_attr(const struct iio_device *dev,
1151 const char *attr, char *dst, size_t len, enum iio_attr_type type)
1152 {
1153 struct iio_context_pdata *pdata = dev->ctx->pdata;
1154
1155 return iiod_client_read_attr(pdata->iiod_client,
1156 &pdata->io_ctx, dev, NULL, attr, dst, len, type);
1157 }
1158
1159 static ssize_t network_write_dev_attr(const struct iio_device *dev,
1160 const char *attr, const char *src, size_t len, enum iio_attr_type type)
1161 {
1162 struct iio_context_pdata *pdata = dev->ctx->pdata;
1163
1164 return iiod_client_write_attr(pdata->iiod_client,
1165 &pdata->io_ctx, dev, NULL, attr, src, len, type);
1166 }
1167
1168 static ssize_t network_read_chn_attr(const struct iio_channel *chn,
1169 const char *attr, char *dst, size_t len)
1170 {
1171 struct iio_context_pdata *pdata = chn->dev->ctx->pdata;
1172
1173 return iiod_client_read_attr(pdata->iiod_client,
1174 &pdata->io_ctx, chn->dev, chn, attr, dst, len, false);
1175 }
1176
1177 static ssize_t network_write_chn_attr(const struct iio_channel *chn,
1178 const char *attr, const char *src, size_t len)
1179 {
1180 struct iio_context_pdata *pdata = chn->dev->ctx->pdata;
1181
1182 return iiod_client_write_attr(pdata->iiod_client,
1183 &pdata->io_ctx, chn->dev, chn, attr, src, len, false);
1184 }
1185
1186 static int network_get_trigger(const struct iio_device *dev,
1187 const struct iio_device **trigger)
1188 {
1189 struct iio_context_pdata *pdata = dev->ctx->pdata;
1190
1191 return iiod_client_get_trigger(pdata->iiod_client,
1192 &pdata->io_ctx, dev, trigger);
1193 }
1194
1195 static int network_set_trigger(const struct iio_device *dev,
1196 const struct iio_device *trigger)
1197 {
1198 struct iio_context_pdata *pdata = dev->ctx->pdata;
1199
1200 return iiod_client_set_trigger(pdata->iiod_client,
1201 &pdata->io_ctx, dev, trigger);
1202 }
1203
1204 static void network_shutdown(struct iio_context *ctx)
1205 {
1206 struct iio_context_pdata *pdata = ctx->pdata;
1207 unsigned int i;
1208
1209 iio_mutex_lock(pdata->lock);
1210 write_command(&pdata->io_ctx, "\r\nEXIT\r\n");
1211 close(pdata->io_ctx.fd);
1212 iio_mutex_unlock(pdata->lock);
1213
1214 for (i = 0; i < ctx->nb_devices; i++) {
1215 struct iio_device *dev = ctx->devices[i];
1216 struct iio_device_pdata *dpdata = dev->pdata;
1217
1218 if (dpdata) {
1219 network_close(dev);
1220 iio_mutex_destroy(dpdata->lock);
1221 free(dpdata);
1222 }
1223 }
1224
1225 iiod_client_destroy(pdata->iiod_client);
1226 iio_mutex_destroy(pdata->lock);
1227 freeaddrinfo(pdata->addrinfo);
1228 free(pdata);
1229 }
1230
1231 static int network_get_version(const struct iio_context *ctx,
1232 unsigned int *major, unsigned int *minor, char git_tag[8])
1233 {
1234 return iiod_client_get_version(ctx->pdata->iiod_client,
1235 &ctx->pdata->io_ctx, major, minor, git_tag);
1236 }
1237
1238 static unsigned int calculate_remote_timeout(unsigned int timeout)
1239 {
1240 /* XXX(pcercuei): We currently hardcode timeout / 2 for the backend used
1241 * by the remote. Is there something better to do here? */
1242 return timeout / 2;
1243 }
1244
1245 static int network_set_timeout(struct iio_context *ctx, unsigned int timeout)
1246 {
1247 struct iio_context_pdata *pdata = ctx->pdata;
1248 int ret, fd = pdata->io_ctx.fd;
1249
1250 ret = set_socket_timeout(fd, timeout);
1251 if (!ret) {
1252 unsigned int remote_timeout = calculate_remote_timeout(timeout);
1253
1254 ret = iiod_client_set_timeout(pdata->iiod_client,
1255 &pdata->io_ctx, remote_timeout);
1256 if (!ret)
1257 pdata->io_ctx.timeout_ms = timeout;
1258 }
1259 if (ret < 0) {
1260 char buf[1024];
1261 iio_strerror(-ret, buf, sizeof(buf));
1262 WARNING("Unable to set R/W timeout: %s\n", buf);
1263 }
1264 return ret;
1265 }
1266
1267 static int network_set_kernel_buffers_count(const struct iio_device *dev,
1268 unsigned int nb_blocks)
1269 {
1270 struct iio_context_pdata *pdata = dev->ctx->pdata;
1271
1272 return iiod_client_set_kernel_buffers_count(pdata->iiod_client,
1273 &pdata->io_ctx, dev, nb_blocks);
1274 }
1275
1276 static struct iio_context * network_clone(const struct iio_context *ctx)
1277 {
1278 const char *addr = iio_context_get_attr_value(ctx, "ip,ip-addr");
1279
1280 return iio_create_network_context(addr);
1281 }
1282
1283 static const struct iio_backend_ops network_ops = {
1284 .clone = network_clone,
1285 .open = network_open,
1286 .close = network_close,
1287 .read = network_read,
1288 .write = network_write,
1289 #ifdef WITH_NETWORK_GET_BUFFER
1290 .get_buffer = network_get_buffer,
1291 #endif
1292 .read_device_attr = network_read_dev_attr,
1293 .write_device_attr = network_write_dev_attr,
1294 .read_channel_attr = network_read_chn_attr,
1295 .write_channel_attr = network_write_chn_attr,
1296 .get_trigger = network_get_trigger,
1297 .set_trigger = network_set_trigger,
1298 .shutdown = network_shutdown,
1299 .get_version = network_get_version,
1300 .set_timeout = network_set_timeout,
1301 .set_kernel_buffers_count = network_set_kernel_buffers_count,
1302
1303 .cancel = network_cancel,
1304 };
1305
1306 static ssize_t network_write_data(struct iio_context_pdata *pdata,
1307 void *io_data, const char *src, size_t len)
1308 {
1309 struct iio_network_io_context *io_ctx = io_data;
1310
1311 return network_send(io_ctx, src, len, 0);
1312 }
1313
1314 static ssize_t network_read_data(struct iio_context_pdata *pdata,
1315 void *io_data, char *dst, size_t len)
1316 {
1317 struct iio_network_io_context *io_ctx = io_data;
1318
1319 return network_recv(io_ctx, dst, len, 0);
1320 }
1321
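/* Read a '\n'-terminated line. On Linux the data is first inspected with
 * MSG_PEEK so that only the bytes up to and including the newline are
 * consumed; the peeked bytes are then discarded with MSG_TRUNC when the
 * kernel supports it, or re-read into the destination buffer otherwise. */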
1322 static ssize_t network_read_line(struct iio_context_pdata *pdata,
1323 void *io_data, char *dst, size_t len)
1324 {
1325 bool found = false;
1326 size_t i;
1327 #ifdef __linux__
1328 struct iio_network_io_context *io_ctx = io_data;
1329 ssize_t ret;
1330 size_t bytes_read = 0;
1331
1332 do {
1333 size_t to_trunc;
1334
1335 ret = network_recv(io_ctx, dst, len, MSG_PEEK);
1336 if (ret < 0)
1337 return ret;
1338
1339 /* Look for the trailing \n */
1340 for (i = 0; i < (size_t) ret && dst[i] != '\n'; i++);
1341 found = i < (size_t) ret;
1342
1343 len -= ret;
1344 dst += ret;
1345
1346 if (found)
1347 to_trunc = i + 1;
1348 else
1349 to_trunc = (size_t) ret;
1350
1351 /* Advance the read offset to the byte following the \n if
1352 * found, or past the last character read otherwise */
1353 if (pdata->msg_trunc_supported)
1354 ret = network_recv(io_ctx, NULL, to_trunc, MSG_TRUNC);
1355 else
1356 ret = network_recv(io_ctx, dst - ret, to_trunc, 0);
1357 if (ret < 0)
1358 return ret;
1359
1360 bytes_read += to_trunc;
1361 } while (!found && len);
1362
1363 if (!found)
1364 return -EIO;
1365 else
1366 return bytes_read;
1367 #else
1368 for (i = 0; i < len - 1; i++) {
1369 ssize_t ret = network_read_data(pdata, io_data, dst + i, 1);
1370
1371 if (ret < 0)
1372 return ret;
1373
1374 if (dst[i] != '\n')
1375 found = true;
1376 else if (found)
1377 break;
1378 }
1379
1380 if (!found || i == len - 1)
1381 return -EIO;
1382
1383 return (ssize_t) i + 1;
1384 #endif
1385 }
1386
1387 static const struct iiod_client_ops network_iiod_client_ops = {
1388 .write = network_write_data,
1389 .read = network_read_data,
1390 .read_line = network_read_line,
1391 };
1392
1393 #ifdef __linux__
1394 /*
1395 * As of build 16299, Windows Subsystem for Linux presents a Linux API but
1396 * without support for MSG_TRUNC. Since WSL allows running native Linux
1397 * applications, this is not something that can be detected at compile time. If
1398 * we want to support WSL, we have to use a runtime workaround.
1399 */
1400 static bool msg_trunc_supported(struct iio_network_io_context *io_ctx)
1401 {
1402 int ret;
1403
1404 ret = network_recv(io_ctx, NULL, 0, MSG_TRUNC | MSG_DONTWAIT);
1405
1406 return ret != -EFAULT && ret != -EINVAL;
1407 }
1408 #else
1409 static bool msg_trunc_supported(struct iio_network_io_context *io_ctx)
1410 {
1411 return false;
1412 }
1413 #endif
1414
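/* Create a network context: resolve the host (or discover one via Avahi when
 * no host is given), connect a control socket, build the context from the
 * XML description served by iiod, then install the network backend ops. */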
1415 struct iio_context * network_create_context(const char *host)
1416 {
1417 struct addrinfo hints, *res;
1418 struct iio_context *ctx;
1419 struct iio_context_pdata *pdata;
1420 size_t i, len;
1421 int fd, ret;
1422 char *description;
1423 #ifdef _WIN32
1424 WSADATA wsaData;
1425
1426 ret = WSAStartup(MAKEWORD(2, 0), &wsaData);
1427 if (ret < 0) {
1428 ERROR("WSAStartup failed with error %i\n", ret);
1429 errno = -ret;
1430 return NULL;
1431 }
1432 #endif
1433
1434 memset(&hints, 0, sizeof(hints));
1435 hints.ai_family = AF_UNSPEC;
1436 hints.ai_socktype = SOCK_STREAM;
1437
1438 #ifdef HAVE_AVAHI
1439 if (!host) {
1440 char addr_str[AVAHI_ADDRESS_STR_MAX];
1441 char port_str[6];
1442 AvahiAddress address;
1443 uint16_t port = IIOD_PORT;
1444
1445 memset(&address, 0, sizeof(address));
1446
1447 ret = discover_host(&address, &port);
1448 if (ret < 0) {
1449 char buf[1024];
1450 iio_strerror(-ret, buf, sizeof(buf));
1451 DEBUG("Unable to find host: %s\n", buf);
1452 errno = -ret;
1453 return NULL;
1454 }
1455
1456 avahi_address_snprint(addr_str, sizeof(addr_str), &address);
1457 iio_snprintf(port_str, sizeof(port_str), "%hu", port);
1458 ret = getaddrinfo(addr_str, port_str, &hints, &res);
1459 } else
1460 #endif
1461 {
1462 ret = getaddrinfo(host, IIOD_PORT_STR, &hints, &res);
1463 }
1464
1465 if (ret) {
1466 ERROR("Unable to find host: %s\n", gai_strerror(ret));
1467 #ifndef _WIN32
1468 if (ret != EAI_SYSTEM)
1469 errno = -ret;
1470 #endif
1471 return NULL;
1472 }
1473
1474 fd = create_socket(res, DEFAULT_TIMEOUT_MS);
1475 if (fd < 0) {
1476 errno = -fd;
1477 goto err_free_addrinfo;
1478 }
1479
1480 pdata = zalloc(sizeof(*pdata));
1481 if (!pdata) {
1482 errno = ENOMEM;
1483 goto err_close_socket;
1484 }
1485
1486 pdata->io_ctx.fd = fd;
1487 pdata->addrinfo = res;
1488 pdata->io_ctx.timeout_ms = DEFAULT_TIMEOUT_MS;
1489
1490 pdata->lock = iio_mutex_create();
1491 if (!pdata->lock) {
1492 errno = ENOMEM;
1493 goto err_free_pdata;
1494 }
1495
1496 pdata->iiod_client = iiod_client_new(pdata, pdata->lock,
1497 &network_iiod_client_ops);
1498
1499 pdata->msg_trunc_supported = msg_trunc_supported(&pdata->io_ctx);
1500 if (pdata->msg_trunc_supported)
1501 DEBUG("MSG_TRUNC is supported\n");
1502 else
1503 DEBUG("MSG_TRUNC is NOT supported\n");
1504
1505 if (!pdata->iiod_client)
1506 goto err_destroy_mutex;
1507
1508 DEBUG("Creating context...\n");
1509 ctx = iiod_client_create_context(pdata->iiod_client, &pdata->io_ctx);
1510 if (!ctx)
1511 goto err_destroy_iiod_client;
1512
1513 /* Override the name and low-level functions of the XML context
1514 * with those corresponding to the network context */
1515 ctx->name = "network";
1516 ctx->ops = &network_ops;
1517 ctx->pdata = pdata;
1518
1519 #ifdef HAVE_IPV6
1520 len = INET6_ADDRSTRLEN + IF_NAMESIZE + 2;
1521 #else
1522 len = INET_ADDRSTRLEN + 1;
1523 #endif
1524
1525 description = malloc(len);
1526 if (!description) {
1527 ret = -ENOMEM;
1528 goto err_network_shutdown;
1529 }
1530
1531 description[0] = '\0';
1532
1533 #ifdef HAVE_IPV6
1534 if (res->ai_family == AF_INET6) {
1535 struct sockaddr_in6 *in = (struct sockaddr_in6 *) res->ai_addr;
1536 char *ptr;
1537 inet_ntop(AF_INET6, &in->sin6_addr,
1538 description, INET6_ADDRSTRLEN);
1539
1540 ptr = if_indextoname(in->sin6_scope_id, description +
1541 strlen(description) + 1);
1542 if (!ptr) {
1543 ret = -errno;
1544 ERROR("Unable to lookup interface of IPv6 address\n");
1545 goto err_free_description;
1546 }
1547
1548 *(ptr - 1) = '%';
1549 }
1550 #endif
1551 if (res->ai_family == AF_INET) {
1552 struct sockaddr_in *in = (struct sockaddr_in *) res->ai_addr;
1553 #if (!_WIN32 || _WIN32_WINNT >= 0x600)
1554 inet_ntop(AF_INET, &in->sin_addr, description, INET_ADDRSTRLEN);
1555 #else
1556 char *tmp = inet_ntoa(in->sin_addr);
1557 strncpy(description, tmp, len);
1558 #endif
1559 }
1560
1561 ret = iio_context_add_attr(ctx, "ip,ip-addr", description);
1562 if (ret < 0)
1563 goto err_free_description;
1564
1565 for (i = 0; i < ctx->nb_devices; i++) {
1566 struct iio_device *dev = ctx->devices[i];
1567
1568 dev->pdata = zalloc(sizeof(*dev->pdata));
1569 if (!dev->pdata) {
1570 ret = -ENOMEM;
1571 goto err_free_description;
1572 }
1573
1574 dev->pdata->io_ctx.fd = -1;
1575 dev->pdata->io_ctx.timeout_ms = DEFAULT_TIMEOUT_MS;
1576 #ifdef WITH_NETWORK_GET_BUFFER
1577 dev->pdata->memfd = -1;
1578 #endif
1579
1580 dev->pdata->lock = iio_mutex_create();
1581 if (!dev->pdata->lock) {
1582 ret = -ENOMEM;
1583 goto err_free_description;
1584 }
1585 }
1586
1587 if (ctx->description) {
1588 size_t desc_len = strlen(description);
1589 size_t new_size = desc_len + strlen(ctx->description) + 2;
1590 char *ptr, *new_description = realloc(description, new_size);
1591 if (!new_description) {
1592 ret = -ENOMEM;
1593 goto err_free_description;
1594 }
1595
1596 ptr = strrchr(new_description, '\0');
1597 iio_snprintf(ptr, new_size - desc_len, " %s", ctx->description);
1598 free(ctx->description);
1599
1600 ctx->description = new_description;
1601 } else {
1602 ctx->description = description;
1603 }
1604
1605 iiod_client_set_timeout(pdata->iiod_client, &pdata->io_ctx,
1606 calculate_remote_timeout(DEFAULT_TIMEOUT_MS));
1607 return ctx;
1608
1609 err_free_description:
1610 free(description);
1611 err_network_shutdown:
1612 iio_context_destroy(ctx);
1613 errno = -ret;
1614 return NULL;
1615
1616 err_destroy_iiod_client:
1617 iiod_client_destroy(pdata->iiod_client);
1618 err_destroy_mutex:
1619 iio_mutex_destroy(pdata->lock);
1620 err_free_pdata:
1621 free(pdata);
1622 err_close_socket:
1623 close(fd);
1624 err_free_addrinfo:
1625 freeaddrinfo(res);
1626 return NULL;
1627 }
1628