1 /*
2 * net/tipc/socket.c: TIPC socket API
3 *
4 * Copyright (c) 2001-2007, 2012-2017, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include <linux/rhashtable.h>
38 #include <linux/sched/signal.h>
39
40 #include "core.h"
41 #include "name_table.h"
42 #include "node.h"
43 #include "link.h"
44 #include "name_distr.h"
45 #include "socket.h"
46 #include "bcast.h"
47 #include "netlink.h"
48 #include "group.h"
49 #include "trace.h"
50
51 #define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
52 #define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
53 #define TIPC_FWD_MSG 1
54 #define TIPC_MAX_PORT 0xffffffff
55 #define TIPC_MIN_PORT 1
56 #define TIPC_ACK_RATE 4 /* ACK at 1/4 of rcv window size */
57
58 enum {
59 TIPC_LISTEN = TCP_LISTEN,
60 TIPC_ESTABLISHED = TCP_ESTABLISHED,
61 TIPC_OPEN = TCP_CLOSE,
62 TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
63 TIPC_CONNECTING = TCP_SYN_SENT,
64 };
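
/* The TIPC connection states above reuse the generic TCP_* values, which
 * keeps sk->sk_state within the set of values generic socket code expects.
 * Note that TIPC_OPEN maps onto TCP_CLOSE: an unconnected TIPC socket is
 * still fully usable for connectionless messaging.
 */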
65
66 struct sockaddr_pair {
67 struct sockaddr_tipc sock;
68 struct sockaddr_tipc member;
69 };
70
71 /**
72 * struct tipc_sock - TIPC socket structure
73 * @sk: socket - interacts with 'port' and with user via the socket API
74 * @conn_type: TIPC type used when connection was established
75 * @conn_instance: TIPC instance used when connection was established
76 * @published: non-zero if port has one or more associated names
77 * @max_pkt: maximum packet size "hint" used when building messages sent by port
78 * @portid: unique port identity in TIPC socket hash table
79 * @phdr: preformatted message header used when sending messages
80 * @cong_links: list of congested links
81 * @publications: list of publications for port
82 * @blocking_link: address of the congested link we are currently sleeping on
83 * @pub_count: total # of publications port has made during its lifetime
84 * @conn_timeout: the time we can wait for an unresponded setup request
85 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
86 * @cong_link_cnt: number of congested links
87 * @snt_unacked: # messages sent by socket, and not yet acked by peer
88 * @rcv_unacked: # messages read by user, but not yet acked back to peer
89 * @peer: 'connected' peer for dgram/rdm
90 * @node: hash table node
91 * @mc_method: cookie for use between socket and broadcast layer
92 * @rcu: rcu struct for tipc_sock
93 */
94 struct tipc_sock {
95 struct sock sk;
96 u32 conn_type;
97 u32 conn_instance;
98 int published;
99 u32 max_pkt;
100 u32 portid;
101 struct tipc_msg phdr;
102 struct list_head cong_links;
103 struct list_head publications;
104 u32 pub_count;
105 atomic_t dupl_rcvcnt;
106 u16 conn_timeout;
107 bool probe_unacked;
108 u16 cong_link_cnt;
109 u16 snt_unacked;
110 u16 snd_win;
111 u16 peer_caps;
112 u16 rcv_unacked;
113 u16 rcv_win;
114 struct sockaddr_tipc peer;
115 struct rhash_head node;
116 struct tipc_mc_method mc_method;
117 struct rcu_head rcu;
118 struct tipc_group *group;
119 bool group_is_open;
120 };
121
122 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
123 static void tipc_data_ready(struct sock *sk);
124 static void tipc_write_space(struct sock *sk);
125 static void tipc_sock_destruct(struct sock *sk);
126 static int tipc_release(struct socket *sock);
127 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
128 bool kern);
129 static void tipc_sk_timeout(struct timer_list *t);
130 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
131 struct tipc_name_seq const *seq);
132 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
133 struct tipc_name_seq const *seq);
134 static int tipc_sk_leave(struct tipc_sock *tsk);
135 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
136 static int tipc_sk_insert(struct tipc_sock *tsk);
137 static void tipc_sk_remove(struct tipc_sock *tsk);
138 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
139 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
140
141 static const struct proto_ops packet_ops;
142 static const struct proto_ops stream_ops;
143 static const struct proto_ops msg_ops;
144 static struct proto tipc_proto;
145 static const struct rhashtable_params tsk_rht_params;
146
147 static u32 tsk_own_node(struct tipc_sock *tsk)
148 {
149 return msg_prevnode(&tsk->phdr);
150 }
151
152 static u32 tsk_peer_node(struct tipc_sock *tsk)
153 {
154 return msg_destnode(&tsk->phdr);
155 }
156
157 static u32 tsk_peer_port(struct tipc_sock *tsk)
158 {
159 return msg_destport(&tsk->phdr);
160 }
161
162 static bool tsk_unreliable(struct tipc_sock *tsk)
163 {
164 return msg_src_droppable(&tsk->phdr) != 0;
165 }
166
167 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
168 {
169 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
170 }
171
172 static bool tsk_unreturnable(struct tipc_sock *tsk)
173 {
174 return msg_dest_droppable(&tsk->phdr) != 0;
175 }
176
177 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
178 {
179 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
180 }
181
182 static int tsk_importance(struct tipc_sock *tsk)
183 {
184 return msg_importance(&tsk->phdr);
185 }
186
187 static int tsk_set_importance(struct tipc_sock *tsk, int imp)
188 {
189 if (imp > TIPC_CRITICAL_IMPORTANCE)
190 return -EINVAL;
191 msg_set_importance(&tsk->phdr, (u32)imp);
192 return 0;
193 }
194
195 static struct tipc_sock *tipc_sk(const struct sock *sk)
196 {
197 return container_of(sk, struct tipc_sock, sk);
198 }
199
200 static bool tsk_conn_cong(struct tipc_sock *tsk)
201 {
202 return tsk->snt_unacked > tsk->snd_win;
203 }
204
205 static u16 tsk_blocks(int len)
206 {
207 return ((len / FLOWCTL_BLK_SZ) + 1);
208 }
209
210 /* tsk_adv_blocks(): translate a buffer size in bytes to number of
211 * advertisable blocks, taking into account the ratio truesize(len)/len
212 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
213 */
214 static u16 tsk_adv_blocks(int len)
215 {
216 return len / FLOWCTL_BLK_SZ / 4;
217 }
218
219 /* tsk_inc(): increment counter for sent or received data
220 * - If block based flow control is not supported by the peer we
221 * fall back to message based flow control, i.e. one increment per message
222 */
223 static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
224 {
225 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
226 return ((msglen / FLOWCTL_BLK_SZ) + 1);
227 return 1;
228 }
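
/* Worked example of the block based flow control above (a sketch; it assumes
 * FLOWCTL_BLK_SZ is 1024 bytes):
 *   tsk_adv_blocks(64 * 1024) -> 65536 / 1024 / 4 = 16 advertised blocks
 *   tsk_inc(tsk, 1500)        -> 1500 / 1024 + 1  =  2 blocks charged
 * i.e. the receiver advertises only a quarter of its buffer size, which
 * absorbs the worst-case truesize(len)/len ratio of 4 mentioned above.
 */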
229
230 /**
231 * tsk_advance_rx_queue - discard first buffer in socket receive queue
232 *
233 * Caller must hold socket lock
234 */
235 static void tsk_advance_rx_queue(struct sock *sk)
236 {
237 trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
238 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
239 }
240
241 /* tipc_sk_respond() : send response message back to sender
242 */
243 static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
244 {
245 u32 selector;
246 u32 dnode;
247 u32 onode = tipc_own_addr(sock_net(sk));
248
249 if (!tipc_msg_reverse(onode, &skb, err))
250 return;
251
252 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
253 dnode = msg_destnode(buf_msg(skb));
254 selector = msg_origport(buf_msg(skb));
255 tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
256 }
257
258 /**
259 * tsk_rej_rx_queue - reject all buffers in socket receive queue
260 *
261 * Caller must hold socket lock
262 */
263 static void tsk_rej_rx_queue(struct sock *sk)
264 {
265 struct sk_buff *skb;
266
267 while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
268 tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
269 }
270
271 static bool tipc_sk_connected(struct sock *sk)
272 {
273 return sk->sk_state == TIPC_ESTABLISHED;
274 }
275
276 /* tipc_sk_type_connectionless - check if the socket is a datagram socket
277 * @sk: socket
278 *
279 * Returns true if connectionless, false otherwise
280 */
281 static bool tipc_sk_type_connectionless(struct sock *sk)
282 {
283 return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
284 }
285
286 /* tsk_peer_msg - verify if message was sent by connected port's peer
287 *
288 * Handles cases where the node's network address has changed from
289 * the default of <0.0.0> to its configured setting.
290 */
291 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
292 {
293 struct sock *sk = &tsk->sk;
294 u32 self = tipc_own_addr(sock_net(sk));
295 u32 peer_port = tsk_peer_port(tsk);
296 u32 orig_node, peer_node;
297
298 if (unlikely(!tipc_sk_connected(sk)))
299 return false;
300
301 if (unlikely(msg_origport(msg) != peer_port))
302 return false;
303
304 orig_node = msg_orignode(msg);
305 peer_node = tsk_peer_node(tsk);
306
307 if (likely(orig_node == peer_node))
308 return true;
309
310 if (!orig_node && peer_node == self)
311 return true;
312
313 if (!peer_node && orig_node == self)
314 return true;
315
316 return false;
317 }
318
319 /* tipc_set_sk_state - set the sk_state of the socket
320 * @sk: socket
321 *
322 * Caller must hold socket lock
323 *
324 * Returns 0 on success, errno otherwise
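 *
 * Permitted transitions, as implemented below (anything else fails):
 *   any state        -> TIPC_OPEN
 *   TIPC_OPEN        -> TIPC_LISTEN, TIPC_CONNECTING or TIPC_ESTABLISHED
 *   TIPC_CONNECTING  -> TIPC_ESTABLISHED or TIPC_DISCONNECTING
 *   TIPC_ESTABLISHED -> TIPC_DISCONNECTING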
325 */
326 static int tipc_set_sk_state(struct sock *sk, int state)
327 {
328 int oldsk_state = sk->sk_state;
329 int res = -EINVAL;
330
331 switch (state) {
332 case TIPC_OPEN:
333 res = 0;
334 break;
335 case TIPC_LISTEN:
336 case TIPC_CONNECTING:
337 if (oldsk_state == TIPC_OPEN)
338 res = 0;
339 break;
340 case TIPC_ESTABLISHED:
341 if (oldsk_state == TIPC_CONNECTING ||
342 oldsk_state == TIPC_OPEN)
343 res = 0;
344 break;
345 case TIPC_DISCONNECTING:
346 if (oldsk_state == TIPC_CONNECTING ||
347 oldsk_state == TIPC_ESTABLISHED)
348 res = 0;
349 break;
350 }
351
352 if (!res)
353 sk->sk_state = state;
354
355 return res;
356 }
357
358 static int tipc_sk_sock_err(struct socket *sock, long *timeout)
359 {
360 struct sock *sk = sock->sk;
361 int err = sock_error(sk);
362 int typ = sock->type;
363
364 if (err)
365 return err;
366 if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
367 if (sk->sk_state == TIPC_DISCONNECTING)
368 return -EPIPE;
369 else if (!tipc_sk_connected(sk))
370 return -ENOTCONN;
371 }
372 if (!*timeout)
373 return -EAGAIN;
374 if (signal_pending(current))
375 return sock_intr_errno(*timeout);
376
377 return 0;
378 }
379
380 #define tipc_wait_for_cond(sock_, timeo_, condition_) \
381 ({ \
382 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
383 struct sock *sk_; \
384 int rc_; \
385 \
386 while ((rc_ = !(condition_))) { \
387 /* coupled with smp_wmb() in tipc_sk_proto_rcv() */ \
388 smp_rmb(); \
389 sk_ = (sock_)->sk; \
390 rc_ = tipc_sk_sock_err((sock_), timeo_); \
391 if (rc_) \
392 break; \
393 add_wait_queue(sk_sleep(sk_), &wait_); \
394 release_sock(sk_); \
395 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
396 sched_annotate_sleep(); \
397 lock_sock(sk_); \
398 remove_wait_queue(sk_sleep(sk_), &wait_); \
399 } \
400 rc_; \
401 })
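
/* A minimal usage sketch of tipc_wait_for_cond() (it mirrors the callers
 * further down in this file, e.g. __tipc_sendmsg()):
 *
 *	rc = tipc_wait_for_cond(sock, &timeout,
 *				!tipc_dest_find(clinks, dnode, 0));
 *	if (unlikely(rc))
 *		return rc;
 *
 * The macro releases the socket lock while sleeping and re-takes it before
 * the condition is re-evaluated, so any state sampled before the call must
 * be treated as stale once it returns.
 */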
402
403 /**
404 * tipc_sk_create - create a TIPC socket
405 * @net: network namespace (must be default network)
406 * @sock: pre-allocated socket structure
407 * @protocol: protocol indicator (must be 0)
408 * @kern: caused by kernel or by userspace?
409 *
410 * This routine creates additional data structures used by the TIPC socket,
411 * initializes them, and links them together.
412 *
413 * Returns 0 on success, errno otherwise
414 */
415 static int tipc_sk_create(struct net *net, struct socket *sock,
416 int protocol, int kern)
417 {
418 const struct proto_ops *ops;
419 struct sock *sk;
420 struct tipc_sock *tsk;
421 struct tipc_msg *msg;
422
423 /* Validate arguments */
424 if (unlikely(protocol != 0))
425 return -EPROTONOSUPPORT;
426
427 switch (sock->type) {
428 case SOCK_STREAM:
429 ops = &stream_ops;
430 break;
431 case SOCK_SEQPACKET:
432 ops = &packet_ops;
433 break;
434 case SOCK_DGRAM:
435 case SOCK_RDM:
436 ops = &msg_ops;
437 break;
438 default:
439 return -EPROTOTYPE;
440 }
441
442 /* Allocate socket's protocol area */
443 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
444 if (sk == NULL)
445 return -ENOMEM;
446
447 tsk = tipc_sk(sk);
448 tsk->max_pkt = MAX_PKT_DEFAULT;
449 INIT_LIST_HEAD(&tsk->publications);
450 INIT_LIST_HEAD(&tsk->cong_links);
451 msg = &tsk->phdr;
452
453 /* Finish initializing socket data structures */
454 sock->ops = ops;
455 sock_init_data(sock, sk);
456 tipc_set_sk_state(sk, TIPC_OPEN);
457 if (tipc_sk_insert(tsk)) {
458 pr_warn("Socket create failed; port number exhausted\n");
459 return -EINVAL;
460 }
461
462 /* Ensure tsk is visible before we read own_addr. */
463 smp_mb();
464
465 tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
466 TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
467
468 msg_set_origport(msg, tsk->portid);
469 timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
470 sk->sk_shutdown = 0;
471 sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
472 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
473 sk->sk_data_ready = tipc_data_ready;
474 sk->sk_write_space = tipc_write_space;
475 sk->sk_destruct = tipc_sock_destruct;
476 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
477 tsk->group_is_open = true;
478 atomic_set(&tsk->dupl_rcvcnt, 0);
479
480 /* Start out with safe limits until we receive an advertised window */
481 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
482 tsk->rcv_win = tsk->snd_win;
483
484 if (tipc_sk_type_connectionless(sk)) {
485 tsk_set_unreturnable(tsk, true);
486 if (sock->type == SOCK_DGRAM)
487 tsk_set_unreliable(tsk, true);
488 }
489 __skb_queue_head_init(&tsk->mc_method.deferredq);
490 trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
491 return 0;
492 }
493
494 static void tipc_sk_callback(struct rcu_head *head)
495 {
496 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
497
498 sock_put(&tsk->sk);
499 }
500
501 /* Caller should hold socket lock for the socket. */
502 static void __tipc_shutdown(struct socket *sock, int error)
503 {
504 struct sock *sk = sock->sk;
505 struct tipc_sock *tsk = tipc_sk(sk);
506 struct net *net = sock_net(sk);
507 long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
508 u32 dnode = tsk_peer_node(tsk);
509 struct sk_buff *skb;
510
511 /* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
512 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
513 !tsk_conn_cong(tsk)));
514
515 /* Remove any pending SYN message */
516 __skb_queue_purge(&sk->sk_write_queue);
517
518 /* Reject all unreceived messages, except on an active connection
519 * (which disconnects locally & sends a 'FIN+' to peer).
520 */
521 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
522 if (TIPC_SKB_CB(skb)->bytes_read) {
523 kfree_skb(skb);
524 continue;
525 }
526 if (!tipc_sk_type_connectionless(sk) &&
527 sk->sk_state != TIPC_DISCONNECTING) {
528 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
529 tipc_node_remove_conn(net, dnode, tsk->portid);
530 }
531 tipc_sk_respond(sk, skb, error);
532 }
533
534 if (tipc_sk_type_connectionless(sk))
535 return;
536
537 if (sk->sk_state != TIPC_DISCONNECTING) {
538 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
539 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
540 tsk_own_node(tsk), tsk_peer_port(tsk),
541 tsk->portid, error);
542 if (skb)
543 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
544 tipc_node_remove_conn(net, dnode, tsk->portid);
545 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
546 }
547 }
548
549 /**
550 * tipc_release - destroy a TIPC socket
551 * @sock: socket to destroy
552 *
553 * This routine cleans up any messages that are still queued on the socket.
554 * For DGRAM and RDM socket types, all queued messages are rejected.
555 * For SEQPACKET and STREAM socket types, the first message is rejected
556 * and any others are discarded. (If the first message on a STREAM socket
557 * is partially-read, it is discarded and the next one is rejected instead.)
558 *
559 * NOTE: Rejected messages are not necessarily returned to the sender! They
560 * are returned or discarded according to the "destination droppable" setting
561 * specified for the message by the sender.
562 *
563 * Returns 0 on success, errno otherwise
564 */
565 static int tipc_release(struct socket *sock)
566 {
567 struct sock *sk = sock->sk;
568 struct tipc_sock *tsk;
569
570 /*
571 * Exit if socket isn't fully initialized (occurs when a failed accept()
572 * releases a pre-allocated child socket that was never used)
573 */
574 if (sk == NULL)
575 return 0;
576
577 tsk = tipc_sk(sk);
578 lock_sock(sk);
579
580 trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
581 __tipc_shutdown(sock, TIPC_ERR_NO_PORT);
582 sk->sk_shutdown = SHUTDOWN_MASK;
583 tipc_sk_leave(tsk);
584 tipc_sk_withdraw(tsk, 0, NULL);
585 __skb_queue_purge(&tsk->mc_method.deferredq);
586 sk_stop_timer(sk, &sk->sk_timer);
587 tipc_sk_remove(tsk);
588
589 sock_orphan(sk);
590 /* Reject any messages that accumulated in backlog queue */
591 release_sock(sk);
592 tipc_dest_list_purge(&tsk->cong_links);
593 tsk->cong_link_cnt = 0;
594 call_rcu(&tsk->rcu, tipc_sk_callback);
595 sock->sk = NULL;
596
597 return 0;
598 }
599
600 /**
601 * tipc_bind - associate or disassociate TIPC name(s) with a socket
602 * @sock: socket structure
603 * @uaddr: socket address describing name(s) and desired operation
604 * @uaddr_len: size of socket address data structure
605 *
606 * Name and name sequence binding is indicated using a positive scope value;
607 * a negative scope value unbinds the specified name. Specifying no name
608 * (i.e. a socket address length of 0) unbinds all names from the socket.
609 *
610 * Returns 0 on success, errno otherwise
611 *
612 * NOTE: This routine takes the socket lock in order to serialize concurrent
613 * publish/withdraw operations on the socket.
614 */
615 static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
616 int uaddr_len)
617 {
618 struct sock *sk = sock->sk;
619 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
620 struct tipc_sock *tsk = tipc_sk(sk);
621 int res = -EINVAL;
622
623 lock_sock(sk);
624 if (unlikely(!uaddr_len)) {
625 res = tipc_sk_withdraw(tsk, 0, NULL);
626 goto exit;
627 }
628 if (tsk->group) {
629 res = -EACCES;
630 goto exit;
631 }
632 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
633 res = -EINVAL;
634 goto exit;
635 }
636 if (addr->family != AF_TIPC) {
637 res = -EAFNOSUPPORT;
638 goto exit;
639 }
640
641 if (addr->addrtype == TIPC_ADDR_NAME)
642 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
643 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
644 res = -EAFNOSUPPORT;
645 goto exit;
646 }
647
648 if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
649 (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
650 (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
651 res = -EACCES;
652 goto exit;
653 }
654
655 res = (addr->scope >= 0) ?
656 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
657 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
658 exit:
659 release_sock(sk);
660 return res;
661 }
662
663 /**
664 * tipc_getname - get port ID of socket or peer socket
665 * @sock: socket structure
666 * @uaddr: area for returned socket address
667 * @uaddr_len: area for returned length of socket address
668 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
669 *
670 * Returns 0 on success, errno otherwise
671 *
672 * NOTE: This routine doesn't need to take the socket lock since it only
673 * accesses socket information that is unchanging (or which changes in
674 * a completely predictable manner).
675 */
676 static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
677 int peer)
678 {
679 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
680 struct sock *sk = sock->sk;
681 struct tipc_sock *tsk = tipc_sk(sk);
682
683 memset(addr, 0, sizeof(*addr));
684 if (peer) {
685 if ((!tipc_sk_connected(sk)) &&
686 ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
687 return -ENOTCONN;
688 addr->addr.id.ref = tsk_peer_port(tsk);
689 addr->addr.id.node = tsk_peer_node(tsk);
690 } else {
691 addr->addr.id.ref = tsk->portid;
692 addr->addr.id.node = tipc_own_addr(sock_net(sk));
693 }
694
695 addr->addrtype = TIPC_ADDR_ID;
696 addr->family = AF_TIPC;
697 addr->scope = 0;
698 addr->addr.name.domain = 0;
699
700 return sizeof(*addr);
701 }
702
703 /**
704 * tipc_poll - read and possibly block on pollmask
705 * @file: file structure associated with the socket
706 * @sock: socket for which to calculate the poll bits
707 * @wait: poll table, used to register the caller for wakeup
708 *
709 * Returns pollmask value
710 *
711 * COMMENTARY:
712 * It appears that the usual socket locking mechanisms are not useful here
713 * since the pollmask info is potentially out-of-date the moment this routine
714 * exits. TCP and other protocols seem to rely on higher level poll routines
715 * to handle any preventable race conditions, so TIPC will do the same ...
716 *
717 * IMPORTANT: The fact that a read or write operation is indicated does NOT
718 * imply that the operation will succeed, merely that it should be performed
719 * and will not block.
720 */
721 static __poll_t tipc_poll(struct file *file, struct socket *sock,
722 poll_table *wait)
723 {
724 struct sock *sk = sock->sk;
725 struct tipc_sock *tsk = tipc_sk(sk);
726 __poll_t revents = 0;
727
728 sock_poll_wait(file, sock, wait);
729 trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");
730
731 if (sk->sk_shutdown & RCV_SHUTDOWN)
732 revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
733 if (sk->sk_shutdown == SHUTDOWN_MASK)
734 revents |= EPOLLHUP;
735
736 switch (sk->sk_state) {
737 case TIPC_ESTABLISHED:
738 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
739 revents |= EPOLLOUT;
740 /* fall through */
741 case TIPC_LISTEN:
742 case TIPC_CONNECTING:
743 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
744 revents |= EPOLLIN | EPOLLRDNORM;
745 break;
746 case TIPC_OPEN:
747 if (tsk->group_is_open && !tsk->cong_link_cnt)
748 revents |= EPOLLOUT;
749 if (!tipc_sk_type_connectionless(sk))
750 break;
751 if (skb_queue_empty_lockless(&sk->sk_receive_queue))
752 break;
753 revents |= EPOLLIN | EPOLLRDNORM;
754 break;
755 case TIPC_DISCONNECTING:
756 revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
757 break;
758 }
759 return revents;
760 }
761
762 /**
763 * tipc_sendmcast - send multicast message
764 * @sock: socket structure
765 * @seq: destination address
766 * @msg: message to send
767 * @dlen: length of data to send
768 * @timeout: timeout to wait for wakeup
769 *
770 * Called from function tipc_sendmsg(), which has done all sanity checks
771 * Returns the number of bytes sent on success, or errno
772 */
773 static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
774 struct msghdr *msg, size_t dlen, long timeout)
775 {
776 struct sock *sk = sock->sk;
777 struct tipc_sock *tsk = tipc_sk(sk);
778 struct tipc_msg *hdr = &tsk->phdr;
779 struct net *net = sock_net(sk);
780 int mtu = tipc_bcast_get_mtu(net);
781 struct tipc_mc_method *method = &tsk->mc_method;
782 struct sk_buff_head pkts;
783 struct tipc_nlist dsts;
784 int rc;
785
786 if (tsk->group)
787 return -EACCES;
788
789 /* Block or return if any destination link is congested */
790 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
791 if (unlikely(rc))
792 return rc;
793
794 /* Lookup destination nodes */
795 tipc_nlist_init(&dsts, tipc_own_addr(net));
796 tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
797 seq->upper, &dsts);
798 if (!dsts.local && !dsts.remote)
799 return -EHOSTUNREACH;
800
801 /* Build message header */
802 msg_set_type(hdr, TIPC_MCAST_MSG);
803 msg_set_hdr_sz(hdr, MCAST_H_SIZE);
804 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
805 msg_set_destport(hdr, 0);
806 msg_set_destnode(hdr, 0);
807 msg_set_nametype(hdr, seq->type);
808 msg_set_namelower(hdr, seq->lower);
809 msg_set_nameupper(hdr, seq->upper);
810
811 /* Build message as chain of buffers */
812 __skb_queue_head_init(&pkts);
813 rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
814
815 /* Send message if build was successful */
816 if (unlikely(rc == dlen)) {
817 trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
818 TIPC_DUMP_SK_SNDQ, " ");
819 rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
820 &tsk->cong_link_cnt);
821 }
822
823 tipc_nlist_purge(&dsts);
824
825 return rc ? rc : dlen;
826 }
827
828 /**
829 * tipc_send_group_msg - send a message to a member in the group
830 * @net: network namespace
831 * @m: message to send
832 * @mb: group member
833 * @dnode: destination node
834 * @dport: destination port
835 * @dlen: total length of message data
836 */
837 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
838 struct msghdr *m, struct tipc_member *mb,
839 u32 dnode, u32 dport, int dlen)
840 {
841 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
842 struct tipc_mc_method *method = &tsk->mc_method;
843 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
844 struct tipc_msg *hdr = &tsk->phdr;
845 struct sk_buff_head pkts;
846 int mtu, rc;
847
848 /* Complete message header */
849 msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
850 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
851 msg_set_destport(hdr, dport);
852 msg_set_destnode(hdr, dnode);
853 msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
854
855 /* Build message as chain of buffers */
856 __skb_queue_head_init(&pkts);
857 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
858 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
859 if (unlikely(rc != dlen))
860 return rc;
861
862 /* Send message */
863 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
864 if (unlikely(rc == -ELINKCONG)) {
865 tipc_dest_push(&tsk->cong_links, dnode, 0);
866 tsk->cong_link_cnt++;
867 }
868
869 /* Update send window */
870 tipc_group_update_member(mb, blks);
871
872 /* A broadcast sent within next EXPIRE period must follow same path */
873 method->rcast = true;
874 method->mandatory = true;
875 return dlen;
876 }
877
878 /**
879 * tipc_send_group_unicast - send message to a member in the group
880 * @sock: socket structure
881 * @m: message to send
882 * @dlen: total length of message data
883 * @timeout: timeout to wait for wakeup
884 *
885 * Called from function tipc_sendmsg(), which has done all sanity checks
886 * Returns the number of bytes sent on success, or errno
887 */
888 static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
889 int dlen, long timeout)
890 {
891 struct sock *sk = sock->sk;
892 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
893 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
894 struct tipc_sock *tsk = tipc_sk(sk);
895 struct net *net = sock_net(sk);
896 struct tipc_member *mb = NULL;
897 u32 node, port;
898 int rc;
899
900 node = dest->addr.id.node;
901 port = dest->addr.id.ref;
902 if (!port && !node)
903 return -EHOSTUNREACH;
904
905 /* Block or return if destination link or member is congested */
906 rc = tipc_wait_for_cond(sock, &timeout,
907 !tipc_dest_find(&tsk->cong_links, node, 0) &&
908 tsk->group &&
909 !tipc_group_cong(tsk->group, node, port, blks,
910 &mb));
911 if (unlikely(rc))
912 return rc;
913
914 if (unlikely(!mb))
915 return -EHOSTUNREACH;
916
917 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
918
919 return rc ? rc : dlen;
920 }
921
922 /**
923 * tipc_send_group_anycast - send message to any member with given identity
924 * @sock: socket structure
925 * @m: message to send
926 * @dlen: total length of message data
927 * @timeout: timeout to wait for wakeup
928 *
929 * Called from function tipc_sendmsg(), which has done all sanity checks
930 * Returns the number of bytes sent on success, or errno
931 */
932 static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
933 int dlen, long timeout)
934 {
935 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
936 struct sock *sk = sock->sk;
937 struct tipc_sock *tsk = tipc_sk(sk);
938 struct list_head *cong_links = &tsk->cong_links;
939 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
940 struct tipc_msg *hdr = &tsk->phdr;
941 struct tipc_member *first = NULL;
942 struct tipc_member *mbr = NULL;
943 struct net *net = sock_net(sk);
944 u32 node, port, exclude;
945 struct list_head dsts;
946 u32 type, inst, scope;
947 int lookups = 0;
948 int dstcnt, rc;
949 bool cong;
950
951 INIT_LIST_HEAD(&dsts);
952
953 type = msg_nametype(hdr);
954 inst = dest->addr.name.name.instance;
955 scope = msg_lookup_scope(hdr);
956
957 while (++lookups < 4) {
958 exclude = tipc_group_exclude(tsk->group);
959
960 first = NULL;
961
962 /* Look for a non-congested destination member, if any */
963 while (1) {
964 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
965 &dstcnt, exclude, false))
966 return -EHOSTUNREACH;
967 tipc_dest_pop(&dsts, &node, &port);
968 cong = tipc_group_cong(tsk->group, node, port, blks,
969 &mbr);
970 if (!cong)
971 break;
972 if (mbr == first)
973 break;
974 if (!first)
975 first = mbr;
976 }
977
978 /* Start over if destination was not in member list */
979 if (unlikely(!mbr))
980 continue;
981
982 if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
983 break;
984
985 /* Block or return if destination link or member is congested */
986 rc = tipc_wait_for_cond(sock, &timeout,
987 !tipc_dest_find(cong_links, node, 0) &&
988 tsk->group &&
989 !tipc_group_cong(tsk->group, node, port,
990 blks, &mbr));
991 if (unlikely(rc))
992 return rc;
993
994 /* Send, unless destination disappeared while waiting */
995 if (likely(mbr))
996 break;
997 }
998
999 if (unlikely(lookups >= 4))
1000 return -EHOSTUNREACH;
1001
1002 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
1003
1004 return rc ? rc : dlen;
1005 }
1006
1007 /**
1008 * tipc_send_group_bcast - send message to all members in communication group
1009 * @sock: socket structure
1010 * @m: message to send
1011 * @dlen: total length of message data
1012 * @timeout: timeout to wait for wakeup
1013 *
1014 * Called from function tipc_sendmsg(), which has done all sanity checks
1015 * Returns the number of bytes sent on success, or errno
1016 */
1017 static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
1018 int dlen, long timeout)
1019 {
1020 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1021 struct sock *sk = sock->sk;
1022 struct net *net = sock_net(sk);
1023 struct tipc_sock *tsk = tipc_sk(sk);
1024 struct tipc_nlist *dsts;
1025 struct tipc_mc_method *method = &tsk->mc_method;
1026 bool ack = method->mandatory && method->rcast;
1027 int blks = tsk_blocks(MCAST_H_SIZE + dlen);
1028 struct tipc_msg *hdr = &tsk->phdr;
1029 int mtu = tipc_bcast_get_mtu(net);
1030 struct sk_buff_head pkts;
1031 int rc = -EHOSTUNREACH;
1032
1033 /* Block or return if any destination link or member is congested */
1034 rc = tipc_wait_for_cond(sock, &timeout,
1035 !tsk->cong_link_cnt && tsk->group &&
1036 !tipc_group_bc_cong(tsk->group, blks));
1037 if (unlikely(rc))
1038 return rc;
1039
1040 dsts = tipc_group_dests(tsk->group);
1041 if (!dsts->local && !dsts->remote)
1042 return -EHOSTUNREACH;
1043
1044 /* Complete message header */
1045 if (dest) {
1046 msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
1047 msg_set_nameinst(hdr, dest->addr.name.name.instance);
1048 } else {
1049 msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
1050 msg_set_nameinst(hdr, 0);
1051 }
1052 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
1053 msg_set_destport(hdr, 0);
1054 msg_set_destnode(hdr, 0);
1055 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
1056
1057 /* Avoid getting stuck with repeated forced replicasts */
1058 msg_set_grp_bc_ack_req(hdr, ack);
1059
1060 /* Build message as chain of buffers */
1061 __skb_queue_head_init(&pkts);
1062 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1063 if (unlikely(rc != dlen))
1064 return rc;
1065
1066 /* Send message */
1067 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
1068 if (unlikely(rc))
1069 return rc;
1070
1071 /* Update broadcast sequence number and send windows */
1072 tipc_group_update_bc_members(tsk->group, blks, ack);
1073
1074 /* Broadcast link is now free to choose method for next broadcast */
1075 method->mandatory = false;
1076 method->expires = jiffies;
1077
1078 return dlen;
1079 }
1080
1081 /**
1082 * tipc_send_group_mcast - send message to all members with given identity
1083 * @sock: socket structure
1084 * @m: message to send
1085 * @dlen: total length of message data
1086 * @timeout: timeout to wait for wakeup
1087 *
1088 * Called from function tipc_sendmsg(), which has done all sanity checks
1089 * Returns the number of bytes sent on success, or errno
1090 */
1091 static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
1092 int dlen, long timeout)
1093 {
1094 struct sock *sk = sock->sk;
1095 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1096 struct tipc_sock *tsk = tipc_sk(sk);
1097 struct tipc_group *grp = tsk->group;
1098 struct tipc_msg *hdr = &tsk->phdr;
1099 struct net *net = sock_net(sk);
1100 u32 type, inst, scope, exclude;
1101 struct list_head dsts;
1102 u32 dstcnt;
1103
1104 INIT_LIST_HEAD(&dsts);
1105
1106 type = msg_nametype(hdr);
1107 inst = dest->addr.name.name.instance;
1108 scope = msg_lookup_scope(hdr);
1109 exclude = tipc_group_exclude(grp);
1110
1111 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
1112 &dstcnt, exclude, true))
1113 return -EHOSTUNREACH;
1114
1115 if (dstcnt == 1) {
1116 tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
1117 return tipc_send_group_unicast(sock, m, dlen, timeout);
1118 }
1119
1120 tipc_dest_list_purge(&dsts);
1121 return tipc_send_group_bcast(sock, m, dlen, timeout);
1122 }
1123
1124 /**
1125 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
1126 * @arrvq: queue with arriving messages, to be cloned after destination lookup
1127 * @inputq: queue with cloned messages, delivered to socket after dest lookup
1128 *
1129 * Multi-threaded: parallel calls with reference to same queues may occur
1130 */
1131 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
1132 struct sk_buff_head *inputq)
1133 {
1134 u32 self = tipc_own_addr(net);
1135 u32 type, lower, upper, scope;
1136 struct sk_buff *skb, *_skb;
1137 u32 portid, onode;
1138 struct sk_buff_head tmpq;
1139 struct list_head dports;
1140 struct tipc_msg *hdr;
1141 int user, mtyp, hlen;
1142 bool exact;
1143
1144 __skb_queue_head_init(&tmpq);
1145 INIT_LIST_HEAD(&dports);
1146
1147 skb = tipc_skb_peek(arrvq, &inputq->lock);
1148 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
1149 hdr = buf_msg(skb);
1150 user = msg_user(hdr);
1151 mtyp = msg_type(hdr);
1152 hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
1153 onode = msg_orignode(hdr);
1154 type = msg_nametype(hdr);
1155
1156 if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
1157 spin_lock_bh(&inputq->lock);
1158 if (skb_peek(arrvq) == skb) {
1159 __skb_dequeue(arrvq);
1160 __skb_queue_tail(inputq, skb);
1161 }
1162 kfree_skb(skb);
1163 spin_unlock_bh(&inputq->lock);
1164 continue;
1165 }
1166
1167 /* Group messages require exact scope match */
1168 if (msg_in_group(hdr)) {
1169 lower = 0;
1170 upper = ~0;
1171 scope = msg_lookup_scope(hdr);
1172 exact = true;
1173 } else {
1174 /* TIPC_NODE_SCOPE means "any scope" in this context */
1175 if (onode == self)
1176 scope = TIPC_NODE_SCOPE;
1177 else
1178 scope = TIPC_CLUSTER_SCOPE;
1179 exact = false;
1180 lower = msg_namelower(hdr);
1181 upper = msg_nameupper(hdr);
1182 }
1183
1184 /* Create destination port list: */
1185 tipc_nametbl_mc_lookup(net, type, lower, upper,
1186 scope, exact, &dports);
1187
1188 /* Clone message per destination */
1189 while (tipc_dest_pop(&dports, NULL, &portid)) {
1190 _skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
1191 if (_skb) {
1192 msg_set_destport(buf_msg(_skb), portid);
1193 __skb_queue_tail(&tmpq, _skb);
1194 continue;
1195 }
1196 pr_warn("Failed to clone mcast rcv buffer\n");
1197 }
1198 /* Append to inputq if not already done by other thread */
1199 spin_lock_bh(&inputq->lock);
1200 if (skb_peek(arrvq) == skb) {
1201 skb_queue_splice_tail_init(&tmpq, inputq);
1202 kfree_skb(__skb_dequeue(arrvq));
1203 }
1204 spin_unlock_bh(&inputq->lock);
1205 __skb_queue_purge(&tmpq);
1206 kfree_skb(skb);
1207 }
1208 tipc_sk_rcv(net, inputq);
1209 }
1210
1211 /**
1212 * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
1213 * @tsk: receiving socket
1214 * @skb: pointer to message buffer.
1215 */
1216 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
1217 struct sk_buff_head *inputq,
1218 struct sk_buff_head *xmitq)
1219 {
1220 struct tipc_msg *hdr = buf_msg(skb);
1221 u32 onode = tsk_own_node(tsk);
1222 struct sock *sk = &tsk->sk;
1223 int mtyp = msg_type(hdr);
1224 bool conn_cong;
1225
1226 /* Ignore if connection cannot be validated: */
1227 if (!tsk_peer_msg(tsk, hdr)) {
1228 trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
1229 goto exit;
1230 }
1231
1232 if (unlikely(msg_errcode(hdr))) {
1233 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1234 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
1235 tsk_peer_port(tsk));
1236 sk->sk_state_change(sk);
1237
1238 /* State change is ignored if socket already awake,
1239 * - convert msg to abort msg and add to inqueue
1240 */
1241 msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
1242 msg_set_type(hdr, TIPC_CONN_MSG);
1243 msg_set_size(hdr, BASIC_H_SIZE);
1244 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1245 __skb_queue_tail(inputq, skb);
1246 return;
1247 }
1248
1249 tsk->probe_unacked = false;
1250
1251 if (mtyp == CONN_PROBE) {
1252 msg_set_type(hdr, CONN_PROBE_REPLY);
1253 if (tipc_msg_reverse(onode, &skb, TIPC_OK))
1254 __skb_queue_tail(xmitq, skb);
1255 return;
1256 } else if (mtyp == CONN_ACK) {
1257 conn_cong = tsk_conn_cong(tsk);
1258 tsk->snt_unacked -= msg_conn_ack(hdr);
1259 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1260 tsk->snd_win = msg_adv_win(hdr);
1261 if (conn_cong)
1262 sk->sk_write_space(sk);
1263 } else if (mtyp != CONN_PROBE_REPLY) {
1264 pr_warn("Received unknown CONN_PROTO msg\n");
1265 }
1266 exit:
1267 kfree_skb(skb);
1268 }
1269
1270 /**
1271 * tipc_sendmsg - send message in connectionless manner
1272 * @sock: socket structure
1273 * @m: message to send
1274 * @dsz: amount of user data to be sent
1275 *
1276 * Message must have a destination specified explicitly.
1277 * Used for SOCK_RDM and SOCK_DGRAM messages,
1278 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
1279 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
1280 *
1281 * Returns the number of bytes sent on success, or errno otherwise
1282 */
1283 static int tipc_sendmsg(struct socket *sock,
1284 struct msghdr *m, size_t dsz)
1285 {
1286 struct sock *sk = sock->sk;
1287 int ret;
1288
1289 lock_sock(sk);
1290 ret = __tipc_sendmsg(sock, m, dsz);
1291 release_sock(sk);
1292
1293 return ret;
1294 }
1295
1296 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
1297 {
1298 struct sock *sk = sock->sk;
1299 struct net *net = sock_net(sk);
1300 struct tipc_sock *tsk = tipc_sk(sk);
1301 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1302 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1303 struct list_head *clinks = &tsk->cong_links;
1304 bool syn = !tipc_sk_type_connectionless(sk);
1305 struct tipc_group *grp = tsk->group;
1306 struct tipc_msg *hdr = &tsk->phdr;
1307 struct tipc_name_seq *seq;
1308 struct sk_buff_head pkts;
1309 u32 dport = 0, dnode = 0;
1310 u32 type = 0, inst = 0;
1311 int mtu, rc;
1312
1313 if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
1314 return -EMSGSIZE;
1315
1316 if (likely(dest)) {
1317 if (unlikely(m->msg_namelen < sizeof(*dest)))
1318 return -EINVAL;
1319 if (unlikely(dest->family != AF_TIPC))
1320 return -EINVAL;
1321 }
1322
1323 if (grp) {
1324 if (!dest)
1325 return tipc_send_group_bcast(sock, m, dlen, timeout);
1326 if (dest->addrtype == TIPC_ADDR_NAME)
1327 return tipc_send_group_anycast(sock, m, dlen, timeout);
1328 if (dest->addrtype == TIPC_ADDR_ID)
1329 return tipc_send_group_unicast(sock, m, dlen, timeout);
1330 if (dest->addrtype == TIPC_ADDR_MCAST)
1331 return tipc_send_group_mcast(sock, m, dlen, timeout);
1332 return -EINVAL;
1333 }
1334
1335 if (unlikely(!dest)) {
1336 dest = &tsk->peer;
1337 if (!syn && dest->family != AF_TIPC)
1338 return -EDESTADDRREQ;
1339 }
1340
1341 if (unlikely(syn)) {
1342 if (sk->sk_state == TIPC_LISTEN)
1343 return -EPIPE;
1344 if (sk->sk_state != TIPC_OPEN)
1345 return -EISCONN;
1346 if (tsk->published)
1347 return -EOPNOTSUPP;
1348 if (dest->addrtype == TIPC_ADDR_NAME) {
1349 tsk->conn_type = dest->addr.name.name.type;
1350 tsk->conn_instance = dest->addr.name.name.instance;
1351 }
1352 msg_set_syn(hdr, 1);
1353 }
1354
1355 seq = &dest->addr.nameseq;
1356 if (dest->addrtype == TIPC_ADDR_MCAST)
1357 return tipc_sendmcast(sock, seq, m, dlen, timeout);
1358
1359 if (dest->addrtype == TIPC_ADDR_NAME) {
1360 type = dest->addr.name.name.type;
1361 inst = dest->addr.name.name.instance;
1362 dnode = dest->addr.name.domain;
1363 dport = tipc_nametbl_translate(net, type, inst, &dnode);
1364 if (unlikely(!dport && !dnode))
1365 return -EHOSTUNREACH;
1366 } else if (dest->addrtype == TIPC_ADDR_ID) {
1367 dnode = dest->addr.id.node;
1368 } else {
1369 return -EINVAL;
1370 }
1371
1372 /* Block or return if destination link is congested */
1373 rc = tipc_wait_for_cond(sock, &timeout,
1374 !tipc_dest_find(clinks, dnode, 0));
1375 if (unlikely(rc))
1376 return rc;
1377
1378 if (dest->addrtype == TIPC_ADDR_NAME) {
1379 msg_set_type(hdr, TIPC_NAMED_MSG);
1380 msg_set_hdr_sz(hdr, NAMED_H_SIZE);
1381 msg_set_nametype(hdr, type);
1382 msg_set_nameinst(hdr, inst);
1383 msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
1384 msg_set_destnode(hdr, dnode);
1385 msg_set_destport(hdr, dport);
1386 } else { /* TIPC_ADDR_ID */
1387 msg_set_type(hdr, TIPC_DIRECT_MSG);
1388 msg_set_lookup_scope(hdr, 0);
1389 msg_set_destnode(hdr, dnode);
1390 msg_set_destport(hdr, dest->addr.id.ref);
1391 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1392 }
1393
1394 __skb_queue_head_init(&pkts);
1395 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
1396 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1397 if (unlikely(rc != dlen))
1398 return rc;
1399 if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) {
1400 __skb_queue_purge(&pkts);
1401 return -ENOMEM;
1402 }
1403
1404 trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
1405 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1406 if (unlikely(rc == -ELINKCONG)) {
1407 tipc_dest_push(clinks, dnode, 0);
1408 tsk->cong_link_cnt++;
1409 rc = 0;
1410 }
1411
1412 if (unlikely(syn && !rc))
1413 tipc_set_sk_state(sk, TIPC_CONNECTING);
1414
1415 return rc ? rc : dlen;
1416 }
1417
1418 /**
1419 * tipc_sendstream - send stream-oriented data
1420 * @sock: socket structure
1421 * @m: data to send
1422 * @dsz: total length of data to be transmitted
1423 *
1424 * Used for SOCK_STREAM data.
1425 *
1426 * Returns the number of bytes sent on success (or partial success),
1427 * or errno if no data sent
1428 */
1429 static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
1430 {
1431 struct sock *sk = sock->sk;
1432 int ret;
1433
1434 lock_sock(sk);
1435 ret = __tipc_sendstream(sock, m, dsz);
1436 release_sock(sk);
1437
1438 return ret;
1439 }
1440
1441 static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
1442 {
1443 struct sock *sk = sock->sk;
1444 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1445 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1446 struct tipc_sock *tsk = tipc_sk(sk);
1447 struct tipc_msg *hdr = &tsk->phdr;
1448 struct net *net = sock_net(sk);
1449 struct sk_buff_head pkts;
1450 u32 dnode = tsk_peer_node(tsk);
1451 int send, sent = 0;
1452 int rc = 0;
1453
1454 __skb_queue_head_init(&pkts);
1455
1456 if (unlikely(dlen > INT_MAX))
1457 return -EMSGSIZE;
1458
1459 /* Handle implicit connection setup */
1460 if (unlikely(dest)) {
1461 rc = __tipc_sendmsg(sock, m, dlen);
1462 if (dlen && dlen == rc) {
1463 tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
1464 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1465 }
1466 return rc;
1467 }
1468
1469 do {
1470 rc = tipc_wait_for_cond(sock, &timeout,
1471 (!tsk->cong_link_cnt &&
1472 !tsk_conn_cong(tsk) &&
1473 tipc_sk_connected(sk)));
1474 if (unlikely(rc))
1475 break;
1476
1477 send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
1478 rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
1479 if (unlikely(rc != send))
1480 break;
1481
1482 trace_tipc_sk_sendstream(sk, skb_peek(&pkts),
1483 TIPC_DUMP_SK_SNDQ, " ");
1484 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1485 if (unlikely(rc == -ELINKCONG)) {
1486 tsk->cong_link_cnt = 1;
1487 rc = 0;
1488 }
1489 if (likely(!rc)) {
1490 tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
1491 sent += send;
1492 }
1493 } while (sent < dlen && !rc);
1494
1495 return sent ? sent : rc;
1496 }
1497
1498 /**
1499 * tipc_send_packet - send a connection-oriented message
1500 * @sock: socket structure
1501 * @m: message to send
1502 * @dsz: length of data to be transmitted
1503 *
1504 * Used for SOCK_SEQPACKET messages.
1505 *
1506 * Returns the number of bytes sent on success, or errno otherwise
1507 */
1508 static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
1509 {
1510 if (dsz > TIPC_MAX_USER_MSG_SIZE)
1511 return -EMSGSIZE;
1512
1513 return tipc_sendstream(sock, m, dsz);
1514 }
1515
1516 /* tipc_sk_finish_conn - complete the setup of a connection
1517 */
1518 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1519 u32 peer_node)
1520 {
1521 struct sock *sk = &tsk->sk;
1522 struct net *net = sock_net(sk);
1523 struct tipc_msg *msg = &tsk->phdr;
1524
1525 msg_set_syn(msg, 0);
1526 msg_set_destnode(msg, peer_node);
1527 msg_set_destport(msg, peer_port);
1528 msg_set_type(msg, TIPC_CONN_MSG);
1529 msg_set_lookup_scope(msg, 0);
1530 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1531
1532 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
1533 tipc_set_sk_state(sk, TIPC_ESTABLISHED);
1534 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1535 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
1536 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1537 __skb_queue_purge(&sk->sk_write_queue);
1538 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1539 return;
1540
1541 /* Fall back to message based flow control */
1542 tsk->rcv_win = FLOWCTL_MSG_WIN;
1543 tsk->snd_win = FLOWCTL_MSG_WIN;
1544 }
1545
1546 /**
1547 * tipc_sk_set_orig_addr - capture sender's address for received message
1548 * @m: descriptor for message info
1549 * @skb: received message buffer
1550 *
1551 * Note: Address is not captured if not requested by receiver.
1552 */
1553 static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1554 {
1555 DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
1556 struct tipc_msg *hdr = buf_msg(skb);
1557
1558 if (!srcaddr)
1559 return;
1560
1561 srcaddr->sock.family = AF_TIPC;
1562 srcaddr->sock.addrtype = TIPC_ADDR_ID;
1563 srcaddr->sock.scope = 0;
1564 srcaddr->sock.addr.id.ref = msg_origport(hdr);
1565 srcaddr->sock.addr.id.node = msg_orignode(hdr);
1566 srcaddr->sock.addr.name.domain = 0;
1567 m->msg_namelen = sizeof(struct sockaddr_tipc);
1568
1569 if (!msg_in_group(hdr))
1570 return;
1571
1572 /* Group message users may also want to know sending member's id */
1573 srcaddr->member.family = AF_TIPC;
1574 srcaddr->member.addrtype = TIPC_ADDR_NAME;
1575 srcaddr->member.scope = 0;
1576 srcaddr->member.addr.name.name.type = msg_nametype(hdr);
1577 srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
1578 srcaddr->member.addr.name.domain = 0;
1579 m->msg_namelen = sizeof(*srcaddr);
1580 }
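
/* A user space receiver that wants the sending member's identity for group
 * messages can simply pass a buffer large enough for both addresses (a
 * sketch, not kernel code; 'fd' is a hypothetical TIPC group member socket):
 *
 *	struct sockaddr_tipc addrs[2];
 *	struct msghdr m = { .msg_name = addrs, .msg_namelen = sizeof(addrs) };
 *
 *	recvmsg(fd, &m, 0);
 *	// addrs[0]: port id of the sending socket
 *	// addrs[1]: name (type/instance) of the sending member, if in a group
 */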
1581
1582 /**
1583 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1584 * @m: descriptor for message info
1585 * @skb: received message buffer
1586 * @tsk: TIPC port associated with message
1587 *
1588 * Note: Ancillary data is not captured if not requested by receiver.
1589 *
1590 * Returns 0 if successful, otherwise errno
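 *
 * Layout of the ancillary objects, as built below:
 *   TIPC_ERRINFO : { error code, size of returned data }, followed by
 *   TIPC_RETDATA : the rejected/returned data itself (if any)
 *   TIPC_DESTNAME: { name type, lower instance, upper instance }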
1591 */
1592 static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
1593 struct tipc_sock *tsk)
1594 {
1595 struct tipc_msg *msg;
1596 u32 anc_data[3];
1597 u32 err;
1598 u32 dest_type;
1599 int has_name;
1600 int res;
1601
1602 if (likely(m->msg_controllen == 0))
1603 return 0;
1604 msg = buf_msg(skb);
1605
1606 /* Optionally capture errored message object(s) */
1607 err = msg ? msg_errcode(msg) : 0;
1608 if (unlikely(err)) {
1609 anc_data[0] = err;
1610 anc_data[1] = msg_data_sz(msg);
1611 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
1612 if (res)
1613 return res;
1614 if (anc_data[1]) {
1615 if (skb_linearize(skb))
1616 return -ENOMEM;
1617 msg = buf_msg(skb);
1618 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1619 msg_data(msg));
1620 if (res)
1621 return res;
1622 }
1623 }
1624
1625 /* Optionally capture message destination object */
1626 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
1627 switch (dest_type) {
1628 case TIPC_NAMED_MSG:
1629 has_name = 1;
1630 anc_data[0] = msg_nametype(msg);
1631 anc_data[1] = msg_namelower(msg);
1632 anc_data[2] = msg_namelower(msg);
1633 break;
1634 case TIPC_MCAST_MSG:
1635 has_name = 1;
1636 anc_data[0] = msg_nametype(msg);
1637 anc_data[1] = msg_namelower(msg);
1638 anc_data[2] = msg_nameupper(msg);
1639 break;
1640 case TIPC_CONN_MSG:
1641 has_name = (tsk->conn_type != 0);
1642 anc_data[0] = tsk->conn_type;
1643 anc_data[1] = tsk->conn_instance;
1644 anc_data[2] = tsk->conn_instance;
1645 break;
1646 default:
1647 has_name = 0;
1648 }
1649 if (has_name) {
1650 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
1651 if (res)
1652 return res;
1653 }
1654
1655 return 0;
1656 }
1657
1658 static void tipc_sk_send_ack(struct tipc_sock *tsk)
1659 {
1660 struct sock *sk = &tsk->sk;
1661 struct net *net = sock_net(sk);
1662 struct sk_buff *skb = NULL;
1663 struct tipc_msg *msg;
1664 u32 peer_port = tsk_peer_port(tsk);
1665 u32 dnode = tsk_peer_node(tsk);
1666
1667 if (!tipc_sk_connected(sk))
1668 return;
1669 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1670 dnode, tsk_own_node(tsk), peer_port,
1671 tsk->portid, TIPC_OK);
1672 if (!skb)
1673 return;
1674 msg = buf_msg(skb);
1675 msg_set_conn_ack(msg, tsk->rcv_unacked);
1676 tsk->rcv_unacked = 0;
1677
1678 /* Adjust to and advertise the correct window limit */
1679 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1680 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1681 msg_set_adv_win(msg, tsk->rcv_win);
1682 }
1683 tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1684 }
1685
1686 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1687 {
1688 struct sock *sk = sock->sk;
1689 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1690 long timeo = *timeop;
1691 int err = sock_error(sk);
1692
1693 if (err)
1694 return err;
1695
1696 for (;;) {
1697 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
1698 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1699 err = -ENOTCONN;
1700 break;
1701 }
1702 add_wait_queue(sk_sleep(sk), &wait);
1703 release_sock(sk);
1704 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
1705 sched_annotate_sleep();
1706 lock_sock(sk);
1707 remove_wait_queue(sk_sleep(sk), &wait);
1708 }
1709 err = 0;
1710 if (!skb_queue_empty(&sk->sk_receive_queue))
1711 break;
1712 err = -EAGAIN;
1713 if (!timeo)
1714 break;
1715 err = sock_intr_errno(timeo);
1716 if (signal_pending(current))
1717 break;
1718
1719 err = sock_error(sk);
1720 if (err)
1721 break;
1722 }
1723 *timeop = timeo;
1724 return err;
1725 }
1726
1727 /**
1728 * tipc_recvmsg - receive packet-oriented message
1729 * @m: descriptor for message info
1730 * @buflen: length of user buffer area
1731 * @flags: receive flags
1732 *
1733 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1734 * If the complete message doesn't fit in user area, truncate it.
1735 *
1736 * Returns size of returned message data, errno otherwise
1737 */
1738 static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1739 size_t buflen, int flags)
1740 {
1741 struct sock *sk = sock->sk;
1742 bool connected = !tipc_sk_type_connectionless(sk);
1743 struct tipc_sock *tsk = tipc_sk(sk);
1744 int rc, err, hlen, dlen, copy;
1745 struct sk_buff_head xmitq;
1746 struct tipc_msg *hdr;
1747 struct sk_buff *skb;
1748 bool grp_evt;
1749 long timeout;
1750
1751 /* Catch invalid receive requests */
1752 if (unlikely(!buflen))
1753 return -EINVAL;
1754
1755 lock_sock(sk);
1756 if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
1757 rc = -ENOTCONN;
1758 goto exit;
1759 }
1760 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1761
1762 /* Step rcv queue to first msg with data or error; wait if necessary */
1763 do {
1764 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1765 if (unlikely(rc))
1766 goto exit;
1767 skb = skb_peek(&sk->sk_receive_queue);
1768 hdr = buf_msg(skb);
1769 dlen = msg_data_sz(hdr);
1770 hlen = msg_hdr_sz(hdr);
1771 err = msg_errcode(hdr);
1772 grp_evt = msg_is_grp_evt(hdr);
1773 if (likely(dlen || err))
1774 break;
1775 tsk_advance_rx_queue(sk);
1776 } while (1);
1777
1778 /* Collect msg meta data, including error code and rejected data */
1779 tipc_sk_set_orig_addr(m, skb);
1780 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1781 if (unlikely(rc))
1782 goto exit;
1783 hdr = buf_msg(skb);
1784
1785 /* Capture data if non-error msg, otherwise just set return value */
1786 if (likely(!err)) {
1787 copy = min_t(int, dlen, buflen);
1788 if (unlikely(copy != dlen))
1789 m->msg_flags |= MSG_TRUNC;
1790 rc = skb_copy_datagram_msg(skb, hlen, m, copy);
1791 } else {
1792 copy = 0;
1793 rc = 0;
1794 if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
1795 rc = -ECONNRESET;
1796 }
1797 if (unlikely(rc))
1798 goto exit;
1799
1800 /* Mark message as group event if applicable */
1801 if (unlikely(grp_evt)) {
1802 if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
1803 m->msg_flags |= MSG_EOR;
1804 m->msg_flags |= MSG_OOB;
1805 copy = 0;
1806 }
1807
1808 /* Data or error code/rejected data was successfully captured */
1809 if (unlikely(flags & MSG_PEEK))
1810 goto exit;
1811
1812 /* Send group flow control advertisement when applicable */
1813 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1814 __skb_queue_head_init(&xmitq);
1815 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1816 msg_orignode(hdr), msg_origport(hdr),
1817 &xmitq);
1818 tipc_node_distr_xmit(sock_net(sk), &xmitq);
1819 }
1820
1821 tsk_advance_rx_queue(sk);
1822
1823 if (likely(!connected))
1824 goto exit;
1825
1826 /* Send connection flow control advertisement when applicable */
1827 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1828 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1829 tipc_sk_send_ack(tsk);
1830 exit:
1831 release_sock(sk);
1832 return rc ? rc : copy;
1833 }
1834
1835 /**
1836 * tipc_recvstream - receive stream-oriented data
1837 * @m: descriptor for message info
1838 * @buflen: total size of user buffer area
1839 * @flags: receive flags
1840 *
1841 * Used for SOCK_STREAM messages only. If not enough data is available,
1842 * it will optionally wait for more; data is never truncated.
1843 *
1844 * Returns size of returned message data, errno otherwise
1845 */
1846 static int tipc_recvstream(struct socket *sock, struct msghdr *m,
1847 size_t buflen, int flags)
1848 {
1849 struct sock *sk = sock->sk;
1850 struct tipc_sock *tsk = tipc_sk(sk);
1851 struct sk_buff *skb;
1852 struct tipc_msg *hdr;
1853 struct tipc_skb_cb *skb_cb;
1854 bool peek = flags & MSG_PEEK;
1855 int offset, required, copy, copied = 0;
1856 int hlen, dlen, err, rc;
1857 long timeout;
1858
1859 /* Catch invalid receive attempts */
1860 if (unlikely(!buflen))
1861 return -EINVAL;
1862
1863 lock_sock(sk);
1864
1865 if (unlikely(sk->sk_state == TIPC_OPEN)) {
1866 rc = -ENOTCONN;
1867 goto exit;
1868 }
1869 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
1870 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1871
1872 do {
1873 /* Look at first msg in receive queue; wait if necessary */
1874 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1875 if (unlikely(rc))
1876 break;
1877 skb = skb_peek(&sk->sk_receive_queue);
1878 skb_cb = TIPC_SKB_CB(skb);
1879 hdr = buf_msg(skb);
1880 dlen = msg_data_sz(hdr);
1881 hlen = msg_hdr_sz(hdr);
1882 err = msg_errcode(hdr);
1883
1884 /* Discard any empty non-errored (SYN-) message */
1885 if (unlikely(!dlen && !err)) {
1886 tsk_advance_rx_queue(sk);
1887 continue;
1888 }
1889
1890 /* Collect msg meta data, incl. error code and rejected data */
1891 if (!copied) {
1892 tipc_sk_set_orig_addr(m, skb);
1893 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1894 if (rc)
1895 break;
1896 hdr = buf_msg(skb);
1897 }
1898
1899 /* Copy data if msg ok, otherwise return error/partial data */
1900 if (likely(!err)) {
1901 offset = skb_cb->bytes_read;
1902 copy = min_t(int, dlen - offset, buflen - copied);
1903 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
1904 if (unlikely(rc))
1905 break;
1906 copied += copy;
1907 offset += copy;
1908 if (unlikely(offset < dlen)) {
1909 if (!peek)
1910 skb_cb->bytes_read = offset;
1911 break;
1912 }
1913 } else {
1914 rc = 0;
1915 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
1916 rc = -ECONNRESET;
1917 if (copied || rc)
1918 break;
1919 }
1920
1921 if (unlikely(peek))
1922 break;
1923
1924 tsk_advance_rx_queue(sk);
1925
1926 /* Send connection flow control advertisement when applicable */
1927 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1928 if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
1929 tipc_sk_send_ack(tsk);
1930
1931 /* Exit if all requested data or FIN/error received */
1932 if (copied == buflen || err)
1933 break;
1934
1935 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
1936 exit:
1937 release_sock(sk);
1938 return copied ? copied : rc;
1939 }
1940
1941 /**
1942 * tipc_write_space - wake up thread if port congestion is released
1943 * @sk: socket
1944 */
1945 static void tipc_write_space(struct sock *sk)
1946 {
1947 struct socket_wq *wq;
1948
1949 rcu_read_lock();
1950 wq = rcu_dereference(sk->sk_wq);
1951 if (skwq_has_sleeper(wq))
1952 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
1953 EPOLLWRNORM | EPOLLWRBAND);
1954 rcu_read_unlock();
1955 }
1956
1957 /**
1958 * tipc_data_ready - wake up threads to indicate messages have been received
1959 * @sk: socket
1961 */
1962 static void tipc_data_ready(struct sock *sk)
1963 {
1964 struct socket_wq *wq;
1965
1966 rcu_read_lock();
1967 wq = rcu_dereference(sk->sk_wq);
1968 if (skwq_has_sleeper(wq))
1969 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
1970 EPOLLRDNORM | EPOLLRDBAND);
1971 rcu_read_unlock();
1972 }
1973
1974 static void tipc_sock_destruct(struct sock *sk)
1975 {
1976 __skb_queue_purge(&sk->sk_receive_queue);
1977 }
1978
1979 static void tipc_sk_proto_rcv(struct sock *sk,
1980 struct sk_buff_head *inputq,
1981 struct sk_buff_head *xmitq)
1982 {
1983 struct sk_buff *skb = __skb_dequeue(inputq);
1984 struct tipc_sock *tsk = tipc_sk(sk);
1985 struct tipc_msg *hdr = buf_msg(skb);
1986 struct tipc_group *grp = tsk->group;
1987 bool wakeup = false;
1988
1989 switch (msg_user(hdr)) {
1990 case CONN_MANAGER:
1991 tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
1992 return;
1993 case SOCK_WAKEUP:
1994 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
1995 /* coupled with smp_rmb() in tipc_wait_for_cond() */
1996 smp_wmb();
1997 tsk->cong_link_cnt--;
1998 wakeup = true;
1999 break;
2000 case GROUP_PROTOCOL:
2001 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
2002 break;
2003 case TOP_SRV:
2004 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
2005 hdr, inputq, xmitq);
2006 break;
2007 default:
2008 break;
2009 }
2010
2011 if (wakeup)
2012 sk->sk_write_space(sk);
2013
2014 kfree_skb(skb);
2015 }
2016
2017 /**
2018 * tipc_sk_filter_connect - check incoming message for a connection-based socket
2019 * @tsk: TIPC socket
2020 * @skb: pointer to message buffer.
2021 * Returns true if message should be added to receive queue, false otherwise
2022 */
2023 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
2024 {
2025 struct sock *sk = &tsk->sk;
2026 struct net *net = sock_net(sk);
2027 struct tipc_msg *hdr = buf_msg(skb);
2028 bool con_msg = msg_connected(hdr);
2029 u32 pport = tsk_peer_port(tsk);
2030 u32 pnode = tsk_peer_node(tsk);
2031 u32 oport = msg_origport(hdr);
2032 u32 onode = msg_orignode(hdr);
2033 int err = msg_errcode(hdr);
2034 unsigned long delay;
2035
2036 if (unlikely(msg_mcast(hdr)))
2037 return false;
2038
2039 switch (sk->sk_state) {
2040 case TIPC_CONNECTING:
2041 /* Setup ACK */
2042 if (likely(con_msg)) {
2043 if (err)
2044 break;
2045 tipc_sk_finish_conn(tsk, oport, onode);
2046 msg_set_importance(&tsk->phdr, msg_importance(hdr));
2047 /* ACK+ message with data is added to receive queue */
2048 if (msg_data_sz(hdr))
2049 return true;
2050 /* Empty ACK-: wake up sleeping connect() and drop */
2051 sk->sk_state_change(sk);
2052 msg_set_dest_droppable(hdr, 1);
2053 return false;
2054 }
2055 /* Ignore connectionless message if not from listening socket */
2056 if (oport != pport || onode != pnode)
2057 return false;
2058
2059 /* Rejected SYN */
2060 if (err != TIPC_ERR_OVERLOAD)
2061 break;
2062
2063 /* Prepare for new setup attempt if we have a SYN clone */
2064 if (skb_queue_empty(&sk->sk_write_queue))
2065 break;
2066 get_random_bytes(&delay, 2);
2067 delay %= (tsk->conn_timeout / 4);
2068 delay = msecs_to_jiffies(delay + 100);
2069 sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
2070 return false;
2071 case TIPC_OPEN:
2072 case TIPC_DISCONNECTING:
2073 return false;
2074 case TIPC_LISTEN:
2075 /* Accept only SYN message */
2076 if (!msg_is_syn(hdr) &&
2077 tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
2078 return false;
2079 if (!con_msg && !err)
2080 return true;
2081 return false;
2082 case TIPC_ESTABLISHED:
2083 /* Accept only connection-based messages sent by peer */
2084 if (likely(con_msg && !err && pport == oport && pnode == onode))
2085 return true;
2086 if (!tsk_peer_msg(tsk, hdr))
2087 return false;
2088 if (!err)
2089 return true;
2090 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2091 tipc_node_remove_conn(net, pnode, tsk->portid);
2092 sk->sk_state_change(sk);
2093 return true;
2094 default:
2095 pr_err("Unknown sk_state %u\n", sk->sk_state);
2096 }
2097 /* Abort connection setup attempt */
2098 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2099 sk->sk_err = ECONNREFUSED;
2100 sk->sk_state_change(sk);
2101 return true;
2102 }
2103
2104 /**
2105 * rcvbuf_limit - get proper overload limit of socket receive queue
2106 * @sk: socket
2107 * @skb: message
2108 *
2109 * For connection oriented messages, irrespective of importance,
2110 * default queue limit is 2 MB.
2111 *
2112 * For connectionless messages, queue limits are based on message
2113 * importance as follows:
2114 *
2115 * TIPC_LOW_IMPORTANCE (2 MB)
2116 * TIPC_MEDIUM_IMPORTANCE (4 MB)
2117 * TIPC_HIGH_IMPORTANCE (8 MB)
2118 * TIPC_CRITICAL_IMPORTANCE (16 MB)
2119 *
2120 * Returns overload limit according to corresponding message importance
2121 */
2122 static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
2123 {
2124 struct tipc_sock *tsk = tipc_sk(sk);
2125 struct tipc_msg *hdr = buf_msg(skb);
2126
2127 if (unlikely(msg_in_group(hdr)))
2128 return READ_ONCE(sk->sk_rcvbuf);
2129
2130 if (unlikely(!msg_connected(hdr)))
2131 return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
2132
2133 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2134 return READ_ONCE(sk->sk_rcvbuf);
2135
2136 return FLOWCTL_MSG_LIM;
2137 }
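/*
 * Worked example (illustrative): for a connectionless message the limit
 * above is sk_rcvbuf shifted left by the message importance, so with the
 * 2 MB baseline from the table in the kernel-doc a TIPC_CRITICAL_IMPORTANCE
 * message (importance level 3) is accepted until roughly 2 MB << 3 = 16 MB
 * of receive memory has been allocated to the socket.
 */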
2138
2139 /**
2140 * tipc_sk_filter_rcv - validate incoming message
2141 * @sk: socket
2142 * @skb: pointer to message.
2143 *
2144 * Enqueues message on receive queue if acceptable; optionally handles
2145 * disconnect indication for a connected socket.
2146 *
2147 * Called with socket lock already taken
2148 *
2149 */
2150 static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2151 struct sk_buff_head *xmitq)
2152 {
2153 bool sk_conn = !tipc_sk_type_connectionless(sk);
2154 struct tipc_sock *tsk = tipc_sk(sk);
2155 struct tipc_group *grp = tsk->group;
2156 struct tipc_msg *hdr = buf_msg(skb);
2157 struct net *net = sock_net(sk);
2158 struct sk_buff_head inputq;
2159 int mtyp = msg_type(hdr);
2160 int limit, err = TIPC_OK;
2161
2162 trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
2163 TIPC_SKB_CB(skb)->bytes_read = 0;
2164 __skb_queue_head_init(&inputq);
2165 __skb_queue_tail(&inputq, skb);
2166
2167 if (unlikely(!msg_isdata(hdr)))
2168 tipc_sk_proto_rcv(sk, &inputq, xmitq);
2169
2170 if (unlikely(grp))
2171 tipc_group_filter_msg(grp, &inputq, xmitq);
2172
2173 if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG)
2174 tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
2175
2176 /* Validate and add to receive buffer if there is space */
2177 while ((skb = __skb_dequeue(&inputq))) {
2178 hdr = buf_msg(skb);
2179 limit = rcvbuf_limit(sk, skb);
2180 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb)) ||
2181 (!sk_conn && msg_connected(hdr)) ||
2182 (!grp && msg_in_group(hdr)))
2183 err = TIPC_ERR_NO_PORT;
2184 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
2185 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
2186 "err_overload2!");
2187 atomic_inc(&sk->sk_drops);
2188 err = TIPC_ERR_OVERLOAD;
2189 }
2190
2191 if (unlikely(err)) {
2192 if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
2193 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
2194 "@filter_rcv!");
2195 __skb_queue_tail(xmitq, skb);
2196 }
2197 err = TIPC_OK;
2198 continue;
2199 }
2200 __skb_queue_tail(&sk->sk_receive_queue, skb);
2201 skb_set_owner_r(skb, sk);
2202 trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
2203 "rcvq >90% allocated!");
2204 sk->sk_data_ready(sk);
2205 }
2206 }
2207
2208 /**
2209 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
2210 * @sk: socket
2211 * @skb: message
2212 *
2213 * Caller must hold socket lock
2214 */
2215 static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
2216 {
2217 unsigned int before = sk_rmem_alloc_get(sk);
2218 struct sk_buff_head xmitq;
2219 unsigned int added;
2220
2221 __skb_queue_head_init(&xmitq);
2222
2223 tipc_sk_filter_rcv(sk, skb, &xmitq);
2224 added = sk_rmem_alloc_get(sk) - before;
2225 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2226
2227 /* Send pending response/rejected messages, if any */
2228 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2229 return 0;
2230 }
2231
2232 /**
2233 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2234 * inputq and try adding them to socket or backlog queue
2235 * @inputq: list of incoming buffers with potentially different destinations
2236 * @sk: socket where the buffers should be enqueued
2237 * @dport: port number for the socket
2238 *
2239 * Caller must hold socket lock
2240 */
2241 static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
2242 u32 dport, struct sk_buff_head *xmitq)
2243 {
2244 unsigned long time_limit = jiffies + 2;
2245 struct sk_buff *skb;
2246 unsigned int lim;
2247 atomic_t *dcnt;
2248 u32 onode;
2249
2250 while (skb_queue_len(inputq)) {
2251 if (unlikely(time_after_eq(jiffies, time_limit)))
2252 return;
2253
2254 skb = tipc_skb_dequeue(inputq, dport);
2255 if (unlikely(!skb))
2256 return;
2257
2258 /* Add message directly to receive queue if possible */
2259 if (!sock_owned_by_user(sk)) {
2260 tipc_sk_filter_rcv(sk, skb, xmitq);
2261 continue;
2262 }
2263
2264 /* Try backlog, compensating for double-counted bytes */
2265 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
2266 if (!sk->sk_backlog.len)
2267 atomic_set(dcnt, 0);
2268 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
2269 if (likely(!sk_add_backlog(sk, skb, lim))) {
2270 trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
2271 "bklg & rcvq >90% allocated!");
2272 continue;
2273 }
2274
2275 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
2276 /* Overload => reject message back to sender */
2277 onode = tipc_own_addr(sock_net(sk));
2278 atomic_inc(&sk->sk_drops);
2279 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
2280 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
2281 "@sk_enqueue!");
2282 __skb_queue_tail(xmitq, skb);
2283 }
2284 break;
2285 }
2286 }
2287
2288 /**
2289 * tipc_sk_rcv - handle a chain of incoming buffers
2290 * @inputq: buffer list containing the buffers
2291 * Consumes all buffers in list until inputq is empty
2292 * Note: may be called in multiple threads referring to the same queue
2293 */
2294 void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
2295 {
2296 struct sk_buff_head xmitq;
2297 u32 dnode, dport = 0;
2298 int err;
2299 struct tipc_sock *tsk;
2300 struct sock *sk;
2301 struct sk_buff *skb;
2302
2303 __skb_queue_head_init(&xmitq);
2304 while (skb_queue_len(inputq)) {
2305 dport = tipc_skb_peek_port(inputq, dport);
2306 tsk = tipc_sk_lookup(net, dport);
2307
2308 if (likely(tsk)) {
2309 sk = &tsk->sk;
2310 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
2311 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
2312 spin_unlock_bh(&sk->sk_lock.slock);
2313 }
2314 /* Send pending response/rejected messages, if any */
2315 tipc_node_distr_xmit(sock_net(sk), &xmitq);
2316 sock_put(sk);
2317 continue;
2318 }
2319 /* No destination socket => dequeue skb if still there */
2320 skb = tipc_skb_dequeue(inputq, dport);
2321 if (!skb)
2322 return;
2323
2324 /* Try secondary lookup if unresolved named message */
2325 err = TIPC_ERR_NO_PORT;
2326 if (tipc_msg_lookup_dest(net, skb, &err))
2327 goto xmit;
2328
2329 /* Prepare for message rejection */
2330 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
2331 continue;
2332
2333 trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
2334 xmit:
2335 dnode = msg_destnode(buf_msg(skb));
2336 tipc_node_xmit_skb(net, skb, dnode, dport);
2337 }
2338 }
2339
2340 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2341 {
2342 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2343 struct sock *sk = sock->sk;
2344 int done;
2345
2346 do {
2347 int err = sock_error(sk);
2348 if (err)
2349 return err;
2350 if (!*timeo_p)
2351 return -ETIMEDOUT;
2352 if (signal_pending(current))
2353 return sock_intr_errno(*timeo_p);
2354
2355 add_wait_queue(sk_sleep(sk), &wait);
2356 done = sk_wait_event(sk, timeo_p,
2357 sk->sk_state != TIPC_CONNECTING, &wait);
2358 remove_wait_queue(sk_sleep(sk), &wait);
2359 } while (!done);
2360 return 0;
2361 }
2362
2363 static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
2364 {
2365 if (addr->family != AF_TIPC)
2366 return false;
2367 if (addr->addrtype == TIPC_SERVICE_RANGE)
2368 return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
2369 return (addr->addrtype == TIPC_SERVICE_ADDR ||
2370 addr->addrtype == TIPC_SOCKET_ADDR);
2371 }
2372
2373 /**
2374 * tipc_connect - establish a connection to another TIPC port
2375 * @sock: socket structure
2376 * @dest: socket address for destination port
2377 * @destlen: size of socket address data structure
2378 * @flags: file-related flags associated with socket
2379 *
2380 * Returns 0 on success, errno otherwise
2381 */
2382 static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2383 int destlen, int flags)
2384 {
2385 struct sock *sk = sock->sk;
2386 struct tipc_sock *tsk = tipc_sk(sk);
2387 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2388 struct msghdr m = {NULL,};
2389 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2390 int previous;
2391 int res = 0;
2392
2393 if (destlen != sizeof(struct sockaddr_tipc))
2394 return -EINVAL;
2395
2396 lock_sock(sk);
2397
2398 if (tsk->group) {
2399 res = -EINVAL;
2400 goto exit;
2401 }
2402
2403 if (dst->family == AF_UNSPEC) {
2404 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2405 if (!tipc_sk_type_connectionless(sk))
2406 res = -EINVAL;
2407 goto exit;
2408 }
2409 if (!tipc_sockaddr_is_sane(dst)) {
2410 res = -EINVAL;
2411 goto exit;
2412 }
2413 /* DGRAM/RDM connect(), just save the destaddr */
2414 if (tipc_sk_type_connectionless(sk)) {
2415 memcpy(&tsk->peer, dest, destlen);
2416 goto exit;
2417 } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
2418 res = -EINVAL;
2419 goto exit;
2420 }
2421
2422 previous = sk->sk_state;
2423
2424 switch (sk->sk_state) {
2425 case TIPC_OPEN:
2426 /* Send a 'SYN-' to destination */
2427 m.msg_name = dest;
2428 m.msg_namelen = destlen;
2429
2430 /* For a non-blocking connect, set MSG_DONTWAIT so that
2431 * __tipc_sendmsg() is never blocked.
2432 */
2433 if (!timeout)
2434 m.msg_flags = MSG_DONTWAIT;
2435
2436 res = __tipc_sendmsg(sock, &m, 0);
2437 if ((res < 0) && (res != -EWOULDBLOCK))
2438 goto exit;
2439
2440 /* Just entered TIPC_CONNECTING state; the only
2441 * difference is that return value in non-blocking
2442 * case is EINPROGRESS, rather than EALREADY.
2443 */
2444 res = -EINPROGRESS;
2445 /* fall through */
2446 case TIPC_CONNECTING:
2447 if (!timeout) {
2448 if (previous == TIPC_CONNECTING)
2449 res = -EALREADY;
2450 goto exit;
2451 }
2452 timeout = msecs_to_jiffies(timeout);
2453 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2454 res = tipc_wait_for_connect(sock, &timeout);
2455 break;
2456 case TIPC_ESTABLISHED:
2457 res = -EISCONN;
2458 break;
2459 default:
2460 res = -EINVAL;
2461 }
2462
2463 exit:
2464 release_sock(sk);
2465 return res;
2466 }
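/*
 * Illustrative userspace sketch (hypothetical service type/instance, not
 * part of this file): connecting a SOCK_STREAM socket to a service address
 * takes the TIPC_OPEN branch above and sends the initial SYN.
 *
 *	struct sockaddr_tipc dst = {
 *		.family = AF_TIPC,
 *		.addrtype = TIPC_SERVICE_ADDR,
 *		.addr.name.name.type = 18888,
 *		.addr.name.name.instance = 17,
 *	};
 *	int sd = socket(AF_TIPC, SOCK_STREAM, 0);
 *
 *	if (connect(sd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
 *		perror("connect");
 */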
2467
2468 /**
2469 * tipc_listen - allow socket to listen for incoming connections
2470 * @sock: socket structure
2471 * @len: (unused)
2472 *
2473 * Returns 0 on success, errno otherwise
2474 */
2475 static int tipc_listen(struct socket *sock, int len)
2476 {
2477 struct sock *sk = sock->sk;
2478 int res;
2479
2480 lock_sock(sk);
2481 res = tipc_set_sk_state(sk, TIPC_LISTEN);
2482 release_sock(sk);
2483
2484 return res;
2485 }
2486
2487 static int tipc_wait_for_accept(struct socket *sock, long timeo)
2488 {
2489 struct sock *sk = sock->sk;
2490 DEFINE_WAIT(wait);
2491 int err;
2492
2493 /* True wake-one mechanism for incoming connections: only
2494 * one process gets woken up, not the 'whole herd'.
2495 * Since we do not 'race & poll' for established sockets
2496 * anymore, the common case will execute the loop only once.
2497 */
2498 for (;;) {
2499 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
2500 TASK_INTERRUPTIBLE);
2501 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
2502 release_sock(sk);
2503 timeo = schedule_timeout(timeo);
2504 lock_sock(sk);
2505 }
2506 err = 0;
2507 if (!skb_queue_empty(&sk->sk_receive_queue))
2508 break;
2509 err = -EAGAIN;
2510 if (!timeo)
2511 break;
2512 err = sock_intr_errno(timeo);
2513 if (signal_pending(current))
2514 break;
2515 }
2516 finish_wait(sk_sleep(sk), &wait);
2517 return err;
2518 }
2519
2520 /**
2521 * tipc_accept - wait for connection request
2522 * @sock: listening socket
2523 * @new_sock: new socket that is to be connected
2524 * @flags: file-related flags associated with socket
2525 *
2526 * Returns 0 on success, errno otherwise
2527 */
2528 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2529 bool kern)
2530 {
2531 struct sock *new_sk, *sk = sock->sk;
2532 struct sk_buff *buf;
2533 struct tipc_sock *new_tsock;
2534 struct tipc_msg *msg;
2535 long timeo;
2536 int res;
2537
2538 lock_sock(sk);
2539
2540 if (sk->sk_state != TIPC_LISTEN) {
2541 res = -EINVAL;
2542 goto exit;
2543 }
2544 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2545 res = tipc_wait_for_accept(sock, timeo);
2546 if (res)
2547 goto exit;
2548
2549 buf = skb_peek(&sk->sk_receive_queue);
2550
2551 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
2552 if (res)
2553 goto exit;
2554 security_sk_clone(sock->sk, new_sock->sk);
2555
2556 new_sk = new_sock->sk;
2557 new_tsock = tipc_sk(new_sk);
2558 msg = buf_msg(buf);
2559
2560 /* we lock on new_sk; but lockdep sees the lock on sk */
2561 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
2562
2563 /*
2564 * Reject any stray messages received by new socket
2565 * before the socket lock was taken (very, very unlikely)
2566 */
2567 tsk_rej_rx_queue(new_sk);
2568
2569 /* Connect new socket to its peer */
2570 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
2571
2572 tsk_set_importance(new_tsock, msg_importance(msg));
2573 if (msg_named(msg)) {
2574 new_tsock->conn_type = msg_nametype(msg);
2575 new_tsock->conn_instance = msg_nameinst(msg);
2576 }
2577
2578 /*
2579 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
2580 * Respond to 'SYN+' by queuing it on new socket.
2581 */
2582 if (!msg_data_sz(msg)) {
2583 struct msghdr m = {NULL,};
2584
2585 tsk_advance_rx_queue(sk);
2586 __tipc_sendstream(new_sock, &m, 0);
2587 } else {
2588 __skb_dequeue(&sk->sk_receive_queue);
2589 __skb_queue_head(&new_sk->sk_receive_queue, buf);
2590 skb_set_owner_r(buf, new_sk);
2591 }
2592 release_sock(new_sk);
2593 exit:
2594 release_sock(sk);
2595 return res;
2596 }
2597
2598 /**
2599 * tipc_shutdown - shutdown socket connection
2600 * @sock: socket structure
2601 * @how: direction to close (must be SHUT_RDWR)
2602 *
2603 * Terminates connection (if necessary), then purges socket's receive queue.
2604 *
2605 * Returns 0 on success, errno otherwise
2606 */
2607 static int tipc_shutdown(struct socket *sock, int how)
2608 {
2609 struct sock *sk = sock->sk;
2610 int res;
2611
2612 if (how != SHUT_RDWR)
2613 return -EINVAL;
2614
2615 lock_sock(sk);
2616
2617 trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
2618 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
2619 sk->sk_shutdown = SEND_SHUTDOWN;
2620
2621 if (sk->sk_state == TIPC_DISCONNECTING) {
2622 /* Discard any unreceived messages */
2623 __skb_queue_purge(&sk->sk_receive_queue);
2624
2625 /* Wake up anyone sleeping in poll */
2626 sk->sk_state_change(sk);
2627 res = 0;
2628 } else {
2629 res = -ENOTCONN;
2630 }
2631
2632 release_sock(sk);
2633 return res;
2634 }
2635
2636 static void tipc_sk_check_probing_state(struct sock *sk,
2637 struct sk_buff_head *list)
2638 {
2639 struct tipc_sock *tsk = tipc_sk(sk);
2640 u32 pnode = tsk_peer_node(tsk);
2641 u32 pport = tsk_peer_port(tsk);
2642 u32 self = tsk_own_node(tsk);
2643 u32 oport = tsk->portid;
2644 struct sk_buff *skb;
2645
2646 if (tsk->probe_unacked) {
2647 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2648 sk->sk_err = ECONNABORTED;
2649 tipc_node_remove_conn(sock_net(sk), pnode, pport);
2650 sk->sk_state_change(sk);
2651 return;
2652 }
2653 /* Prepare new probe */
2654 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2655 pnode, self, pport, oport, TIPC_OK);
2656 if (skb)
2657 __skb_queue_tail(list, skb);
2658 tsk->probe_unacked = true;
2659 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2660 }
2661
2662 static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
2663 {
2664 struct tipc_sock *tsk = tipc_sk(sk);
2665
2666 /* Try again later if dest link is congested */
2667 if (tsk->cong_link_cnt) {
2668 sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100));
2669 return;
2670 }
2671 /* Prepare SYN for retransmit */
2672 tipc_msg_skb_clone(&sk->sk_write_queue, list);
2673 }
2674
2675 static void tipc_sk_timeout(struct timer_list *t)
2676 {
2677 struct sock *sk = from_timer(sk, t, sk_timer);
2678 struct tipc_sock *tsk = tipc_sk(sk);
2679 u32 pnode = tsk_peer_node(tsk);
2680 struct sk_buff_head list;
2681 int rc = 0;
2682
2683 __skb_queue_head_init(&list);
2684 bh_lock_sock(sk);
2685
2686 /* Try again later if socket is busy */
2687 if (sock_owned_by_user(sk)) {
2688 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
2689 bh_unlock_sock(sk);
2690 sock_put(sk);
2691 return;
2692 }
2693
2694 if (sk->sk_state == TIPC_ESTABLISHED)
2695 tipc_sk_check_probing_state(sk, &list);
2696 else if (sk->sk_state == TIPC_CONNECTING)
2697 tipc_sk_retry_connect(sk, &list);
2698
2699 bh_unlock_sock(sk);
2700
2701 if (!skb_queue_empty(&list))
2702 rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
2703
2704 /* SYN messages may cause link congestion */
2705 if (rc == -ELINKCONG) {
2706 tipc_dest_push(&tsk->cong_links, pnode, 0);
2707 tsk->cong_link_cnt = 1;
2708 }
2709 sock_put(sk);
2710 }
2711
2712 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2713 struct tipc_name_seq const *seq)
2714 {
2715 struct sock *sk = &tsk->sk;
2716 struct net *net = sock_net(sk);
2717 struct publication *publ;
2718 u32 key;
2719
2720 if (scope != TIPC_NODE_SCOPE)
2721 scope = TIPC_CLUSTER_SCOPE;
2722
2723 if (tipc_sk_connected(sk))
2724 return -EINVAL;
2725 key = tsk->portid + tsk->pub_count + 1;
2726 if (key == tsk->portid)
2727 return -EADDRINUSE;
2728
2729 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2730 scope, tsk->portid, key);
2731 if (unlikely(!publ))
2732 return -EINVAL;
2733
2734 list_add(&publ->binding_sock, &tsk->publications);
2735 tsk->pub_count++;
2736 tsk->published = 1;
2737 return 0;
2738 }
2739
2740 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2741 struct tipc_name_seq const *seq)
2742 {
2743 struct net *net = sock_net(&tsk->sk);
2744 struct publication *publ;
2745 struct publication *safe;
2746 int rc = -EINVAL;
2747
2748 if (scope != TIPC_NODE_SCOPE)
2749 scope = TIPC_CLUSTER_SCOPE;
2750
2751 list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
2752 if (seq) {
2753 if (publ->scope != scope)
2754 continue;
2755 if (publ->type != seq->type)
2756 continue;
2757 if (publ->lower != seq->lower)
2758 continue;
2759 if (publ->upper != seq->upper)
2760 break;
2761 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2762 publ->upper, publ->key);
2763 rc = 0;
2764 break;
2765 }
2766 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2767 publ->upper, publ->key);
2768 rc = 0;
2769 }
2770 if (list_empty(&tsk->publications))
2771 tsk->published = 0;
2772 return rc;
2773 }
2774
2775 /* tipc_sk_reinit: set non-zero address in all existing sockets
2776 * when we go from standalone to network mode.
2777 */
2778 void tipc_sk_reinit(struct net *net)
2779 {
2780 struct tipc_net *tn = net_generic(net, tipc_net_id);
2781 struct rhashtable_iter iter;
2782 struct tipc_sock *tsk;
2783 struct tipc_msg *msg;
2784
2785 rhashtable_walk_enter(&tn->sk_rht, &iter);
2786
2787 do {
2788 rhashtable_walk_start(&iter);
2789
2790 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2791 sock_hold(&tsk->sk);
2792 rhashtable_walk_stop(&iter);
2793 lock_sock(&tsk->sk);
2794 msg = &tsk->phdr;
2795 msg_set_prevnode(msg, tipc_own_addr(net));
2796 msg_set_orignode(msg, tipc_own_addr(net));
2797 release_sock(&tsk->sk);
2798 rhashtable_walk_start(&iter);
2799 sock_put(&tsk->sk);
2800 }
2801
2802 rhashtable_walk_stop(&iter);
2803 } while (tsk == ERR_PTR(-EAGAIN));
2804
2805 rhashtable_walk_exit(&iter);
2806 }
2807
2808 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2809 {
2810 struct tipc_net *tn = net_generic(net, tipc_net_id);
2811 struct tipc_sock *tsk;
2812
2813 rcu_read_lock();
2814 tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
2815 if (tsk)
2816 sock_hold(&tsk->sk);
2817 rcu_read_unlock();
2818
2819 return tsk;
2820 }
2821
2822 static int tipc_sk_insert(struct tipc_sock *tsk)
2823 {
2824 struct sock *sk = &tsk->sk;
2825 struct net *net = sock_net(sk);
2826 struct tipc_net *tn = net_generic(net, tipc_net_id);
2827 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2828 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2829
2830 while (remaining--) {
2831 portid++;
2832 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2833 portid = TIPC_MIN_PORT;
2834 tsk->portid = portid;
2835 sock_hold(&tsk->sk);
2836 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
2837 tsk_rht_params))
2838 return 0;
2839 sock_put(&tsk->sk);
2840 }
2841
2842 return -1;
2843 }
2844
2845 static void tipc_sk_remove(struct tipc_sock *tsk)
2846 {
2847 struct sock *sk = &tsk->sk;
2848 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2849
2850 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
2851 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
2852 __sock_put(sk);
2853 }
2854 }
2855
2856 static const struct rhashtable_params tsk_rht_params = {
2857 .nelem_hint = 192,
2858 .head_offset = offsetof(struct tipc_sock, node),
2859 .key_offset = offsetof(struct tipc_sock, portid),
2860 .key_len = sizeof(u32), /* portid */
2861 .max_size = 1048576,
2862 .min_size = 256,
2863 .automatic_shrinking = true,
2864 };
2865
2866 int tipc_sk_rht_init(struct net *net)
2867 {
2868 struct tipc_net *tn = net_generic(net, tipc_net_id);
2869
2870 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
2871 }
2872
2873 void tipc_sk_rht_destroy(struct net *net)
2874 {
2875 struct tipc_net *tn = net_generic(net, tipc_net_id);
2876
2877 /* Wait for socket readers to complete */
2878 synchronize_net();
2879
2880 rhashtable_destroy(&tn->sk_rht);
2881 }
2882
2883 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
2884 {
2885 struct net *net = sock_net(&tsk->sk);
2886 struct tipc_group *grp = tsk->group;
2887 struct tipc_msg *hdr = &tsk->phdr;
2888 struct tipc_name_seq seq;
2889 int rc;
2890
2891 if (mreq->type < TIPC_RESERVED_TYPES)
2892 return -EACCES;
2893 if (mreq->scope > TIPC_NODE_SCOPE)
2894 return -EINVAL;
2895 if (grp)
2896 return -EACCES;
2897 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
2898 if (!grp)
2899 return -ENOMEM;
2900 tsk->group = grp;
2901 msg_set_lookup_scope(hdr, mreq->scope);
2902 msg_set_nametype(hdr, mreq->type);
2903 msg_set_dest_droppable(hdr, true);
2904 seq.type = mreq->type;
2905 seq.lower = mreq->instance;
2906 seq.upper = seq.lower;
2907 tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
2908 rc = tipc_sk_publish(tsk, mreq->scope, &seq);
2909 if (rc) {
2910 tipc_group_delete(net, grp);
2911 tsk->group = NULL;
2912 return rc;
2913 }
2914 /* Eliminate any risk that a broadcast overtakes sent JOINs */
2915 tsk->mc_method.rcast = true;
2916 tsk->mc_method.mandatory = true;
2917 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
2918 return rc;
2919 }
2920
2921 static int tipc_sk_leave(struct tipc_sock *tsk)
2922 {
2923 struct net *net = sock_net(&tsk->sk);
2924 struct tipc_group *grp = tsk->group;
2925 struct tipc_name_seq seq;
2926 int scope;
2927
2928 if (!grp)
2929 return -EINVAL;
2930 tipc_group_self(grp, &seq, &scope);
2931 tipc_group_delete(net, grp);
2932 tsk->group = NULL;
2933 tipc_sk_withdraw(tsk, scope, &seq);
2934 return 0;
2935 }
2936
2937 /**
2938 * tipc_setsockopt - set socket option
2939 * @sock: socket structure
2940 * @lvl: option level
2941 * @opt: option identifier
2942 * @ov: pointer to new option value
2943 * @ol: length of option value
2944 *
2945 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
2946 * (to ease compatibility).
2947 *
2948 * Returns 0 on success, errno otherwise
2949 */
2950 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
2951 char __user *ov, unsigned int ol)
2952 {
2953 struct sock *sk = sock->sk;
2954 struct tipc_sock *tsk = tipc_sk(sk);
2955 struct tipc_group_req mreq;
2956 u32 value = 0;
2957 int res = 0;
2958
2959 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
2960 return 0;
2961 if (lvl != SOL_TIPC)
2962 return -ENOPROTOOPT;
2963
2964 switch (opt) {
2965 case TIPC_IMPORTANCE:
2966 case TIPC_SRC_DROPPABLE:
2967 case TIPC_DEST_DROPPABLE:
2968 case TIPC_CONN_TIMEOUT:
2969 if (ol < sizeof(value))
2970 return -EINVAL;
2971 if (get_user(value, (u32 __user *)ov))
2972 return -EFAULT;
2973 break;
2974 case TIPC_GROUP_JOIN:
2975 if (ol < sizeof(mreq))
2976 return -EINVAL;
2977 if (copy_from_user(&mreq, ov, sizeof(mreq)))
2978 return -EFAULT;
2979 break;
2980 default:
2981 if (ov || ol)
2982 return -EINVAL;
2983 }
2984
2985 lock_sock(sk);
2986
2987 switch (opt) {
2988 case TIPC_IMPORTANCE:
2989 res = tsk_set_importance(tsk, value);
2990 break;
2991 case TIPC_SRC_DROPPABLE:
2992 if (sock->type != SOCK_STREAM)
2993 tsk_set_unreliable(tsk, value);
2994 else
2995 res = -ENOPROTOOPT;
2996 break;
2997 case TIPC_DEST_DROPPABLE:
2998 tsk_set_unreturnable(tsk, value);
2999 break;
3000 case TIPC_CONN_TIMEOUT:
3001 tipc_sk(sk)->conn_timeout = value;
3002 break;
3003 case TIPC_MCAST_BROADCAST:
3004 tsk->mc_method.rcast = false;
3005 tsk->mc_method.mandatory = true;
3006 break;
3007 case TIPC_MCAST_REPLICAST:
3008 tsk->mc_method.rcast = true;
3009 tsk->mc_method.mandatory = true;
3010 break;
3011 case TIPC_GROUP_JOIN:
3012 res = tipc_sk_join(tsk, &mreq);
3013 break;
3014 case TIPC_GROUP_LEAVE:
3015 res = tipc_sk_leave(tsk);
3016 break;
3017 default:
3018 res = -EINVAL;
3019 }
3020
3021 release_sock(sk);
3022
3023 return res;
3024 }
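/*
 * Illustrative userspace sketch (hypothetical group type and member
 * instance, not part of this file): joining a communication group via the
 * TIPC_GROUP_JOIN option handled above, which passes a struct
 * tipc_group_req to tipc_sk_join().
 *
 *	struct tipc_group_req req = {
 *		.type = 4711,
 *		.instance = 1,
 *		.scope = TIPC_CLUSTER_SCOPE,
 *	};
 *
 *	if (setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &req, sizeof(req)) < 0)
 *		perror("TIPC_GROUP_JOIN");
 */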
3025
3026 /**
3027 * tipc_getsockopt - get socket option
3028 * @sock: socket structure
3029 * @lvl: option level
3030 * @opt: option identifier
3031 * @ov: receptacle for option value
3032 * @ol: receptacle for length of option value
3033 *
3034 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
3035 * (to ease compatibility).
3036 *
3037 * Returns 0 on success, errno otherwise
3038 */
3039 static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
3040 char __user *ov, int __user *ol)
3041 {
3042 struct sock *sk = sock->sk;
3043 struct tipc_sock *tsk = tipc_sk(sk);
3044 struct tipc_name_seq seq;
3045 int len, scope;
3046 u32 value;
3047 int res;
3048
3049 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3050 return put_user(0, ol);
3051 if (lvl != SOL_TIPC)
3052 return -ENOPROTOOPT;
3053 res = get_user(len, ol);
3054 if (res)
3055 return res;
3056
3057 lock_sock(sk);
3058
3059 switch (opt) {
3060 case TIPC_IMPORTANCE:
3061 value = tsk_importance(tsk);
3062 break;
3063 case TIPC_SRC_DROPPABLE:
3064 value = tsk_unreliable(tsk);
3065 break;
3066 case TIPC_DEST_DROPPABLE:
3067 value = tsk_unreturnable(tsk);
3068 break;
3069 case TIPC_CONN_TIMEOUT:
3070 value = tsk->conn_timeout;
3071 /* no need to set "res", since already 0 at this point */
3072 break;
3073 case TIPC_NODE_RECVQ_DEPTH:
3074 value = 0; /* was tipc_queue_size, now obsolete */
3075 break;
3076 case TIPC_SOCK_RECVQ_DEPTH:
3077 value = skb_queue_len(&sk->sk_receive_queue);
3078 break;
3079 case TIPC_SOCK_RECVQ_USED:
3080 value = sk_rmem_alloc_get(sk);
3081 break;
3082 case TIPC_GROUP_JOIN:
3083 seq.type = 0;
3084 if (tsk->group)
3085 tipc_group_self(tsk->group, &seq, &scope);
3086 value = seq.type;
3087 break;
3088 default:
3089 res = -EINVAL;
3090 }
3091
3092 release_sock(sk);
3093
3094 if (res)
3095 return res; /* "get" failed */
3096
3097 if (len < sizeof(value))
3098 return -EINVAL;
3099
3100 if (copy_to_user(ov, &value, sizeof(value)))
3101 return -EFAULT;
3102
3103 return put_user(sizeof(value), ol);
3104 }
3105
3106 static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
3107 {
3108 struct net *net = sock_net(sock->sk);
3109 struct tipc_sioc_nodeid_req nr = {0};
3110 struct tipc_sioc_ln_req lnr;
3111 void __user *argp = (void __user *)arg;
3112
3113 switch (cmd) {
3114 case SIOCGETLINKNAME:
3115 if (copy_from_user(&lnr, argp, sizeof(lnr)))
3116 return -EFAULT;
3117 if (!tipc_node_get_linkname(net,
3118 lnr.bearer_id & 0xffff, lnr.peer,
3119 lnr.linkname, TIPC_MAX_LINK_NAME)) {
3120 if (copy_to_user(argp, &lnr, sizeof(lnr)))
3121 return -EFAULT;
3122 return 0;
3123 }
3124 return -EADDRNOTAVAIL;
3125 case SIOCGETNODEID:
3126 if (copy_from_user(&nr, argp, sizeof(nr)))
3127 return -EFAULT;
3128 if (!tipc_node_get_id(net, nr.peer, nr.node_id))
3129 return -EADDRNOTAVAIL;
3130 if (copy_to_user(argp, &nr, sizeof(nr)))
3131 return -EFAULT;
3132 return 0;
3133 default:
3134 return -ENOIOCTLCMD;
3135 }
3136 }
3137
3138 static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
3139 {
3140 struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3141 struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
3142 u32 onode = tipc_own_addr(sock_net(sock1->sk));
3143
3144 tsk1->peer.family = AF_TIPC;
3145 tsk1->peer.addrtype = TIPC_ADDR_ID;
3146 tsk1->peer.scope = TIPC_NODE_SCOPE;
3147 tsk1->peer.addr.id.ref = tsk2->portid;
3148 tsk1->peer.addr.id.node = onode;
3149 tsk2->peer.family = AF_TIPC;
3150 tsk2->peer.addrtype = TIPC_ADDR_ID;
3151 tsk2->peer.scope = TIPC_NODE_SCOPE;
3152 tsk2->peer.addr.id.ref = tsk1->portid;
3153 tsk2->peer.addr.id.node = onode;
3154
3155 tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3156 tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
3157 return 0;
3158 }
3159
3160 /* Protocol switches for the various types of TIPC sockets */
3161
3162 static const struct proto_ops msg_ops = {
3163 .owner = THIS_MODULE,
3164 .family = AF_TIPC,
3165 .release = tipc_release,
3166 .bind = tipc_bind,
3167 .connect = tipc_connect,
3168 .socketpair = tipc_socketpair,
3169 .accept = sock_no_accept,
3170 .getname = tipc_getname,
3171 .poll = tipc_poll,
3172 .ioctl = tipc_ioctl,
3173 .listen = sock_no_listen,
3174 .shutdown = tipc_shutdown,
3175 .setsockopt = tipc_setsockopt,
3176 .getsockopt = tipc_getsockopt,
3177 .sendmsg = tipc_sendmsg,
3178 .recvmsg = tipc_recvmsg,
3179 .mmap = sock_no_mmap,
3180 .sendpage = sock_no_sendpage
3181 };
3182
3183 static const struct proto_ops packet_ops = {
3184 .owner = THIS_MODULE,
3185 .family = AF_TIPC,
3186 .release = tipc_release,
3187 .bind = tipc_bind,
3188 .connect = tipc_connect,
3189 .socketpair = tipc_socketpair,
3190 .accept = tipc_accept,
3191 .getname = tipc_getname,
3192 .poll = tipc_poll,
3193 .ioctl = tipc_ioctl,
3194 .listen = tipc_listen,
3195 .shutdown = tipc_shutdown,
3196 .setsockopt = tipc_setsockopt,
3197 .getsockopt = tipc_getsockopt,
3198 .sendmsg = tipc_send_packet,
3199 .recvmsg = tipc_recvmsg,
3200 .mmap = sock_no_mmap,
3201 .sendpage = sock_no_sendpage
3202 };
3203
3204 static const struct proto_ops stream_ops = {
3205 .owner = THIS_MODULE,
3206 .family = AF_TIPC,
3207 .release = tipc_release,
3208 .bind = tipc_bind,
3209 .connect = tipc_connect,
3210 .socketpair = tipc_socketpair,
3211 .accept = tipc_accept,
3212 .getname = tipc_getname,
3213 .poll = tipc_poll,
3214 .ioctl = tipc_ioctl,
3215 .listen = tipc_listen,
3216 .shutdown = tipc_shutdown,
3217 .setsockopt = tipc_setsockopt,
3218 .getsockopt = tipc_getsockopt,
3219 .sendmsg = tipc_sendstream,
3220 .recvmsg = tipc_recvstream,
3221 .mmap = sock_no_mmap,
3222 .sendpage = sock_no_sendpage
3223 };
3224
3225 static const struct net_proto_family tipc_family_ops = {
3226 .owner = THIS_MODULE,
3227 .family = AF_TIPC,
3228 .create = tipc_sk_create
3229 };
3230
3231 static struct proto tipc_proto = {
3232 .name = "TIPC",
3233 .owner = THIS_MODULE,
3234 .obj_size = sizeof(struct tipc_sock),
3235 .sysctl_rmem = sysctl_tipc_rmem
3236 };
3237
3238 /**
3239 * tipc_socket_init - initialize TIPC socket interface
3240 *
3241 * Returns 0 on success, errno otherwise
3242 */
3243 int tipc_socket_init(void)
3244 {
3245 int res;
3246
3247 res = proto_register(&tipc_proto, 1);
3248 if (res) {
3249 pr_err("Failed to register TIPC protocol type\n");
3250 goto out;
3251 }
3252
3253 res = sock_register(&tipc_family_ops);
3254 if (res) {
3255 pr_err("Failed to register TIPC socket type\n");
3256 proto_unregister(&tipc_proto);
3257 goto out;
3258 }
3259 out:
3260 return res;
3261 }
3262
3263 /**
3264 * tipc_socket_stop - stop TIPC socket interface
3265 */
3266 void tipc_socket_stop(void)
3267 {
3268 sock_unregister(tipc_family_ops.family);
3269 proto_unregister(&tipc_proto);
3270 }
3271
3272 /* Caller should hold socket lock for the passed tipc socket. */
3273 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3274 {
3275 u32 peer_node;
3276 u32 peer_port;
3277 struct nlattr *nest;
3278
3279 peer_node = tsk_peer_node(tsk);
3280 peer_port = tsk_peer_port(tsk);
3281
3282 nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
3283 if (!nest)
3284 return -EMSGSIZE;
3285
3286 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3287 goto msg_full;
3288 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3289 goto msg_full;
3290
3291 if (tsk->conn_type != 0) {
3292 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3293 goto msg_full;
3294 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
3295 goto msg_full;
3296 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
3297 goto msg_full;
3298 }
3299 nla_nest_end(skb, nest);
3300
3301 return 0;
3302
3303 msg_full:
3304 nla_nest_cancel(skb, nest);
3305
3306 return -EMSGSIZE;
3307 }
3308
3309 static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
3310 *tsk)
3311 {
3312 struct net *net = sock_net(skb->sk);
3313 struct sock *sk = &tsk->sk;
3314
3315 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3316 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
3317 return -EMSGSIZE;
3318
3319 if (tipc_sk_connected(sk)) {
3320 if (__tipc_nl_add_sk_con(skb, tsk))
3321 return -EMSGSIZE;
3322 } else if (!list_empty(&tsk->publications)) {
3323 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3324 return -EMSGSIZE;
3325 }
3326 return 0;
3327 }
3328
3329 /* Caller should hold socket lock for the passed tipc socket. */
3330 static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3331 struct tipc_sock *tsk)
3332 {
3333 struct nlattr *attrs;
3334 void *hdr;
3335
3336 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3337 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
3338 if (!hdr)
3339 goto msg_cancel;
3340
3341 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3342 if (!attrs)
3343 goto genlmsg_cancel;
3344
3345 if (__tipc_nl_add_sk_info(skb, tsk))
3346 goto attr_msg_cancel;
3347
3348 nla_nest_end(skb, attrs);
3349 genlmsg_end(skb, hdr);
3350
3351 return 0;
3352
3353 attr_msg_cancel:
3354 nla_nest_cancel(skb, attrs);
3355 genlmsg_cancel:
3356 genlmsg_cancel(skb, hdr);
3357 msg_cancel:
3358 return -EMSGSIZE;
3359 }
3360
3361 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3362 int (*skb_handler)(struct sk_buff *skb,
3363 struct netlink_callback *cb,
3364 struct tipc_sock *tsk))
3365 {
3366 struct rhashtable_iter *iter = (void *)cb->args[4];
3367 struct tipc_sock *tsk;
3368 int err;
3369
3370 rhashtable_walk_start(iter);
3371 while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3372 if (IS_ERR(tsk)) {
3373 err = PTR_ERR(tsk);
3374 if (err == -EAGAIN) {
3375 err = 0;
3376 continue;
3377 }
3378 break;
3379 }
3380
3381 sock_hold(&tsk->sk);
3382 rhashtable_walk_stop(iter);
3383 lock_sock(&tsk->sk);
3384 err = skb_handler(skb, cb, tsk);
3385 if (err) {
3386 release_sock(&tsk->sk);
3387 sock_put(&tsk->sk);
3388 goto out;
3389 }
3390 release_sock(&tsk->sk);
3391 rhashtable_walk_start(iter);
3392 sock_put(&tsk->sk);
3393 }
3394 rhashtable_walk_stop(iter);
3395 out:
3396 return skb->len;
3397 }
3398 EXPORT_SYMBOL(tipc_nl_sk_walk);
3399
3400 int tipc_dump_start(struct netlink_callback *cb)
3401 {
3402 return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3403 }
3404 EXPORT_SYMBOL(tipc_dump_start);
3405
3406 int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
3407 {
3408 /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
3409 struct rhashtable_iter *iter = (void *)cb->args[4];
3410 struct tipc_net *tn = tipc_net(net);
3411
3412 if (!iter) {
3413 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3414 if (!iter)
3415 return -ENOMEM;
3416
3417 cb->args[4] = (long)iter;
3418 }
3419
3420 rhashtable_walk_enter(&tn->sk_rht, iter);
3421 return 0;
3422 }
3423
3424 int tipc_dump_done(struct netlink_callback *cb)
3425 {
3426 struct rhashtable_iter *hti = (void *)cb->args[4];
3427
3428 rhashtable_walk_exit(hti);
3429 kfree(hti);
3430 return 0;
3431 }
3432 EXPORT_SYMBOL(tipc_dump_done);
3433
3434 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3435 struct tipc_sock *tsk, u32 sk_filter_state,
3436 u64 (*tipc_diag_gen_cookie)(struct sock *sk))
3437 {
3438 struct sock *sk = &tsk->sk;
3439 struct nlattr *attrs;
3440 struct nlattr *stat;
3441
3442 /* Filter response w.r.t. sk_state */
3443 if (!(sk_filter_state & (1 << sk->sk_state)))
3444 return 0;
3445
3446 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
3447 if (!attrs)
3448 goto msg_cancel;
3449
3450 if (__tipc_nl_add_sk_info(skb, tsk))
3451 goto attr_msg_cancel;
3452
3453 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3454 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3455 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
3456 nla_put_u32(skb, TIPC_NLA_SOCK_UID,
3457 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
3458 sock_i_uid(sk))) ||
3459 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
3460 tipc_diag_gen_cookie(sk),
3461 TIPC_NLA_SOCK_PAD))
3462 goto attr_msg_cancel;
3463
3464 stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
3465 if (!stat)
3466 goto attr_msg_cancel;
3467
3468 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3469 skb_queue_len(&sk->sk_receive_queue)) ||
3470 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
3471 skb_queue_len(&sk->sk_write_queue)) ||
3472 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3473 atomic_read(&sk->sk_drops)))
3474 goto stat_msg_cancel;
3475
3476 if (tsk->cong_link_cnt &&
3477 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
3478 goto stat_msg_cancel;
3479
3480 if (tsk_conn_cong(tsk) &&
3481 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
3482 goto stat_msg_cancel;
3483
3484 nla_nest_end(skb, stat);
3485
3486 if (tsk->group)
3487 if (tipc_group_fill_sock_diag(tsk->group, skb))
3488 goto stat_msg_cancel;
3489
3490 nla_nest_end(skb, attrs);
3491
3492 return 0;
3493
3494 stat_msg_cancel:
3495 nla_nest_cancel(skb, stat);
3496 attr_msg_cancel:
3497 nla_nest_cancel(skb, attrs);
3498 msg_cancel:
3499 return -EMSGSIZE;
3500 }
3501 EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
3502
3503 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3504 {
3505 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
3506 }
3507
3508 /* Caller should hold socket lock for the passed tipc socket. */
3509 static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3510 struct netlink_callback *cb,
3511 struct publication *publ)
3512 {
3513 void *hdr;
3514 struct nlattr *attrs;
3515
3516 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3517 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
3518 if (!hdr)
3519 goto msg_cancel;
3520
3521 attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
3522 if (!attrs)
3523 goto genlmsg_cancel;
3524
3525 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3526 goto attr_msg_cancel;
3527 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
3528 goto attr_msg_cancel;
3529 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
3530 goto attr_msg_cancel;
3531 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
3532 goto attr_msg_cancel;
3533
3534 nla_nest_end(skb, attrs);
3535 genlmsg_end(skb, hdr);
3536
3537 return 0;
3538
3539 attr_msg_cancel:
3540 nla_nest_cancel(skb, attrs);
3541 genlmsg_cancel:
3542 genlmsg_cancel(skb, hdr);
3543 msg_cancel:
3544 return -EMSGSIZE;
3545 }
3546
3547 /* Caller should hold socket lock for the passed tipc socket. */
3548 static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3549 struct netlink_callback *cb,
3550 struct tipc_sock *tsk, u32 *last_publ)
3551 {
3552 int err;
3553 struct publication *p;
3554
3555 if (*last_publ) {
3556 list_for_each_entry(p, &tsk->publications, binding_sock) {
3557 if (p->key == *last_publ)
3558 break;
3559 }
3560 if (p->key != *last_publ) {
3561 /* We never set seq or call nl_dump_check_consistent(),
3562 * so setting prev_seq here will cause the consistency
3563 * check to fail in the netlink callback handler,
3564 * resulting in the last NLMSG_DONE message having
3565 * the NLM_F_DUMP_INTR flag set.
3566 */
3567 cb->prev_seq = 1;
3568 *last_publ = 0;
3569 return -EPIPE;
3570 }
3571 } else {
3572 p = list_first_entry(&tsk->publications, struct publication,
3573 binding_sock);
3574 }
3575
3576 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3577 err = __tipc_nl_add_sk_publ(skb, cb, p);
3578 if (err) {
3579 *last_publ = p->key;
3580 return err;
3581 }
3582 }
3583 *last_publ = 0;
3584
3585 return 0;
3586 }
3587
3588 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3589 {
3590 int err;
3591 u32 tsk_portid = cb->args[0];
3592 u32 last_publ = cb->args[1];
3593 u32 done = cb->args[2];
3594 struct net *net = sock_net(skb->sk);
3595 struct tipc_sock *tsk;
3596
3597 if (!tsk_portid) {
3598 struct nlattr **attrs;
3599 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3600
3601 err = tipc_nlmsg_parse(cb->nlh, &attrs);
3602 if (err)
3603 return err;
3604
3605 if (!attrs[TIPC_NLA_SOCK])
3606 return -EINVAL;
3607
3608 err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
3609 attrs[TIPC_NLA_SOCK],
3610 tipc_nl_sock_policy, NULL);
3611 if (err)
3612 return err;
3613
3614 if (!sock[TIPC_NLA_SOCK_REF])
3615 return -EINVAL;
3616
3617 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
3618 }
3619
3620 if (done)
3621 return 0;
3622
3623 tsk = tipc_sk_lookup(net, tsk_portid);
3624 if (!tsk)
3625 return -EINVAL;
3626
3627 lock_sock(&tsk->sk);
3628 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3629 if (!err)
3630 done = 1;
3631 release_sock(&tsk->sk);
3632 sock_put(&tsk->sk);
3633
3634 cb->args[0] = tsk_portid;
3635 cb->args[1] = last_publ;
3636 cb->args[2] = done;
3637
3638 return skb->len;
3639 }
3640
3641 /**
3642 * tipc_sk_filtering - check if a socket should be traced
3643 * @sk: the socket to be examined
3644 * @sysctl_tipc_sk_filter[]: the socket tuple for filtering,
3645 * (portid, sock type, name type, name lower, name upper)
3646 *
3647 * Returns true if the socket matches the tuple data
3648 * (a value of 0 means 'any') or when no tuple is set (all zeros),
3649 * otherwise false
3650 */
3651 bool tipc_sk_filtering(struct sock *sk)
3652 {
3653 struct tipc_sock *tsk;
3654 struct publication *p;
3655 u32 _port, _sktype, _type, _lower, _upper;
3656 u32 type = 0, lower = 0, upper = 0;
3657
3658 if (!sk)
3659 return true;
3660
3661 tsk = tipc_sk(sk);
3662
3663 _port = sysctl_tipc_sk_filter[0];
3664 _sktype = sysctl_tipc_sk_filter[1];
3665 _type = sysctl_tipc_sk_filter[2];
3666 _lower = sysctl_tipc_sk_filter[3];
3667 _upper = sysctl_tipc_sk_filter[4];
3668
3669 if (!_port && !_sktype && !_type && !_lower && !_upper)
3670 return true;
3671
3672 if (_port)
3673 return (_port == tsk->portid);
3674
3675 if (_sktype && _sktype != sk->sk_type)
3676 return false;
3677
3678 if (tsk->published) {
3679 p = list_first_entry_or_null(&tsk->publications,
3680 struct publication, binding_sock);
3681 if (p) {
3682 type = p->type;
3683 lower = p->lower;
3684 upper = p->upper;
3685 }
3686 }
3687
3688 if (!tipc_sk_type_connectionless(sk)) {
3689 type = tsk->conn_type;
3690 lower = tsk->conn_instance;
3691 upper = tsk->conn_instance;
3692 }
3693
3694 if ((_type && _type != type) || (_lower && _lower != lower) ||
3695 (_upper && _upper != upper))
3696 return false;
3697
3698 return true;
3699 }
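
/* A minimal usage sketch (not part of the kernel code): the filter tuple is
 * normally written from user space. Assuming sysctl_tipc_sk_filter[] is
 * exposed as net.tipc.sk_filter, tracing only sockets bound to name type
 * 4711 could look like:
 *
 *	sysctl -w net.tipc.sk_filter="0 0 4711 0 0"
 *
 * where the five values map to (portid, sock type, name type, name lower,
 * name upper) and 0 means 'any'.
 */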
3700
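/* Helper returning the TIPC port id of a socket, or 0 if no socket is given */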
3701 u32 tipc_sock_get_portid(struct sock *sk)
3702 {
3703 return (sk) ? (tipc_sk(sk))->portid : 0;
3704 }
3705
3706 /**
3707  * tipc_sk_overlimit1 - check if a socket's rx queue is about to be
3708  * overloaded; both the rcv and backlog queues are considered
3709  * @sk: tipc sk to be checked
3710  * @skb: tipc msg to be checked
3711  *
3712  * Returns true if the rx queue allocation exceeds 90% of its limit, otherwise false
3713  */
3714
3715 bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
3716 {
3717 atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
3718 unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
3719 unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
3720
3721 return (qsize > lim * 90 / 100);
3722 }
3723
3724 /**
3725  * tipc_sk_overlimit2 - check if a socket's rx queue is about to be
3726  * overloaded; only the rcv queue is considered
3727  * @sk: tipc sk to be checked
3728  * @skb: tipc msg to be checked
3729  *
3730  * Returns true if the rcv queue allocation exceeds 90% of its limit, otherwise false
3731  */
3732
3733 bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
3734 {
3735 unsigned int lim = rcvbuf_limit(sk, skb);
3736 unsigned int qsize = sk_rmem_alloc_get(sk);
3737
3738 return (qsize > lim * 90 / 100);
3739 }
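
/* Worked example (illustrative only): with rcvbuf_limit() returning 64 KiB
 * for the message at hand, tipc_sk_overlimit2() reports overload once more
 * than 64 * 1024 * 90 / 100 = 58982 bytes (~57.6 KiB) are already queued.
 */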
3740
3741 /**
3742 * tipc_sk_dump - dump TIPC socket
3743 * @sk: tipc sk to be dumped
3744  * @dqueues: bitmask selecting which socket queues to dump:
3745 * - TIPC_DUMP_NONE: don't dump socket queues
3746 * - TIPC_DUMP_SK_SNDQ: dump socket send queue
3747 * - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
3748 * - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
3749 * - TIPC_DUMP_ALL: dump all the socket queues above
3750  * @buf: buffer where the formatted dump data is returned
3751 */
3752 int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
3753 {
3754 int i = 0;
3755 size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
3756 struct tipc_sock *tsk;
3757 struct publication *p;
3758 bool tsk_connected;
3759
3760 if (!sk) {
3761 i += scnprintf(buf, sz, "sk data: (null)\n");
3762 return i;
3763 }
3764
3765 tsk = tipc_sk(sk);
3766 tsk_connected = !tipc_sk_type_connectionless(sk);
3767
3768 i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
3769 i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
3770 i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
3771 i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
3772 i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
3773 if (tsk_connected) {
3774 i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
3775 i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
3776 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_type);
3777 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_instance);
3778 }
3779 i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
3780 if (tsk->published) {
3781 p = list_first_entry_or_null(&tsk->publications,
3782 struct publication, binding_sock);
3783 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->type : 0);
3784 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->lower : 0);
3785 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->upper : 0);
3786 }
3787 i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
3788 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
3789 i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
3790 i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
3791 i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
3792 i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
3793 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
3794 i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
3795 i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
3796 i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
3797 i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
3798 i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
3799 i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
3800 i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
3801
3802 if (dqueues & TIPC_DUMP_SK_SNDQ) {
3803 i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
3804 i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
3805 }
3806
3807 if (dqueues & TIPC_DUMP_SK_RCVQ) {
3808 i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
3809 i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
3810 }
3811
3812 if (dqueues & TIPC_DUMP_SK_BKLGQ) {
3813 i += scnprintf(buf + i, sz - i, "sk_backlog:\n head ");
3814 i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
3815 if (sk->sk_backlog.tail != sk->sk_backlog.head) {
3816 i += scnprintf(buf + i, sz - i, " tail ");
3817 i += tipc_skb_dump(sk->sk_backlog.tail, false,
3818 buf + i);
3819 }
3820 }
3821
3822 return i;
3823 }
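
/* A minimal usage sketch (illustrative only, not part of the kernel code):
 * dump a socket together with its receive and backlog queues into a
 * caller-provided buffer. SK_LMAX is assumed to be sized for dumps that
 * include queues, as implied by the buffer sizing above.
 *
 *	char buf[SK_LMAX];
 *	int len;
 *
 *	len = tipc_sk_dump(sk, TIPC_DUMP_SK_RCVQ | TIPC_DUMP_SK_BKLGQ, buf);
 *	pr_info("%.*s", len, buf);
 */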
3824