/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"
#include "monitor.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
	u32 sent_pkts;
	u32 recv_pkts;
	u32 sent_states;
	u32 recv_states;
	u32 sent_probes;
	u32 recv_probes;
	u32 sent_nacks;
	u32 recv_nacks;
	u32 sent_acks;
	u32 sent_bundled;
	u32 sent_bundles;
	u32 recv_bundled;
	u32 recv_bundles;
	u32 retransmitted;
	u32 sent_fragmented;
	u32 sent_fragments;
	u32 recv_fragmented;
	u32 recv_fragments;
	u32 link_congs;		/* # port sends blocked by congestion */
	u32 deferred_recv;
	u32 duplicates;
	u32 max_queue_sz;	/* send queue size high water mark */
	u32 accu_queue_sz;	/* used for send queue size profiling */
	u32 queue_sz_counts;	/* used for send queue size profiling */
	u32 msg_length_counts;	/* used for message length profiling */
	u32 msg_lengths_total;	/* used for message length profiling */
	u32 msg_length_profile[7]; /* used for msg. length profiling */
};

/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @timer: link timer
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snd_nxt: next sequence number to use for outbound messages
 * @last_retransm: sequence number of most recently retransmitted message
 * @stale_count: # of identical retransmit requests made by peer
 * @ackers: # of peers that need to ack each packet before it can be released
 * @acked: last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferdq: deferred queue saving OOS b'cast messages received from node
 * @rcv_unacked: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct net *net;

	/* Management and link supervision data */
	u32 peer_session;
	u32 session;
	u32 peer_bearer_id;
	u32 bearer_id;
	u32 tolerance;
	u32 abort_limit;
	u32 state;
	u16 peer_caps;
	bool active;
	u32 silent_intv_cnt;
	char if_name[TIPC_MAX_IF_NAME];
	u32 priority;
	char net_plane;
	struct tipc_mon_state mon_state;
	u16 rst_cnt;

	/* Failover/synch */
	u16 drop_point;
	struct sk_buff *failover_reasm_skb;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	/* Sending */
	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
		struct sk_buff *target_bskb;
	} backlog[5];
	u16 snd_nxt;
	u16 last_retransm;
	u16 window;
	u32 stale_count;

	/* Reception */
	u16 rcv_nxt;
	u32 rcv_unacked;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;

	/* Broadcast */
	u16 ackers;
	u16 acked;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;
	unsigned long prev_retr;
	u16 prev_from;
	u16 prev_to;
	u8 nack_state;
	bool bc_peer_is_up;

	/* Statistics */
	struct tipc_stats stats;
};

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

#define TIPC_BC_RETR_LIMIT 10	/* [ms] */

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/* Wildcard value for link session numbers. When it is known that
 * peer endpoint is down, any session number must be accepted.
 */
#define ANY_SESSION 0x10000

/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED     = 0xe,
	LINK_ESTABLISHING    = 0xe  << 4,
	LINK_RESET           = 0x1  << 8,
	LINK_RESETTING       = 0x2  << 12,
	LINK_PEER_RESET      = 0xd  << 16,
	LINK_FAILINGOVER     = 0xf  << 20,
	LINK_SYNCHING        = 0xc  << 24
};

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

int tipc_link_is_active(struct tipc_link *l)
{
	return l->active;
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_window(struct tipc_link *l)
{
	return l->window;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	rcv_l->bc_peer_is_up = true;
	rcv_l->state = LINK_ESTABLISHED;
	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

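/* On a broadcast receive link, snd_nxt is reused to hold the peer's last
 * known send position (see tipc_link_build_state_msg()), so the gap
 * computed below covers both known-missing and deferred packets.
 */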
u16 link_bc_rcv_gap(struct tipc_link *l)
{
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 gap = 0;

	if (more(l->snd_nxt, l->rcv_nxt))
		gap = l->snd_nxt - l->rcv_nxt;
	if (skb)
		gap = buf_seqno(skb) - l->rcv_nxt;
	return gap;
}

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

/**
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A, B, C, ...) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      int window, u32 session, u32 ownnode, u32 peer,
		      u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Note: peer i/f name is completed by reset/activate message */
	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->peer_session = ANY_SESSION;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, window);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}

/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @n: pointer to associated node
 * @mtu: mtu to be used
 * @window: send window to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
			 int mtu, int window, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
			      0, ownnode, peer, peer_caps, bc_sndlink,
			      NULL, inputq, namedq, link))
		return false;

	l = *link;
	strcpy(l->name, tipc_bclink_name);
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	/* Disable replicast if even a single peer doesn't support it */
	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
		tipc_bcast_disable_rcast(net);

	return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	return rc;
}
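
/* A rough sketch of the normal transitions implemented above (events not
 * shown either leave the state unchanged or are rejected as illegal):
 *
 *   RESET --PEER_RESET_EVT--> ESTABLISHING --ESTABLISH_EVT--> ESTABLISHED
 *   ESTABLISHED <--SYNCH_BEGIN_EVT/SYNCH_END_EVT--> SYNCHING
 *   ESTABLISHED/SYNCHING --FAILURE_EVT--> RESETTING --RESET_EVT--> RESET
 *   ESTABLISHED/SYNCHING --PEER_RESET_EVT--> PEER_RESET --RESET_EVT--> ESTABLISHING
 *   RESET/ESTABLISHING --FAILOVER_BEGIN_EVT--> FAILINGOVER --FAILOVER_END_EVT--> RESET
 */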

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = 0;
	int rc = 0;
	bool state = false;
	bool probe = false;
	bool setup = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	struct tipc_mon_state *mstate = &l->mon_state;

	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		mtyp = STATE_MSG;
		link_profile_stats(l);
		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		state = bc_acked != bc_snt;
		state |= l->bc_rcvlink->rcv_unacked;
		state |= l->rcv_unacked;
		state |= !skb_queue_empty(&l->transmq);
		state |= !skb_queue_empty(&l->deferdq);
		probe = mstate->probing;
		probe |= l->silent_intv_cnt;
		if (probe || mstate->monitoring)
			l->silent_intv_cnt++;
		break;
	case LINK_RESET:
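		/* Back off on RESET msgs: one per timeout interval for the
		 * first few attempts (rst_cnt 0..4), then only on every
		 * 16th interval thereafter.
		 */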
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		setup = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq);

	return rc;
}

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
	u32 dnode = tipc_own_addr(l->net);
	u32 dport = msg_origport(hdr);
	struct sk_buff *skb;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      dnode, l->addr, dport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	msg_set_dest_droppable(buf_msg(skb), true);
	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
	skb_queue_tail(&l->wakeupq, skb);
	l->stats.link_congs++;
	return -ELINKCONG;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	struct sk_buff_head *wakeupq = &l->wakeupq;
	struct sk_buff_head *inputq = l->inputq;
	struct sk_buff *skb, *tmp;
	struct sk_buff_head tmpq;
	int avail[5] = {0,};
	int imp = 0;

	__skb_queue_head_init(&tmpq);

	for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
		avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;

	skb_queue_walk_safe(wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		if (avail[imp] <= 0)
			continue;
		avail[imp]--;
		__skb_unlink(skb, wakeupq);
		__skb_queue_tail(&tmpq, skb);
	}

	spin_lock_bh(&inputq->lock);
	skb_queue_splice_tail(&tmpq, inputq);
	spin_unlock_bh(&inputq->lock);
}

void tipc_link_reset(struct tipc_link *l)
{
	u32 imp;

	l->peer_session = ANY_SESSION;
	l->session++;
	l->mtu = l->advertised_mtu;
	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	skb_queue_splice_init(&l->wakeupq, l->inputq);
	__skb_queue_purge(&l->backlogq);
	for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
		l->backlog[imp].len = 0;
		l->backlog[imp].target_bskb = NULL;
	}
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->acked = 0;
	l->silent_intv_cnt = 0;
	l->rst_cnt = 0;
	l->stale_count = 0;
	l->bc_peer_is_up = false;
	memset(&l->mon_state, 0, sizeof(l->mon_state));
	tipc_link_reset_stats(l);
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	int imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, **tskb;
	int pkt_cnt = skb_queue_len(list);
	int rc = 0;

	if (unlikely(msg_size(hdr) > mtu)) {
		skb_queue_purge(list);
		return -EMSGSIZE;
	}

	/* Allow oversubscription of one data msg per source at congestion */
	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
		if (imp == TIPC_SYSTEM_IMPORTANCE) {
			pr_warn("%s<%s>, link overflow\n", link_rst_msg, l->name);
			return -ENOBUFS;
		}
		rc = link_schedule_user(l, hdr);
	}

	if (pkt_cnt > 1) {
		l->stats.sent_fragmented++;
		l->stats.sent_fragments += pkt_cnt;
	}

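	/* Each packet now goes one of three ways: straight to the transmit
	 * queue (with a clone on xmitq) while the send window is open;
	 * bundled into the current target bundle buffer of its importance
	 * level; or, failing that, onto the backlog queue for later
	 * transmission by tipc_link_advance_backlog().
	 */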
	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			l->stats.sent_pkts++;
			seqno++;
			continue;
		}
		tskb = &l->backlog[imp].target_bskb;
		if (tipc_msg_bundle(*tskb, hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, *tskb);
			l->backlog[imp].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].target_bskb = NULL;
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return rc;
}

void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u32 imp;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		imp = msg_importance(hdr);
		l->backlog[imp].len--;
		if (unlikely(skb == l->backlog[imp].target_bskb))
			l->backlog[imp].target_bskb = NULL;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		l->stats.sent_pkts++;
		seqno++;
	}
	l->snd_nxt = seqno;
}

static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "State of link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, src: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}

int tipc_link_retrans(struct tipc_link *l, struct tipc_link *nacker,
		      u16 from, u16 to, struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (nacker->last_retransm != buf_seqno(skb)) {
		nacker->last_retransm = buf_seqno(skb);
		nacker->stale_count = 1;
	} else if (++nacker->stale_count > 100) {
		link_retransmit_failure(l, skb);
		nacker->stale_count = 0;
		if (link_is_bc_sndlink(l))
			return TIPC_LINK_DOWN_EVT;
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	/* Move forward to where retransmission should start */
	skb_queue_walk(&l->transmq, skb) {
		if (!less(buf_seqno(skb), from))
			break;
	}

	skb_queue_walk_from(&l->transmq, skb) {
		if (more(buf_seqno(skb), to))
			break;
		hdr = buf_msg(skb);
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		l->stats.retransmitted++;
	}
	return 0;
}

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	struct tipc_msg *hdr = buf_msg(skb);

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (unlikely(msg_type(hdr) == TIPC_MCAST_MSG)) {
			skb_queue_tail(l->bc_rcvlink->inputq, skb);
			return true;
		}
		/* fall through: unicast data goes to the ordinary input queue */
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return true;
	}
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff **reasm_skb = &l->reasm_buf;
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int rc = 0;
	int pos = 0;
	int ipos = 0;

	if (unlikely(usr == TUNNEL_PROTOCOL)) {
		if (msg_type(hdr) == SYNCH_MSG) {
			__skb_queue_purge(&l->deferdq);
			goto drop;
		}
		if (!tipc_msg_extract(skb, &iskb, &ipos))
			return rc;
		kfree_skb(skb);
		skb = iskb;
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), l->drop_point))
			goto drop;
		if (tipc_data_input(l, skb, inputq))
			return rc;
		usr = msg_user(hdr);
		reasm_skb = &l->failover_reasm_skb;
	}

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}
drop:
	kfree_skb(skb);
	return 0;
}

static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}

/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	if (!l)
		return 0;

	/* Broadcast ACK must be sent via a unicast link => defer to caller */
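	/* The check below staggers those acks: a node acks only when the
	 * low nibble of rcv_nxt XORed with its own address equals 0xf,
	 * i.e. each node acks roughly every 16th packet, at an offset
	 * determined by its address, so the sender never sees all peers
	 * ack the same packet at once.
	 */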
	if (link_is_bc_rcvlink(l)) {
		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
			return 0;
		l->rcv_unacked = 0;

		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
		l->snd_nxt = l->rcv_nxt;
		return TIPC_LINK_SND_STATE;
	}

	/* Unicast ACK */
	l->rcv_unacked = 0;
	l->stats.sent_acks++;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
	return 0;
}

/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = RESET_MSG;
	struct sk_buff *skb;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);

	/* Inform peer that this endpoint is going down if applicable */
	skb = skb_peek_tail(xmitq);
	if (skb && (l->state == LINK_RESET))
		msg_set_peer_stopping(buf_msg(skb), 1);
}

/* tipc_link_build_nack_msg: prepare link nack message for transmission
 * Note that sending of broadcast NACK is coordinated among nodes, to
 * reduce the risk of NACK storms towards the sender
 */
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq)
{
	u32 def_cnt = ++l->stats.deferred_recv;
	int match1, match2;

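	/* For broadcast links the NACKs are staggered much like the acks in
	 * tipc_link_build_state_msg(): only the node whose low address
	 * nibble matches the current deferred-packet count asks the caller
	 * to send a state/NACK message, so on average one node in sixteen
	 * reacts to any given loss.
	 */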
	if (link_is_bc_rcvlink(l)) {
		match1 = def_cnt & 0xf;
		match2 = tipc_own_addr(l->net) & 0xf;
		if (match1 == match2)
			return TIPC_LINK_SND_STATE;
		return 0;
	}

	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
	return 0;
}

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt, win_lim;
	int rc = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
			return tipc_link_proto_rcv(l, skb, xmitq);

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			goto drop;
		}

		/* Don't send probe at next timeout expiration */
		l->silent_intv_cnt = 0;

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			goto drop;
		}

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			__tipc_skb_queue_sorted(defq, seqno, skb);
			rc |= tipc_link_build_nack_msg(l, xmitq);
			break;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_pkts++;
		if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_state_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
			break;
	} while ((skb = __skb_dequeue(defq)));

	return rc;
drop:
	kfree_skb(skb);
	return rc;
}

static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct tipc_link *bcl = l->bc_rcvlink;
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff_head *dfq = &l->deferdq;
	bool node_up = link_is_up(bcl);
	struct tipc_mon_state *mstate = &l->mon_state;
	int dlen = 0;
	void *data;

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
		return;

	if (!skb_queue_empty(dfq))
		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
			      tipc_max_domain_size, l->addr,
			      tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return;

	hdr = buf_msg(skb);
	data = msg_data(hdr);
	msg_set_session(hdr, l->session);
	msg_set_bearer_id(hdr, l->bearer_id);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_next_sent(hdr, l->snd_nxt);
	msg_set_ack(hdr, l->rcv_nxt - 1);
	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
	msg_set_bc_ack_invalid(hdr, !node_up);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);
	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
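	/* Protocol messages are not part of the sequenced stream; the dummy
	 * seqno above sits half the sequence space away from snd_nxt,
	 * presumably so it can never be mistaken for an in-window data
	 * packet by the receiver.
	 */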

	if (mtyp == STATE_MSG) {
		msg_set_seq_gap(hdr, rcvgap);
		msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
		msg_set_probe(hdr, probe);
		tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
		msg_set_size(hdr, INT_H_SIZE + dlen);
		skb_trim(skb, INT_H_SIZE + dlen);
		l->stats.sent_states++;
		l->rcv_unacked = 0;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		strcpy(data, l->if_name);
		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
	}
	if (probe)
		l->stats.sent_probes++;
	if (rcvgap)
		l->stats.sent_nacks++;
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
}

/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;

	if (!tnl)
		return;

	skb_queue_head_init(&tnlq);
	skb_queue_head_init(&tmpxq);

	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);
		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;
	}
}

/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 ack = msg_ack(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	u16 dlen = msg_data_sz(hdr);
	int mtyp = msg_type(hdr);
	void *data;
	char *if_name;
	int rc = 0;

	if (tipc_link_is_blocked(l) || !xmitq)
		goto exit;

	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	skb_linearize(skb);
	hdr = buf_msg(skb);
	data = msg_data(hdr);

	switch (mtyp) {
	case RESET_MSG:

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != ANY_SESSION))
			break;
		/* fall thru' */

	case ACTIVATE_MSG:

		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, data, TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if (msg_peer_stopping(hdr))
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		else if ((mtyp == RESET_MSG) || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locally reset */
		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own prio if peer indicates a different value */
		if ((peers_prio != l->priority) &&
		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
			l->priority = peers_prio;
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			break;
		}
		tipc_mon_rcv(l->net, data, dlen, l->addr,
			     &l->mon_state, l->bearer_id);

		/* Send NACK if peer has sent pkts we haven't received yet */
		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || (msg_probe(hdr)))
			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
						  0, 0, xmitq);
		tipc_link_release_pkts(l, ack);

		/* If NACK, retransmit will now start at right position */
		if (gap) {
			rc = tipc_link_retrans(l, l, ack + 1, ack + gap, xmitq);
			l->stats.recv_nacks++;
		}

		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}

/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
					 u16 peers_snd_nxt,
					 struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
	u16 ack = l->rcv_nxt - 1;
	u16 gap_to = peers_snd_nxt - 1;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return false;
	hdr = buf_msg(skb);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_bcast_ack(hdr, ack);
	msg_set_bcgap_after(hdr, ack);
	if (dfrd_skb)
		gap_to = buf_seqno(dfrd_skb) - 1;
	msg_set_bcgap_to(hdr, gap_to);
	msg_set_non_seq(hdr, bcast);
	__skb_queue_tail(xmitq, skb);
	return true;
}

/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
		return;
	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
	tipc_link_xmit(l, &list, xmitq);
}

/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (link_is_up(l))
		return;

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;
		return;
	}

	if (l->peer_caps & TIPC_BCAST_SYNCH)
		return;

	if (msg_peer_node_is_up(hdr))
		return;

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
}

/* link_bc_retr_eval() - check if the indicated range can be retransmitted now
 * - Adjust permitted range if there is overlap with previous retransmission
 */
static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to)
{
	unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr);

	if (less(*to, *from))
		return false;

	/* New retransmission request */
	if ((elapsed > TIPC_BC_RETR_LIMIT) ||
	    less(*to, l->prev_from) || more(*from, l->prev_to)) {
		l->prev_from = *from;
		l->prev_to = *to;
		l->prev_retr = jiffies;
		return true;
	}

	/* Inside range of previous retransmit */
	if (!less(*from, l->prev_from) && !more(*to, l->prev_to))
		return false;

	/* Fully or partially outside previous range => exclude overlap */
	if (less(*from, l->prev_from)) {
		*to = l->prev_from - 1;
		l->prev_from = *from;
	}
	if (more(*to, l->prev_to)) {
		*from = l->prev_to + 1;
		l->prev_to = *to;
	}
	l->prev_retr = jiffies;
	return true;
}
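
/* Worked example of the overlap exclusion above: if the previous (recent)
 * retransmission covered sequence numbers 10-20 and a new request asks for
 * 15-25, only 21-25 is retransmitted now, and the remembered range grows
 * to 10-25. A request lying entirely inside 10-20 is suppressed outright.
 */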

/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
			  struct sk_buff_head *xmitq)
{
	struct tipc_link *snd_l = l->bc_sndlink;
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
	u16 from = msg_bcast_ack(hdr) + 1;
	u16 to = from + msg_bc_gap(hdr) - 1;
	int rc = 0;

	if (!link_is_up(l))
		return rc;

	if (!msg_peer_node_is_up(hdr))
		return rc;

	/* Open when peer acknowledges our bcast init msg (pkt #1) */
	if (msg_ack(hdr))
		l->bc_peer_is_up = true;

	if (!l->bc_peer_is_up)
		return rc;

	l->stats.recv_nacks++;

	/* Ignore if peers_snd_nxt goes beyond receive window */
	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
		return rc;

	if (link_bc_retr_eval(snd_l, &from, &to))
		rc = tipc_link_retrans(snd_l, l, from, to, xmitq);

	l->snd_nxt = peers_snd_nxt;
	if (link_bc_rcv_gap(l))
		rc |= TIPC_LINK_SND_STATE;

	/* Return now if sender supports nack via STATE messages */
	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
		return rc;

	/* Otherwise, be backwards compatible */

	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;
		return 0;
	}

	/* Don't NACK if one was recently sent or peeked */
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		return 0;
	}

	/* Conditionally delay NACK sending until next synch rcv */
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
			return 0;
	}

	/* Send NACK now but suppress next one */
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
	return 0;
}

void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
			  struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tmp;
	struct tipc_link *snd_l = l->bc_sndlink;

	if (!link_is_up(l) || !l->bc_peer_is_up)
		return;

	if (!more(acked, l->acked))
		return;

	/* Skip over packets peer has already acked */
	skb_queue_walk(&snd_l->transmq, skb) {
		if (more(buf_seqno(skb), l->acked))
			break;
	}

	/* Update/release the packets peer is acking now */
	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		if (!--TIPC_SKB_CB(skb)->ackers) {
			__skb_unlink(skb, &snd_l->transmq);
			kfree_skb(skb);
		}
	}
	l->acked = acked;
	tipc_link_advance_backlog(snd_l, xmitq);
	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
		link_prepare_wakeup(snd_l);
}

1776 /* tipc_link_bc_nack_rcv(): receive broadcast nack message
1777 * This function is here for backwards compatibility, since
1778 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
1779 */
tipc_link_bc_nack_rcv(struct tipc_link * l,struct sk_buff * skb,struct sk_buff_head * xmitq)1780 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
1781 struct sk_buff_head *xmitq)
1782 {
1783 struct tipc_msg *hdr = buf_msg(skb);
1784 u32 dnode = msg_destnode(hdr);
1785 int mtyp = msg_type(hdr);
1786 u16 acked = msg_bcast_ack(hdr);
1787 u16 from = acked + 1;
1788 u16 to = msg_bcgap_to(hdr);
1789 u16 peers_snd_nxt = to + 1;
1790 int rc = 0;
1791
1792 kfree_skb(skb);
1793
1794 if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
1795 return 0;
1796
1797 if (mtyp != STATE_MSG)
1798 return 0;
1799
1800 if (dnode == tipc_own_addr(l->net)) {
1801 tipc_link_bc_ack_rcv(l, acked, xmitq);
1802 rc = tipc_link_retrans(l->bc_sndlink, l, from, to, xmitq);
1803 l->stats.recv_nacks++;
1804 return rc;
1805 }
1806
1807 /* Msg for other node => suppress own NACK at next sync if applicable */
1808 if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
1809 l->nack_state = BC_NACK_SND_SUPPRESS;
1810
1811 return 0;
1812 }

void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}
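
/* Worked example (illustrative): with the minimum window win = 50, the
 * backlog limits become 50/100/150/200 packets for LOW through CRITICAL
 * importance, i.e. the more important the traffic, the later the sender
 * is backpressured. The SYSTEM limit is instead derived from
 * TIPC_MAX_PUBLICATIONS and the number of distribution items per packet
 * (l->mtu / ITEM_SIZE), so a full name table bulk can always be queued.
 */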

/**
 * tipc_link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
{
	memset(&l->stats, 0, sizeof(l->stats));
}

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy, NULL);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
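
/* Typical usage (an illustrative sketch, mirroring the netlink set
 * handlers elsewhere in TIPC): the caller hands in the nested
 * TIPC_NLA_LINK_PROP attribute, then reads the validated values:
 *
 *	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 *	int err;
 *
 *	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
 *	if (!err && props[TIPC_NLA_PROP_TOL])
 *		tipc_link_set_tolerance(l,
 *					nla_get_u32(props[TIPC_NLA_PROP_TOL]),
 *					&xmitq);
 */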
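/* Note: for unicast links the RX_INFO/TX_INFO slots below are zero;
 * the total packet counts are reported via TIPC_NLA_LINK_RX/TX instead
 * (see __tipc_nl_add_link()). The broadcast variant further down does
 * the opposite.
 */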
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, 0},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, 0},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, link->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

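/* Error unwinding: cancel the nests in reverse order of creation, so a
 * partially built message is fully rolled back before -EMSGSIZE is
 * returned.
 */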
prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

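/* The setters below piggyback the new value on an immediate STATE
 * message, so the peer endpoint learns of the change right away instead
 * of waiting for the next periodic state exchange.
 */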
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, tol, 0, xmitq);
}

void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, prio, xmitq);
}

void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}
