1 /*
2  * net/tipc/link.c: TIPC link code
3  *
4  * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
5  * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36 
37 #include "core.h"
38 #include "subscr.h"
39 #include "link.h"
40 #include "bcast.h"
41 #include "socket.h"
42 #include "name_distr.h"
43 #include "discover.h"
44 #include "netlink.h"
45 #include "monitor.h"
46 #include "trace.h"
47 
48 #include <linux/pkt_sched.h>
49 
50 struct tipc_stats {
51 	u32 sent_pkts;
52 	u32 recv_pkts;
53 	u32 sent_states;
54 	u32 recv_states;
55 	u32 sent_probes;
56 	u32 recv_probes;
57 	u32 sent_nacks;
58 	u32 recv_nacks;
59 	u32 sent_acks;
60 	u32 sent_bundled;
61 	u32 sent_bundles;
62 	u32 recv_bundled;
63 	u32 recv_bundles;
64 	u32 retransmitted;
65 	u32 sent_fragmented;
66 	u32 sent_fragments;
67 	u32 recv_fragmented;
68 	u32 recv_fragments;
69 	u32 link_congs;		/* # port sends blocked by congestion */
70 	u32 deferred_recv;
71 	u32 duplicates;
72 	u32 max_queue_sz;	/* send queue size high water mark */
73 	u32 accu_queue_sz;	/* used for send queue size profiling */
74 	u32 queue_sz_counts;	/* used for send queue size profiling */
75 	u32 msg_length_counts;	/* used for message length profiling */
76 	u32 msg_lengths_total;	/* used for message length profiling */
77 	u32 msg_length_profile[7]; /* used for msg. length profiling */
78 };
79 
80 /**
81  * struct tipc_link - TIPC link data structure
82  * @addr: network address of link's peer node
83  * @name: link name character string
84  * @media_addr: media address to use when sending messages over link
85  * @timer: link timer
86  * @net: pointer to namespace struct
87  * @refcnt: reference counter for permanent references (owner node & timer)
88  * @peer_session: link session # being used by peer end of link
89  * @peer_bearer_id: bearer id used by link's peer endpoint
90  * @bearer_id: local bearer id used by link
91  * @tolerance: minimum link continuity loss needed to reset link [in ms]
92  * @abort_limit: # of unacknowledged continuity probes needed to reset link
93  * @state: current state of link FSM
94  * @peer_caps: bitmap describing capabilities of peer node
95  * @silent_intv_cnt: # of timer intervals without any reception from peer
96  * @proto_msg: template for control messages generated by link
97  * @pmsg: convenience pointer to "proto_msg" field
98  * @priority: current link priority
99  * @net_plane: current link network plane ('A' through 'H')
100  * @mon_state: cookie with information needed by link monitor
101  * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
102  * @exp_msg_count: # of tunnelled messages expected during link changeover
103  * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
104  * @mtu: current maximum packet size for this link
105  * @advertised_mtu: advertised own mtu when link is being established
106  * @transmitq: queue for sent, non-acked messages
107  * @backlogq: queue for messages waiting to be sent
108  * @snt_nxt: next sequence number to use for outbound messages
109  * @ackers: # of peers that need to ack each packet before it can be released
110  * @acked: # last packet acked by a certain peer. Used for broadcast.
111  * @rcv_nxt: next sequence number to expect for inbound messages
112  * @deferred_queue: deferred queue of saved out-of-sequence broadcast messages received from node
113  * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
114  * @inputq: buffer queue for messages to be delivered upwards
115  * @namedq: buffer queue for name table messages to be delivered upwards
116  * @next_out: ptr to first unsent outbound message in queue
117  * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
118  * @long_msg_seq_no: next identifier to use for outbound fragmented messages
119  * @reasm_buf: head of partially reassembled inbound message fragments
120  * @bc_rcvr: marks that this is a broadcast receiver link
121  * @stats: collects statistics regarding link activity
122  */
123 struct tipc_link {
124 	u32 addr;
125 	char name[TIPC_MAX_LINK_NAME];
126 	struct net *net;
127 
128 	/* Management and link supervision data */
129 	u16 peer_session;
130 	u16 session;
131 	u16 snd_nxt_state;
132 	u16 rcv_nxt_state;
133 	u32 peer_bearer_id;
134 	u32 bearer_id;
135 	u32 tolerance;
136 	u32 abort_limit;
137 	u32 state;
138 	u16 peer_caps;
139 	bool in_session;
140 	bool active;
141 	u32 silent_intv_cnt;
142 	char if_name[TIPC_MAX_IF_NAME];
143 	u32 priority;
144 	char net_plane;
145 	struct tipc_mon_state mon_state;
146 	u16 rst_cnt;
147 
148 	/* Failover/synch */
149 	u16 drop_point;
150 	struct sk_buff *failover_reasm_skb;
151 	struct sk_buff_head failover_deferdq;
152 
153 	/* Max packet negotiation */
154 	u16 mtu;
155 	u16 advertised_mtu;
156 
157 	/* Sending */
158 	struct sk_buff_head transmq;
159 	struct sk_buff_head backlogq;
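	/* Per-importance backlog accounting: current queue length, congestion
	 * limit, and the bundle buffer currently being appended to, one entry
	 * per message importance level
	 */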
160 	struct {
161 		u16 len;
162 		u16 limit;
163 		struct sk_buff *target_bskb;
164 	} backlog[5];
165 	u16 snd_nxt;
166 	u16 window;
167 
168 	/* Reception */
169 	u16 rcv_nxt;
170 	u32 rcv_unacked;
171 	struct sk_buff_head deferdq;
172 	struct sk_buff_head *inputq;
173 	struct sk_buff_head *namedq;
174 
175 	/* Congestion handling */
176 	struct sk_buff_head wakeupq;
177 
178 	/* Fragmentation/reassembly */
179 	struct sk_buff *reasm_buf;
180 	struct sk_buff *reasm_tnlmsg;
181 
182 	/* Broadcast */
183 	u16 ackers;
184 	u16 acked;
185 	struct tipc_link *bc_rcvlink;
186 	struct tipc_link *bc_sndlink;
187 	u8 nack_state;
188 	bool bc_peer_is_up;
189 
190 	/* Statistics */
191 	struct tipc_stats stats;
192 };
193 
194 /*
195  * Error message prefixes
196  */
197 static const char *link_co_err = "Link tunneling error, ";
198 static const char *link_rst_msg = "Resetting link ";
199 
200 /* Send states for broadcast NACKs
201  */
202 enum {
203 	BC_NACK_SND_CONDITIONAL,
204 	BC_NACK_SND_UNCONDITIONAL,
205 	BC_NACK_SND_SUPPRESS,
206 };
207 
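/* Earliest time at which a packet may be retransmitted again: 10 ms between
 * broadcast retransmits, 1 ms between unicast retransmits (compared against
 * TIPC_SKB_CB(skb)->nxt_retr before each retransmission below)
 */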
208 #define TIPC_BC_RETR_LIM  (jiffies + msecs_to_jiffies(10))
209 #define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
210 
211 /*
212  * Interval between NACKs when packets arrive out of order
213  */
214 #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
215 
216 /* Link FSM states:
217  */
218 enum {
219 	LINK_ESTABLISHED     = 0xe,
220 	LINK_ESTABLISHING    = 0xe  << 4,
221 	LINK_RESET           = 0x1  << 8,
222 	LINK_RESETTING       = 0x2  << 12,
223 	LINK_PEER_RESET      = 0xd  << 16,
224 	LINK_FAILINGOVER     = 0xf  << 20,
225 	LINK_SYNCHING        = 0xc  << 24
226 };
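/* The state values above occupy disjoint bit fields, so membership in a group
 * of states can be tested with a single bitwise AND, as in link_is_up() and
 * tipc_link_is_blocked() below
 */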
227 
228 /* Link FSM state checking routines
229  */
230 static int link_is_up(struct tipc_link *l)
231 {
232 	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
233 }
234 
235 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
236 			       struct sk_buff_head *xmitq);
237 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
238 				      bool probe_reply, u16 rcvgap,
239 				      int tolerance, int priority,
240 				      struct sk_buff_head *xmitq);
241 static void link_print(struct tipc_link *l, const char *str);
242 static int tipc_link_build_nack_msg(struct tipc_link *l,
243 				    struct sk_buff_head *xmitq);
244 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
245 					struct sk_buff_head *xmitq);
246 static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
247 static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data);
248 static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
249 				     struct tipc_gap_ack_blks *ga,
250 				     struct sk_buff_head *xmitq);
251 
252 /*
253  *  Simple non-static link routines (i.e. referenced outside this file)
254  */
255 bool tipc_link_is_up(struct tipc_link *l)
256 {
257 	return link_is_up(l);
258 }
259 
260 bool tipc_link_peer_is_down(struct tipc_link *l)
261 {
262 	return l->state == LINK_PEER_RESET;
263 }
264 
265 bool tipc_link_is_reset(struct tipc_link *l)
266 {
267 	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
268 }
269 
270 bool tipc_link_is_establishing(struct tipc_link *l)
271 {
272 	return l->state == LINK_ESTABLISHING;
273 }
274 
275 bool tipc_link_is_synching(struct tipc_link *l)
276 {
277 	return l->state == LINK_SYNCHING;
278 }
279 
280 bool tipc_link_is_failingover(struct tipc_link *l)
281 {
282 	return l->state == LINK_FAILINGOVER;
283 }
284 
285 bool tipc_link_is_blocked(struct tipc_link *l)
286 {
287 	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
288 }
289 
290 static bool link_is_bc_sndlink(struct tipc_link *l)
291 {
292 	return !l->bc_sndlink;
293 }
294 
295 static bool link_is_bc_rcvlink(struct tipc_link *l)
296 {
297 	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
298 }
299 
300 void tipc_link_set_active(struct tipc_link *l, bool active)
301 {
302 	l->active = active;
303 }
304 
305 u32 tipc_link_id(struct tipc_link *l)
306 {
307 	return l->peer_bearer_id << 16 | l->bearer_id;
308 }
309 
310 int tipc_link_window(struct tipc_link *l)
311 {
312 	return l->window;
313 }
314 
315 int tipc_link_prio(struct tipc_link *l)
316 {
317 	return l->priority;
318 }
319 
320 unsigned long tipc_link_tolerance(struct tipc_link *l)
321 {
322 	return l->tolerance;
323 }
324 
325 struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
326 {
327 	return l->inputq;
328 }
329 
330 char tipc_link_plane(struct tipc_link *l)
331 {
332 	return l->net_plane;
333 }
334 
335 void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
336 {
337 	l->peer_caps = capabilities;
338 }
339 
340 void tipc_link_add_bc_peer(struct tipc_link *snd_l,
341 			   struct tipc_link *uc_l,
342 			   struct sk_buff_head *xmitq)
343 {
344 	struct tipc_link *rcv_l = uc_l->bc_rcvlink;
345 
346 	snd_l->ackers++;
347 	rcv_l->acked = snd_l->snd_nxt - 1;
348 	snd_l->state = LINK_ESTABLISHED;
349 	tipc_link_build_bc_init_msg(uc_l, xmitq);
350 }
351 
352 void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
353 			      struct tipc_link *rcv_l,
354 			      struct sk_buff_head *xmitq)
355 {
356 	u16 ack = snd_l->snd_nxt - 1;
357 
358 	snd_l->ackers--;
359 	rcv_l->bc_peer_is_up = true;
360 	rcv_l->state = LINK_ESTABLISHED;
361 	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
362 	trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
363 	tipc_link_reset(rcv_l);
364 	rcv_l->state = LINK_RESET;
365 	if (!snd_l->ackers) {
366 		trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
367 		tipc_link_reset(snd_l);
368 		snd_l->state = LINK_RESET;
369 		__skb_queue_purge(xmitq);
370 	}
371 }
372 
373 int tipc_link_bc_peers(struct tipc_link *l)
374 {
375 	return l->ackers;
376 }
377 
378 static u16 link_bc_rcv_gap(struct tipc_link *l)
379 {
380 	struct sk_buff *skb = skb_peek(&l->deferdq);
381 	u16 gap = 0;
382 
383 	if (more(l->snd_nxt, l->rcv_nxt))
384 		gap = l->snd_nxt - l->rcv_nxt;
385 	if (skb)
386 		gap = buf_seqno(skb) - l->rcv_nxt;
387 	return gap;
388 }
389 
390 void tipc_link_set_mtu(struct tipc_link *l, int mtu)
391 {
392 	l->mtu = mtu;
393 }
394 
395 int tipc_link_mtu(struct tipc_link *l)
396 {
397 	return l->mtu;
398 }
399 
400 u16 tipc_link_rcv_nxt(struct tipc_link *l)
401 {
402 	return l->rcv_nxt;
403 }
404 
405 u16 tipc_link_acked(struct tipc_link *l)
406 {
407 	return l->acked;
408 }
409 
410 char *tipc_link_name(struct tipc_link *l)
411 {
412 	return l->name;
413 }
414 
415 u32 tipc_link_state(struct tipc_link *l)
416 {
417 	return l->state;
418 }
419 
420 /**
421  * tipc_link_create - create a new link
422  * @n: pointer to associated node
423  * @if_name: associated interface name
424  * @bearer_id: id (index) of associated bearer
425  * @tolerance: link tolerance to be used by link
426  * @net_plane: network plane (A,B,C..) this link belongs to
427  * @mtu: mtu to be advertised by link
428  * @priority: priority to be used by link
429  * @window: send window to be used by link
430  * @session: session to be used by link
431  * @ownnode: identity of own node
432  * @peer: node id of peer node
433  * @peer_caps: bitmap describing peer node capabilities
434  * @bc_sndlink: the namespace global link used for broadcast sending
435  * @bc_rcvlink: the peer specific link used for broadcast reception
436  * @inputq: queue to put messages ready for delivery
437  * @namedq: queue to put binding table update messages ready for delivery
438  * @link: return value, pointer to put the created link
439  *
440  * Returns true if link was created, otherwise false
441  */
442 bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
443 		      int tolerance, char net_plane, u32 mtu, int priority,
444 		      int window, u32 session, u32 self,
445 		      u32 peer, u8 *peer_id, u16 peer_caps,
446 		      struct tipc_link *bc_sndlink,
447 		      struct tipc_link *bc_rcvlink,
448 		      struct sk_buff_head *inputq,
449 		      struct sk_buff_head *namedq,
450 		      struct tipc_link **link)
451 {
452 	char peer_str[NODE_ID_STR_LEN] = {0,};
453 	char self_str[NODE_ID_STR_LEN] = {0,};
454 	struct tipc_link *l;
455 
456 	l = kzalloc(sizeof(*l), GFP_ATOMIC);
457 	if (!l)
458 		return false;
459 	*link = l;
460 	l->session = session;
461 
462 	/* Set link name for unicast links only */
463 	if (peer_id) {
464 		tipc_nodeid2string(self_str, tipc_own_id(net));
465 		if (strlen(self_str) > 16)
466 			sprintf(self_str, "%x", self);
467 		tipc_nodeid2string(peer_str, peer_id);
468 		if (strlen(peer_str) > 16)
469 			sprintf(peer_str, "%x", peer);
470 	}
471 	/* Peer i/f name will be completed by reset/activate message */
472 	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
473 		 self_str, if_name, peer_str);
474 
475 	strcpy(l->if_name, if_name);
476 	l->addr = peer;
477 	l->peer_caps = peer_caps;
478 	l->net = net;
479 	l->in_session = false;
480 	l->bearer_id = bearer_id;
481 	l->tolerance = tolerance;
482 	if (bc_rcvlink)
483 		bc_rcvlink->tolerance = tolerance;
484 	l->net_plane = net_plane;
485 	l->advertised_mtu = mtu;
486 	l->mtu = mtu;
487 	l->priority = priority;
488 	tipc_link_set_queue_limits(l, window);
489 	l->ackers = 1;
490 	l->bc_sndlink = bc_sndlink;
491 	l->bc_rcvlink = bc_rcvlink;
492 	l->inputq = inputq;
493 	l->namedq = namedq;
494 	l->state = LINK_RESETTING;
495 	__skb_queue_head_init(&l->transmq);
496 	__skb_queue_head_init(&l->backlogq);
497 	__skb_queue_head_init(&l->deferdq);
498 	__skb_queue_head_init(&l->failover_deferdq);
499 	skb_queue_head_init(&l->wakeupq);
500 	skb_queue_head_init(l->inputq);
501 	return true;
502 }
503 
504 /**
505  * tipc_link_bc_create - create new link to be used for broadcast
506  * @n: pointer to associated node
507  * @mtu: mtu to be used initially if no peers
508  * @window: send window to be used
509  * @inputq: queue to put messages ready for delivery
510  * @namedq: queue to put binding table update messages ready for delivery
511  * @link: return value, pointer to put the created link
512  *
513  * Returns true if link was created, otherwise false
514  */
515 bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
516 			 int mtu, int window, u16 peer_caps,
517 			 struct sk_buff_head *inputq,
518 			 struct sk_buff_head *namedq,
519 			 struct tipc_link *bc_sndlink,
520 			 struct tipc_link **link)
521 {
522 	struct tipc_link *l;
523 
524 	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
525 			      0, ownnode, peer, NULL, peer_caps, bc_sndlink,
526 			      NULL, inputq, namedq, link))
527 		return false;
528 
529 	l = *link;
530 	strcpy(l->name, tipc_bclink_name);
531 	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
532 	tipc_link_reset(l);
533 	l->state = LINK_RESET;
534 	l->ackers = 0;
535 	l->bc_rcvlink = l;
536 
537 	/* Broadcast send link is always up */
538 	if (link_is_bc_sndlink(l))
539 		l->state = LINK_ESTABLISHED;
540 
541 	/* Disable replicast if even a single peer doesn't support it */
542 	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
543 		tipc_bcast_disable_rcast(net);
544 
545 	return true;
546 }
547 
548 /**
549  * tipc_link_fsm_evt - link finite state machine
550  * @l: pointer to link
551  * @evt: state machine event to be processed
552  */
553 int tipc_link_fsm_evt(struct tipc_link *l, int evt)
554 {
555 	int rc = 0;
556 	int old_state = l->state;
557 
558 	switch (l->state) {
559 	case LINK_RESETTING:
560 		switch (evt) {
561 		case LINK_PEER_RESET_EVT:
562 			l->state = LINK_PEER_RESET;
563 			break;
564 		case LINK_RESET_EVT:
565 			l->state = LINK_RESET;
566 			break;
567 		case LINK_FAILURE_EVT:
568 		case LINK_FAILOVER_BEGIN_EVT:
569 		case LINK_ESTABLISH_EVT:
570 		case LINK_FAILOVER_END_EVT:
571 		case LINK_SYNCH_BEGIN_EVT:
572 		case LINK_SYNCH_END_EVT:
573 		default:
574 			goto illegal_evt;
575 		}
576 		break;
577 	case LINK_RESET:
578 		switch (evt) {
579 		case LINK_PEER_RESET_EVT:
580 			l->state = LINK_ESTABLISHING;
581 			break;
582 		case LINK_FAILOVER_BEGIN_EVT:
583 			l->state = LINK_FAILINGOVER;
584 		case LINK_FAILURE_EVT:
585 		case LINK_RESET_EVT:
586 		case LINK_ESTABLISH_EVT:
587 		case LINK_FAILOVER_END_EVT:
588 			break;
589 		case LINK_SYNCH_BEGIN_EVT:
590 		case LINK_SYNCH_END_EVT:
591 		default:
592 			goto illegal_evt;
593 		}
594 		break;
595 	case LINK_PEER_RESET:
596 		switch (evt) {
597 		case LINK_RESET_EVT:
598 			l->state = LINK_ESTABLISHING;
599 			break;
600 		case LINK_PEER_RESET_EVT:
601 		case LINK_ESTABLISH_EVT:
602 		case LINK_FAILURE_EVT:
603 			break;
604 		case LINK_SYNCH_BEGIN_EVT:
605 		case LINK_SYNCH_END_EVT:
606 		case LINK_FAILOVER_BEGIN_EVT:
607 		case LINK_FAILOVER_END_EVT:
608 		default:
609 			goto illegal_evt;
610 		}
611 		break;
612 	case LINK_FAILINGOVER:
613 		switch (evt) {
614 		case LINK_FAILOVER_END_EVT:
615 			l->state = LINK_RESET;
616 			break;
617 		case LINK_PEER_RESET_EVT:
618 		case LINK_RESET_EVT:
619 		case LINK_ESTABLISH_EVT:
620 		case LINK_FAILURE_EVT:
621 			break;
622 		case LINK_FAILOVER_BEGIN_EVT:
623 		case LINK_SYNCH_BEGIN_EVT:
624 		case LINK_SYNCH_END_EVT:
625 		default:
626 			goto illegal_evt;
627 		}
628 		break;
629 	case LINK_ESTABLISHING:
630 		switch (evt) {
631 		case LINK_ESTABLISH_EVT:
632 			l->state = LINK_ESTABLISHED;
633 			break;
634 		case LINK_FAILOVER_BEGIN_EVT:
635 			l->state = LINK_FAILINGOVER;
636 			break;
637 		case LINK_RESET_EVT:
638 			l->state = LINK_RESET;
639 			break;
640 		case LINK_FAILURE_EVT:
641 		case LINK_PEER_RESET_EVT:
642 		case LINK_SYNCH_BEGIN_EVT:
643 		case LINK_FAILOVER_END_EVT:
644 			break;
645 		case LINK_SYNCH_END_EVT:
646 		default:
647 			goto illegal_evt;
648 		}
649 		break;
650 	case LINK_ESTABLISHED:
651 		switch (evt) {
652 		case LINK_PEER_RESET_EVT:
653 			l->state = LINK_PEER_RESET;
654 			rc |= TIPC_LINK_DOWN_EVT;
655 			break;
656 		case LINK_FAILURE_EVT:
657 			l->state = LINK_RESETTING;
658 			rc |= TIPC_LINK_DOWN_EVT;
659 			break;
660 		case LINK_RESET_EVT:
661 			l->state = LINK_RESET;
662 			break;
663 		case LINK_ESTABLISH_EVT:
664 		case LINK_SYNCH_END_EVT:
665 			break;
666 		case LINK_SYNCH_BEGIN_EVT:
667 			l->state = LINK_SYNCHING;
668 			break;
669 		case LINK_FAILOVER_BEGIN_EVT:
670 		case LINK_FAILOVER_END_EVT:
671 		default:
672 			goto illegal_evt;
673 		}
674 		break;
675 	case LINK_SYNCHING:
676 		switch (evt) {
677 		case LINK_PEER_RESET_EVT:
678 			l->state = LINK_PEER_RESET;
679 			rc |= TIPC_LINK_DOWN_EVT;
680 			break;
681 		case LINK_FAILURE_EVT:
682 			l->state = LINK_RESETTING;
683 			rc |= TIPC_LINK_DOWN_EVT;
684 			break;
685 		case LINK_RESET_EVT:
686 			l->state = LINK_RESET;
687 			break;
688 		case LINK_ESTABLISH_EVT:
689 		case LINK_SYNCH_BEGIN_EVT:
690 			break;
691 		case LINK_SYNCH_END_EVT:
692 			l->state = LINK_ESTABLISHED;
693 			break;
694 		case LINK_FAILOVER_BEGIN_EVT:
695 		case LINK_FAILOVER_END_EVT:
696 		default:
697 			goto illegal_evt;
698 		}
699 		break;
700 	default:
701 		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
702 	}
703 	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
704 	return rc;
705 illegal_evt:
706 	pr_err("Illegal FSM event %x in state %x on link %s\n",
707 	       evt, l->state, l->name);
708 	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
709 	return rc;
710 }
711 
712 /* link_profile_stats - update statistical profiling of traffic
713  */
714 static void link_profile_stats(struct tipc_link *l)
715 {
716 	struct sk_buff *skb;
717 	struct tipc_msg *msg;
718 	int length;
719 
720 	/* Update counters used in statistical profiling of send traffic */
721 	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
722 	l->stats.queue_sz_counts++;
723 
724 	skb = skb_peek(&l->transmq);
725 	if (!skb)
726 		return;
727 	msg = buf_msg(skb);
728 	length = msg_size(msg);
729 
730 	if (msg_user(msg) == MSG_FRAGMENTER) {
731 		if (msg_type(msg) != FIRST_FRAGMENT)
732 			return;
733 		length = msg_size(msg_inner_hdr(msg));
734 	}
735 	l->stats.msg_lengths_total += length;
736 	l->stats.msg_length_counts++;
737 	if (length <= 64)
738 		l->stats.msg_length_profile[0]++;
739 	else if (length <= 256)
740 		l->stats.msg_length_profile[1]++;
741 	else if (length <= 1024)
742 		l->stats.msg_length_profile[2]++;
743 	else if (length <= 4096)
744 		l->stats.msg_length_profile[3]++;
745 	else if (length <= 16384)
746 		l->stats.msg_length_profile[4]++;
747 	else if (length <= 32768)
748 		l->stats.msg_length_profile[5]++;
749 	else
750 		l->stats.msg_length_profile[6]++;
751 }
752 
753 /**
754  * tipc_link_too_silent - check if link is "too silent"
755  * @l: tipc link to be checked
756  *
757  * Returns true if the link 'silent_intv_cnt' is about to reach the
758  * 'abort_limit' value, otherwise false
759  */
760 bool tipc_link_too_silent(struct tipc_link *l)
761 {
762 	return (l->silent_intv_cnt + 2 > l->abort_limit);
763 }
764 
765 /* tipc_link_timeout - perform periodic task as instructed from node timeout
766  */
767 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
768 {
769 	int mtyp = 0;
770 	int rc = 0;
771 	bool state = false;
772 	bool probe = false;
773 	bool setup = false;
774 	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
775 	u16 bc_acked = l->bc_rcvlink->acked;
776 	struct tipc_mon_state *mstate = &l->mon_state;
777 
778 	trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
779 	trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
780 	switch (l->state) {
781 	case LINK_ESTABLISHED:
782 	case LINK_SYNCHING:
783 		mtyp = STATE_MSG;
784 		link_profile_stats(l);
785 		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
786 		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
787 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
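		/* Send a STATE message whenever there is unfinished business on
		 * the link: unacked packets in either direction, or packets
		 * still waiting in the transmit or deferred queues
		 */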
788 		state = bc_acked != bc_snt;
789 		state |= l->bc_rcvlink->rcv_unacked;
790 		state |= l->rcv_unacked;
791 		state |= !skb_queue_empty(&l->transmq);
792 		state |= !skb_queue_empty(&l->deferdq);
793 		probe = mstate->probing;
794 		probe |= l->silent_intv_cnt;
795 		if (probe || mstate->monitoring)
796 			l->silent_intv_cnt++;
797 		break;
798 	case LINK_RESET:
799 		setup = l->rst_cnt++ <= 4;
800 		setup |= !(l->rst_cnt % 16);
801 		mtyp = RESET_MSG;
802 		break;
803 	case LINK_ESTABLISHING:
804 		setup = true;
805 		mtyp = ACTIVATE_MSG;
806 		break;
807 	case LINK_PEER_RESET:
808 	case LINK_RESETTING:
809 	case LINK_FAILINGOVER:
810 		break;
811 	default:
812 		break;
813 	}
814 
815 	if (state || probe || setup)
816 		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);
817 
818 	return rc;
819 }
820 
821 /**
822  * link_schedule_user - schedule a message sender for wakeup after congestion
823  * @l: congested link
824  * @hdr: header of message that is being sent
825  * Create pseudo msg to send back to user when congestion abates
826  */
827 static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
828 {
829 	u32 dnode = tipc_own_addr(l->net);
830 	u32 dport = msg_origport(hdr);
831 	struct sk_buff *skb;
832 
833 	/* Create and schedule wakeup pseudo message */
834 	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
835 			      dnode, l->addr, dport, 0, 0);
836 	if (!skb)
837 		return -ENOBUFS;
838 	msg_set_dest_droppable(buf_msg(skb), true);
839 	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
840 	skb_queue_tail(&l->wakeupq, skb);
841 	l->stats.link_congs++;
842 	trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
843 	return -ELINKCONG;
844 }
845 
846 /**
847  * link_prepare_wakeup - prepare users for wakeup after congestion
848  * @l: congested link
849  * Wake up a number of waiting users, as permitted by available space
850  * in the send queue
851  */
852 static void link_prepare_wakeup(struct tipc_link *l)
853 {
854 	struct sk_buff_head *wakeupq = &l->wakeupq;
855 	struct sk_buff_head *inputq = l->inputq;
856 	struct sk_buff *skb, *tmp;
857 	struct sk_buff_head tmpq;
858 	int avail[5] = {0,};
859 	int imp = 0;
860 
861 	__skb_queue_head_init(&tmpq);
862 
863 	for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
864 		avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;
865 
866 	skb_queue_walk_safe(wakeupq, skb, tmp) {
867 		imp = TIPC_SKB_CB(skb)->chain_imp;
868 		if (avail[imp] <= 0)
869 			continue;
870 		avail[imp]--;
871 		__skb_unlink(skb, wakeupq);
872 		__skb_queue_tail(&tmpq, skb);
873 	}
874 
875 	spin_lock_bh(&inputq->lock);
876 	skb_queue_splice_tail(&tmpq, inputq);
877 	spin_unlock_bh(&inputq->lock);
878 
879 }
880 
881 void tipc_link_reset(struct tipc_link *l)
882 {
883 	struct sk_buff_head list;
884 	u32 imp;
885 
886 	__skb_queue_head_init(&list);
887 
888 	l->in_session = false;
889 	/* Force re-synch of peer session number before establishing */
890 	l->peer_session--;
891 	l->session++;
892 	l->mtu = l->advertised_mtu;
893 
894 	spin_lock_bh(&l->wakeupq.lock);
895 	skb_queue_splice_init(&l->wakeupq, &list);
896 	spin_unlock_bh(&l->wakeupq.lock);
897 
898 	spin_lock_bh(&l->inputq->lock);
899 	skb_queue_splice_init(&list, l->inputq);
900 	spin_unlock_bh(&l->inputq->lock);
901 
902 	__skb_queue_purge(&l->transmq);
903 	__skb_queue_purge(&l->deferdq);
904 	__skb_queue_purge(&l->backlogq);
905 	__skb_queue_purge(&l->failover_deferdq);
906 	for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
907 		l->backlog[imp].len = 0;
908 		l->backlog[imp].target_bskb = NULL;
909 	}
910 	kfree_skb(l->reasm_buf);
911 	kfree_skb(l->reasm_tnlmsg);
912 	kfree_skb(l->failover_reasm_skb);
913 	l->reasm_buf = NULL;
914 	l->reasm_tnlmsg = NULL;
915 	l->failover_reasm_skb = NULL;
916 	l->rcv_unacked = 0;
917 	l->snd_nxt = 1;
918 	l->rcv_nxt = 1;
919 	l->snd_nxt_state = 1;
920 	l->rcv_nxt_state = 1;
921 	l->acked = 0;
922 	l->silent_intv_cnt = 0;
923 	l->rst_cnt = 0;
924 	l->bc_peer_is_up = false;
925 	memset(&l->mon_state, 0, sizeof(l->mon_state));
926 	tipc_link_reset_stats(l);
927 }
928 
929 /**
930  * tipc_link_xmit(): enqueue buffer list according to queue situation
931  * @link: link to use
932  * @list: chain of buffers containing message
933  * @xmitq: returned list of packets to be sent by caller
934  *
935  * Consumes the buffer chain.
936  * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
937  * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
938  */
939 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
940 		   struct sk_buff_head *xmitq)
941 {
942 	unsigned int maxwin = l->window;
943 	unsigned int mtu = l->mtu;
944 	u16 ack = l->rcv_nxt - 1;
945 	u16 seqno = l->snd_nxt;
946 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
947 	struct sk_buff_head *transmq = &l->transmq;
948 	struct sk_buff_head *backlogq = &l->backlogq;
949 	struct sk_buff *skb, *_skb, **tskb;
950 	int pkt_cnt = skb_queue_len(list);
951 	struct tipc_msg *hdr;
952 	int rc = 0;
953 	int imp;
954 
955 	if (pkt_cnt <= 0)
956 		return 0;
957 
958 	hdr = buf_msg(skb_peek(list));
959 	if (unlikely(msg_size(hdr) > mtu)) {
960 		pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
961 			skb_queue_len(list), msg_user(hdr),
962 			msg_type(hdr), msg_size(hdr), mtu);
963 		__skb_queue_purge(list);
964 		return -EMSGSIZE;
965 	}
966 
967 	imp = msg_importance(hdr);
968 	/* Allow oversubscription of one data msg per source at congestion */
969 	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
970 		if (imp == TIPC_SYSTEM_IMPORTANCE) {
971 			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
972 			return -ENOBUFS;
973 		}
974 		rc = link_schedule_user(l, hdr);
975 	}
976 
977 	if (pkt_cnt > 1) {
978 		l->stats.sent_fragmented++;
979 		l->stats.sent_fragments += pkt_cnt;
980 	}
981 
982 	/* Prepare each packet for sending, and add to relevant queue: */
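	/* While the send window has room, each packet is added to the transmit
	 * queue and a clone is queued for immediate sending; once the window
	 * is full, remaining packets are bundled where possible and appended
	 * to the backlog queue
	 */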
983 	while (skb_queue_len(list)) {
984 		skb = skb_peek(list);
985 		hdr = buf_msg(skb);
986 		msg_set_seqno(hdr, seqno);
987 		msg_set_ack(hdr, ack);
988 		msg_set_bcast_ack(hdr, bc_ack);
989 
990 		if (likely(skb_queue_len(transmq) < maxwin)) {
991 			_skb = skb_clone(skb, GFP_ATOMIC);
992 			if (!_skb) {
993 				__skb_queue_purge(list);
994 				return -ENOBUFS;
995 			}
996 			__skb_dequeue(list);
997 			__skb_queue_tail(transmq, skb);
998 			/* next retransmit attempt */
999 			if (link_is_bc_sndlink(l))
1000 				TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
1001 			__skb_queue_tail(xmitq, _skb);
1002 			TIPC_SKB_CB(skb)->ackers = l->ackers;
1003 			l->rcv_unacked = 0;
1004 			l->stats.sent_pkts++;
1005 			seqno++;
1006 			continue;
1007 		}
1008 		tskb = &l->backlog[imp].target_bskb;
1009 		if (tipc_msg_bundle(*tskb, hdr, mtu)) {
1010 			kfree_skb(__skb_dequeue(list));
1011 			l->stats.sent_bundled++;
1012 			continue;
1013 		}
1014 		if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) {
1015 			kfree_skb(__skb_dequeue(list));
1016 			__skb_queue_tail(backlogq, *tskb);
1017 			l->backlog[imp].len++;
1018 			l->stats.sent_bundled++;
1019 			l->stats.sent_bundles++;
1020 			continue;
1021 		}
1022 		l->backlog[imp].target_bskb = NULL;
1023 		l->backlog[imp].len += skb_queue_len(list);
1024 		skb_queue_splice_tail_init(list, backlogq);
1025 	}
1026 	l->snd_nxt = seqno;
1027 	return rc;
1028 }
1029 
1030 static void tipc_link_advance_backlog(struct tipc_link *l,
1031 				      struct sk_buff_head *xmitq)
1032 {
1033 	struct sk_buff *skb, *_skb;
1034 	struct tipc_msg *hdr;
1035 	u16 seqno = l->snd_nxt;
1036 	u16 ack = l->rcv_nxt - 1;
1037 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1038 	u32 imp;
1039 
1040 	while (skb_queue_len(&l->transmq) < l->window) {
1041 		skb = skb_peek(&l->backlogq);
1042 		if (!skb)
1043 			break;
1044 		_skb = skb_clone(skb, GFP_ATOMIC);
1045 		if (!_skb)
1046 			break;
1047 		__skb_dequeue(&l->backlogq);
1048 		hdr = buf_msg(skb);
1049 		imp = msg_importance(hdr);
1050 		l->backlog[imp].len--;
1051 		if (unlikely(skb == l->backlog[imp].target_bskb))
1052 			l->backlog[imp].target_bskb = NULL;
1053 		__skb_queue_tail(&l->transmq, skb);
1054 		/* next retransmit attempt */
1055 		if (link_is_bc_sndlink(l))
1056 			TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
1057 
1058 		__skb_queue_tail(xmitq, _skb);
1059 		TIPC_SKB_CB(skb)->ackers = l->ackers;
1060 		msg_set_seqno(hdr, seqno);
1061 		msg_set_ack(hdr, ack);
1062 		msg_set_bcast_ack(hdr, bc_ack);
1063 		l->rcv_unacked = 0;
1064 		l->stats.sent_pkts++;
1065 		seqno++;
1066 	}
1067 	l->snd_nxt = seqno;
1068 }
1069 
1070 /**
1071  * link_retransmit_failure() - Detect repeated retransmit failures
1072  * @l: tipc link sender
1073  * @r: tipc link receiver (= l in case of unicast)
1074  * @rc: returned code
1075  *
1076  * Return: true if repeated retransmit failures have occurred, otherwise
1077  * false
1078  */
1079 static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
1080 				    int *rc)
1081 {
1082 	struct sk_buff *skb = skb_peek(&l->transmq);
1083 	struct tipc_msg *hdr;
1084 
1085 	if (!skb)
1086 		return false;
1087 
1088 	if (!TIPC_SKB_CB(skb)->retr_cnt)
1089 		return false;
1090 
1091 	if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
1092 			msecs_to_jiffies(r->tolerance * 10)))
1093 		return false;
1094 
1095 	hdr = buf_msg(skb);
1096 	if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
1097 		return false;
1098 
1099 	pr_warn("Retransmission failure on link <%s>\n", l->name);
1100 	link_print(l, "State of link ");
1101 	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
1102 		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
1103 	pr_info("sqno %u, prev: %x, dest: %x\n",
1104 		msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
1105 	pr_info("retr_stamp %d, retr_cnt %d\n",
1106 		jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
1107 		TIPC_SKB_CB(skb)->retr_cnt);
1108 
1109 	trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
1110 	trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
1111 	trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
1112 
1113 	if (link_is_bc_sndlink(l)) {
1114 		r->state = LINK_RESET;
1115 		*rc = TIPC_LINK_DOWN_EVT;
1116 	} else {
1117 		*rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1118 	}
1119 
1120 	return true;
1121 }
1122 
1123 /* tipc_link_bc_retrans() - retransmit zero or more packets
1124  * @l: the link to transmit on
1125  * @r: the receiving link ordering the retransmit. Same as l if unicast
1126  * @from: retransmit from (inclusive) this sequence number
1127  * @to: retransmit to (inclusive) this sequence number
1128  * @xmitq: queue for accumulating the retransmitted packets
1129  */
1130 static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
1131 				u16 from, u16 to, struct sk_buff_head *xmitq)
1132 {
1133 	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
1134 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1135 	u16 ack = l->rcv_nxt - 1;
1136 	struct tipc_msg *hdr;
1137 	int rc = 0;
1138 
1139 	if (!skb)
1140 		return 0;
1141 	if (less(to, from))
1142 		return 0;
1143 
1144 	trace_tipc_link_retrans(r, from, to, &l->transmq);
1145 
1146 	if (link_retransmit_failure(l, r, &rc))
1147 		return rc;
1148 
1149 	skb_queue_walk(&l->transmq, skb) {
1150 		hdr = buf_msg(skb);
1151 		if (less(msg_seqno(hdr), from))
1152 			continue;
1153 		if (more(msg_seqno(hdr), to))
1154 			break;
1155 
1156 		if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1157 			continue;
1158 		TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
1159 		_skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC);
1160 		if (!_skb)
1161 			return 0;
1162 		hdr = buf_msg(_skb);
1163 		msg_set_ack(hdr, ack);
1164 		msg_set_bcast_ack(hdr, bc_ack);
1165 		_skb->priority = TC_PRIO_CONTROL;
1166 		__skb_queue_tail(xmitq, _skb);
1167 		l->stats.retransmitted++;
1168 
1169 		/* Increase actual retrans counter & mark first time */
1170 		if (!TIPC_SKB_CB(skb)->retr_cnt++)
1171 			TIPC_SKB_CB(skb)->retr_stamp = jiffies;
1172 	}
1173 	return 0;
1174 }
1175 
1176 /* tipc_data_input - deliver data and name distr msgs to upper layer
1177  *
1178  * Consumes buffer if message is of right type
1179  * Node lock must be held
1180  */
1181 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
1182 			    struct sk_buff_head *inputq)
1183 {
1184 	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
1185 	struct tipc_msg *hdr = buf_msg(skb);
1186 
1187 	switch (msg_user(hdr)) {
1188 	case TIPC_LOW_IMPORTANCE:
1189 	case TIPC_MEDIUM_IMPORTANCE:
1190 	case TIPC_HIGH_IMPORTANCE:
1191 	case TIPC_CRITICAL_IMPORTANCE:
1192 		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
1193 			skb_queue_tail(mc_inputq, skb);
1194 			return true;
1195 		}
1196 		/* fall through */
1197 	case CONN_MANAGER:
1198 		skb_queue_tail(inputq, skb);
1199 		return true;
1200 	case GROUP_PROTOCOL:
1201 		skb_queue_tail(mc_inputq, skb);
1202 		return true;
1203 	case NAME_DISTRIBUTOR:
1204 		l->bc_rcvlink->state = LINK_ESTABLISHED;
1205 		skb_queue_tail(l->namedq, skb);
1206 		return true;
1207 	case MSG_BUNDLER:
1208 	case TUNNEL_PROTOCOL:
1209 	case MSG_FRAGMENTER:
1210 	case BCAST_PROTOCOL:
1211 		return false;
1212 	default:
1213 		pr_warn("Dropping received illegal msg type\n");
1214 		kfree_skb(skb);
1215 		return true;
1216 	};
1217 }
1218 
1219 /* tipc_link_input - process packet that has passed link protocol check
1220  *
1221  * Consumes buffer
1222  */
1223 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
1224 			   struct sk_buff_head *inputq,
1225 			   struct sk_buff **reasm_skb)
1226 {
1227 	struct tipc_msg *hdr = buf_msg(skb);
1228 	struct sk_buff *iskb;
1229 	struct sk_buff_head tmpq;
1230 	int usr = msg_user(hdr);
1231 	int pos = 0;
1232 
1233 	if (usr == MSG_BUNDLER) {
1234 		skb_queue_head_init(&tmpq);
1235 		l->stats.recv_bundles++;
1236 		l->stats.recv_bundled += msg_msgcnt(hdr);
1237 		while (tipc_msg_extract(skb, &iskb, &pos))
1238 			tipc_data_input(l, iskb, &tmpq);
1239 		tipc_skb_queue_splice_tail(&tmpq, inputq);
1240 		return 0;
1241 	} else if (usr == MSG_FRAGMENTER) {
1242 		l->stats.recv_fragments++;
1243 		if (tipc_buf_append(reasm_skb, &skb)) {
1244 			l->stats.recv_fragmented++;
1245 			tipc_data_input(l, skb, inputq);
1246 		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1247 			pr_warn_ratelimited("Unable to build fragment list\n");
1248 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1249 		}
1250 		return 0;
1251 	} else if (usr == BCAST_PROTOCOL) {
1252 		tipc_bcast_lock(l->net);
1253 		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
1254 		tipc_bcast_unlock(l->net);
1255 	}
1256 
1257 	kfree_skb(skb);
1258 	return 0;
1259 }
1260 
1261 /* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
1262  *			 inner message along with the ones in the old link's
1263  *			 deferdq
1264  * @l: tunnel link
1265  * @skb: TUNNEL_PROTOCOL message
1266  * @inputq: queue to put messages ready for delivery
1267  */
1268 static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
1269 			     struct sk_buff_head *inputq)
1270 {
1271 	struct sk_buff **reasm_skb = &l->failover_reasm_skb;
1272 	struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
1273 	struct sk_buff_head *fdefq = &l->failover_deferdq;
1274 	struct tipc_msg *hdr = buf_msg(skb);
1275 	struct sk_buff *iskb;
1276 	int ipos = 0;
1277 	int rc = 0;
1278 	u16 seqno;
1279 
1280 	if (msg_type(hdr) == SYNCH_MSG) {
1281 		kfree_skb(skb);
1282 		return 0;
1283 	}
1284 
1285 	/* Not a fragment? */
1286 	if (likely(!msg_nof_fragms(hdr))) {
1287 		if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
1288 			pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
1289 					    skb_queue_len(fdefq));
1290 			return 0;
1291 		}
1292 		kfree_skb(skb);
1293 	} else {
1294 		/* Set fragment type for buf_append */
1295 		if (msg_fragm_no(hdr) == 1)
1296 			msg_set_type(hdr, FIRST_FRAGMENT);
1297 		else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
1298 			msg_set_type(hdr, FRAGMENT);
1299 		else
1300 			msg_set_type(hdr, LAST_FRAGMENT);
1301 
1302 		if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
1303 			/* Successful but non-complete reassembly? */
1304 			if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
1305 				return 0;
1306 			pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
1307 			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1308 		}
1309 		iskb = skb;
1310 	}
1311 
1312 	do {
1313 		seqno = buf_seqno(iskb);
1314 		if (unlikely(less(seqno, l->drop_point))) {
1315 			kfree_skb(iskb);
1316 			continue;
1317 		}
1318 		if (unlikely(seqno != l->drop_point)) {
1319 			__tipc_skb_queue_sorted(fdefq, seqno, iskb);
1320 			continue;
1321 		}
1322 
1323 		l->drop_point++;
1324 		if (!tipc_data_input(l, iskb, inputq))
1325 			rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
1326 		if (unlikely(rc))
1327 			break;
1328 	} while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));
1329 
1330 	return rc;
1331 }
1332 
1333 static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
1334 {
1335 	bool released = false;
1336 	struct sk_buff *skb, *tmp;
1337 
1338 	skb_queue_walk_safe(&l->transmq, skb, tmp) {
1339 		if (more(buf_seqno(skb), acked))
1340 			break;
1341 		__skb_unlink(skb, &l->transmq);
1342 		kfree_skb(skb);
1343 		released = true;
1344 	}
1345 	return released;
1346 }
1347 
1348 /* tipc_build_gap_ack_blks - build Gap ACK blocks
1349  * @l: tipc link that data have come with gaps in sequence if any
1350  * @data: data buffer to store the Gap ACK blocks after built
1351  *
1352  * returns the actual allocated memory size
1353  */
1354 static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data)
1355 {
1356 	struct sk_buff *skb = skb_peek(&l->deferdq);
1357 	struct tipc_gap_ack_blks *ga = data;
1358 	u16 len, expect, seqno = 0;
1359 	u8 n = 0;
1360 
1361 	if (!skb)
1362 		goto exit;
1363 
1364 	expect = buf_seqno(skb);
1365 	skb_queue_walk(&l->deferdq, skb) {
1366 		seqno = buf_seqno(skb);
1367 		if (unlikely(more(seqno, expect))) {
1368 			ga->gacks[n].ack = htons(expect - 1);
1369 			ga->gacks[n].gap = htons(seqno - expect);
1370 			if (++n >= MAX_GAP_ACK_BLKS) {
1371 				pr_info_ratelimited("Too few Gap ACK blocks!\n");
1372 				goto exit;
1373 			}
1374 		} else if (unlikely(less(seqno, expect))) {
1375 			pr_warn("Unexpected skb in deferdq!\n");
1376 			continue;
1377 		}
1378 		expect = seqno + 1;
1379 	}
1380 
1381 	/* last block */
1382 	ga->gacks[n].ack = htons(seqno);
1383 	ga->gacks[n].gap = 0;
1384 	n++;
1385 
1386 exit:
1387 	len = tipc_gap_ack_blks_sz(n);
1388 	ga->len = htons(len);
1389 	ga->gack_cnt = n;
1390 	return len;
1391 }
1392 
1393 /* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
1394  *			       acked packets, also doing retransmissions if
1395  *			       gaps found
1396  * @l: tipc link with transmq queue to be advanced
1397  * @acked: seqno of last packet acked by peer without any gaps before
1398  * @gap: # of gap packets
1399  * @ga: buffer pointer to Gap ACK blocks from peer
1400  * @xmitq: queue for accumulating the retransmitted packets if any
1401  *
1402  * In case of a repeated retransmit failures, the call will return shortly
1403  * with a returned code (e.g. TIPC_LINK_DOWN_EVT)
1404  */
1405 static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
1406 				     struct tipc_gap_ack_blks *ga,
1407 				     struct sk_buff_head *xmitq)
1408 {
1409 	struct sk_buff *skb, *_skb, *tmp;
1410 	struct tipc_msg *hdr;
1411 	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1412 	u16 ack = l->rcv_nxt - 1;
1413 	bool passed = false;
1414 	u16 seqno, n = 0;
1415 	int rc = 0;
1416 
1417 	skb_queue_walk_safe(&l->transmq, skb, tmp) {
1418 		seqno = buf_seqno(skb);
1419 
1420 next_gap_ack:
1421 		if (less_eq(seqno, acked)) {
1422 			/* release skb */
1423 			__skb_unlink(skb, &l->transmq);
1424 			kfree_skb(skb);
1425 		} else if (less_eq(seqno, acked + gap)) {
1426 			/* First, check if repeated retrans failures occur */
1427 			if (!passed && link_retransmit_failure(l, l, &rc))
1428 				return rc;
1429 			passed = true;
1430 
1431 			/* retransmit skb if unrestricted */
1432 			if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1433 				continue;
1434 			TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
1435 			_skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE,
1436 					   GFP_ATOMIC);
1437 			if (!_skb)
1438 				continue;
1439 			hdr = buf_msg(_skb);
1440 			msg_set_ack(hdr, ack);
1441 			msg_set_bcast_ack(hdr, bc_ack);
1442 			_skb->priority = TC_PRIO_CONTROL;
1443 			__skb_queue_tail(xmitq, _skb);
1444 			l->stats.retransmitted++;
1445 
1446 			/* Increase actual retrans counter & mark first time */
1447 			if (!TIPC_SKB_CB(skb)->retr_cnt++)
1448 				TIPC_SKB_CB(skb)->retr_stamp = jiffies;
1449 		} else {
1450 			/* retry with Gap ACK blocks if any */
1451 			if (!ga || n >= ga->gack_cnt)
1452 				break;
1453 			acked = ntohs(ga->gacks[n].ack);
1454 			gap = ntohs(ga->gacks[n].gap);
1455 			n++;
1456 			goto next_gap_ack;
1457 		}
1458 	}
1459 
1460 	return 0;
1461 }
1462 
1463 /* tipc_link_build_state_msg: prepare link state message for transmission
1464  *
1465  * Note that sending of broadcast ack is coordinated among nodes, to reduce
1466  * risk of ack storms towards the sender
1467  */
1468 int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1469 {
1470 	if (!l)
1471 		return 0;
1472 
1473 	/* Broadcast ACK must be sent via a unicast link => defer to caller */
1474 	if (link_is_bc_rcvlink(l)) {
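		/* A node acks only the packet numbers whose low nibble
		 * complements its own address, i.e. roughly one in sixteen,
		 * so that not all receivers ack the same packet
		 */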
1475 		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
1476 			return 0;
1477 		l->rcv_unacked = 0;
1478 
1479 		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1480 		l->snd_nxt = l->rcv_nxt;
1481 		return TIPC_LINK_SND_STATE;
1482 	}
1483 
1484 	/* Unicast ACK */
1485 	l->rcv_unacked = 0;
1486 	l->stats.sent_acks++;
1487 	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1488 	return 0;
1489 }
1490 
1491 /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1492  */
1493 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1494 {
1495 	int mtyp = RESET_MSG;
1496 	struct sk_buff *skb;
1497 
1498 	if (l->state == LINK_ESTABLISHING)
1499 		mtyp = ACTIVATE_MSG;
1500 
1501 	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
1502 
1503 	/* Inform peer that this endpoint is going down if applicable */
1504 	skb = skb_peek_tail(xmitq);
1505 	if (skb && (l->state == LINK_RESET))
1506 		msg_set_peer_stopping(buf_msg(skb), 1);
1507 }
1508 
1509 /* tipc_link_build_nack_msg: prepare link nack message for transmission
1510  * Note that sending of broadcast NACK is coordinated among nodes, to
1511  * reduce the risk of NACK storms towards the sender
1512  */
1513 static int tipc_link_build_nack_msg(struct tipc_link *l,
1514 				    struct sk_buff_head *xmitq)
1515 {
1516 	u32 def_cnt = ++l->stats.deferred_recv;
1517 	u32 defq_len = skb_queue_len(&l->deferdq);
1518 	int match1, match2;
1519 
1520 	if (link_is_bc_rcvlink(l)) {
1521 		match1 = def_cnt & 0xf;
1522 		match2 = tipc_own_addr(l->net) & 0xf;
1523 		if (match1 == match2)
1524 			return TIPC_LINK_SND_STATE;
1525 		return 0;
1526 	}
1527 
1528 	if (defq_len >= 3 && !((defq_len - 3) % 16))
1529 		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1530 	return 0;
1531 }
1532 
1533 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1534  * @l: the link that should handle the message
1535  * @skb: TIPC packet
1536  * @xmitq: queue to place packets to be sent after this call
1537  */
1538 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1539 		  struct sk_buff_head *xmitq)
1540 {
1541 	struct sk_buff_head *defq = &l->deferdq;
1542 	struct tipc_msg *hdr = buf_msg(skb);
1543 	u16 seqno, rcv_nxt, win_lim;
1544 	int rc = 0;
1545 
1546 	/* Verify and update link state */
1547 	if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1548 		return tipc_link_proto_rcv(l, skb, xmitq);
1549 
1550 	/* Don't send probe at next timeout expiration */
1551 	l->silent_intv_cnt = 0;
1552 
1553 	do {
1554 		hdr = buf_msg(skb);
1555 		seqno = msg_seqno(hdr);
1556 		rcv_nxt = l->rcv_nxt;
1557 		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1558 
1559 		if (unlikely(!link_is_up(l))) {
1560 			if (l->state == LINK_ESTABLISHING)
1561 				rc = TIPC_LINK_UP_EVT;
1562 			goto drop;
1563 		}
1564 
1565 		/* Drop if outside receive window */
1566 		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1567 			l->stats.duplicates++;
1568 			goto drop;
1569 		}
1570 
1571 		/* Forward queues and wake up waiting users */
1572 		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
1573 			tipc_link_advance_backlog(l, xmitq);
1574 			if (unlikely(!skb_queue_empty(&l->wakeupq)))
1575 				link_prepare_wakeup(l);
1576 		}
1577 
1578 		/* Defer delivery if sequence gap */
1579 		if (unlikely(seqno != rcv_nxt)) {
1580 			__tipc_skb_queue_sorted(defq, seqno, skb);
1581 			rc |= tipc_link_build_nack_msg(l, xmitq);
1582 			break;
1583 		}
1584 
1585 		/* Deliver packet */
1586 		l->rcv_nxt++;
1587 		l->stats.recv_pkts++;
1588 
1589 		if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
1590 			rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
1591 		else if (!tipc_data_input(l, skb, l->inputq))
1592 			rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
1593 		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1594 			rc |= tipc_link_build_state_msg(l, xmitq);
1595 		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
1596 			break;
1597 	} while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
1598 
1599 	return rc;
1600 drop:
1601 	kfree_skb(skb);
1602 	return rc;
1603 }
1604 
1605 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1606 				      bool probe_reply, u16 rcvgap,
1607 				      int tolerance, int priority,
1608 				      struct sk_buff_head *xmitq)
1609 {
1610 	struct tipc_link *bcl = l->bc_rcvlink;
1611 	struct sk_buff *skb;
1612 	struct tipc_msg *hdr;
1613 	struct sk_buff_head *dfq = &l->deferdq;
1614 	bool node_up = link_is_up(bcl);
1615 	struct tipc_mon_state *mstate = &l->mon_state;
1616 	int dlen = 0;
1617 	void *data;
1618 	u16 glen = 0;
1619 
1620 	/* Don't send protocol message during reset or link failover */
1621 	if (tipc_link_is_blocked(l))
1622 		return;
1623 
1624 	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1625 		return;
1626 
1627 	if (!skb_queue_empty(dfq))
1628 		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1629 
1630 	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1631 			      tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
1632 			      l->addr, tipc_own_addr(l->net), 0, 0, 0);
1633 	if (!skb)
1634 		return;
1635 
1636 	hdr = buf_msg(skb);
1637 	data = msg_data(hdr);
1638 	msg_set_session(hdr, l->session);
1639 	msg_set_bearer_id(hdr, l->bearer_id);
1640 	msg_set_net_plane(hdr, l->net_plane);
1641 	msg_set_next_sent(hdr, l->snd_nxt);
1642 	msg_set_ack(hdr, l->rcv_nxt - 1);
1643 	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1644 	msg_set_bc_ack_invalid(hdr, !node_up);
1645 	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1646 	msg_set_link_tolerance(hdr, tolerance);
1647 	msg_set_linkprio(hdr, priority);
1648 	msg_set_redundant_link(hdr, node_up);
1649 	msg_set_seq_gap(hdr, 0);
1650 	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
1651 
1652 	if (mtyp == STATE_MSG) {
1653 		if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
1654 			msg_set_seqno(hdr, l->snd_nxt_state++);
1655 		msg_set_seq_gap(hdr, rcvgap);
1656 		msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
1657 		msg_set_probe(hdr, probe);
1658 		msg_set_is_keepalive(hdr, probe || probe_reply);
1659 		if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
1660 			glen = tipc_build_gap_ack_blks(l, data);
1661 		tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
1662 		msg_set_size(hdr, INT_H_SIZE + glen + dlen);
1663 		skb_trim(skb, INT_H_SIZE + glen + dlen);
1664 		l->stats.sent_states++;
1665 		l->rcv_unacked = 0;
1666 	} else {
1667 		/* RESET_MSG or ACTIVATE_MSG */
1668 		if (mtyp == ACTIVATE_MSG) {
1669 			msg_set_dest_session_valid(hdr, 1);
1670 			msg_set_dest_session(hdr, l->peer_session);
1671 		}
1672 		msg_set_max_pkt(hdr, l->advertised_mtu);
1673 		strcpy(data, l->if_name);
1674 		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1675 		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
1676 	}
1677 	if (probe)
1678 		l->stats.sent_probes++;
1679 	if (rcvgap)
1680 		l->stats.sent_nacks++;
1681 	skb->priority = TC_PRIO_CONTROL;
1682 	__skb_queue_tail(xmitq, skb);
1683 	trace_tipc_proto_build(skb, false, l->name);
1684 }
1685 
1686 void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
1687 				    struct sk_buff_head *xmitq)
1688 {
1689 	u32 onode = tipc_own_addr(l->net);
1690 	struct tipc_msg *hdr, *ihdr;
1691 	struct sk_buff_head tnlq;
1692 	struct sk_buff *skb;
1693 	u32 dnode = l->addr;
1694 
1695 	__skb_queue_head_init(&tnlq);
1696 	skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
1697 			      INT_H_SIZE, BASIC_H_SIZE,
1698 			      dnode, onode, 0, 0, 0);
1699 	if (!skb) {
1700 		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1701 		return;
1702 	}
1703 
1704 	hdr = buf_msg(skb);
1705 	msg_set_msgcnt(hdr, 1);
1706 	msg_set_bearer_id(hdr, l->peer_bearer_id);
1707 
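	/* Embed a minimal inner user message; TIPC_ERR_NO_PORT marks it
	 * as one the receiver will reject rather than deliver, so this
	 * dummy tunnel packet presumably serves only to signal failover.
	 */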
1708 	ihdr = (struct tipc_msg *)msg_data(hdr);
1709 	tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1710 		      BASIC_H_SIZE, dnode);
1711 	msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
1712 	__skb_queue_tail(&tnlq, skb);
1713 	tipc_link_xmit(l, &tnlq, xmitq);
1714 }
1715 
1716 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1717  * with contents of the link's transmit and backlog queues.
1718  */
1719 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1720 			   int mtyp, struct sk_buff_head *xmitq)
1721 {
1722 	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1723 	struct sk_buff *skb, *tnlskb;
1724 	struct tipc_msg *hdr, tnlhdr;
1725 	struct sk_buff_head *queue = &l->transmq;
1726 	struct sk_buff_head tmpxq, tnlq, frags;
1727 	u16 pktlen, pktcnt, seqno = l->snd_nxt;
1728 	bool pktcnt_need_update = false;
1729 	u16 syncpt;
1730 	int rc;
1731 
1732 	if (!tnl)
1733 		return;
1734 
1735 	__skb_queue_head_init(&tnlq);
1736 	__skb_queue_head_init(&tmpxq);
1737 	__skb_queue_head_init(&frags);
1738 
1739 	/* At least one packet required for safe algorithm => add dummy */
1740 	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1741 			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1742 			      0, 0, TIPC_ERR_NO_PORT);
1743 	if (!skb) {
1744 		pr_warn("%sunable to create tunnel packet\n", link_co_err);
1745 		return;
1746 	}
1747 	__skb_queue_tail(&tnlq, skb);
1748 	tipc_link_xmit(l, &tnlq, &tmpxq);
1749 	__skb_queue_purge(&tmpxq);
1750 
1751 	/* Link Synching:
1752 	 * From now on, send only one single ("dummy") SYNCH message
1753 	 * to peer. The SYNCH message does not contain any data, just
1754 	 * a header conveying the synch point to the peer.
1755 	 */
1756 	if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
1757 		tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
1758 					 INT_H_SIZE, 0, l->addr,
1759 					 tipc_own_addr(l->net),
1760 					 0, 0, 0);
1761 		if (!tnlskb) {
1762 			pr_warn("%sunable to create dummy SYNCH_MSG\n",
1763 				link_co_err);
1764 			return;
1765 		}
1766 
1767 		hdr = buf_msg(tnlskb);
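		/* Synch point: seqno of the last packet that will have
		 * been sent on this link once the backlog is flushed.
		 */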
1768 		syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
1769 		msg_set_syncpt(hdr, syncpt);
1770 		msg_set_bearer_id(hdr, l->peer_bearer_id);
1771 		__skb_queue_tail(&tnlq, tnlskb);
1772 		tipc_link_xmit(tnl, &tnlq, xmitq);
1773 		return;
1774 	}
1775 
1776 	/* Initialize reusable tunnel packet header */
1777 	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
1778 		      mtyp, INT_H_SIZE, l->addr);
1779 	if (mtyp == SYNCH_MSG)
1780 		pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
1781 	else
1782 		pktcnt = skb_queue_len(&l->transmq);
1783 	pktcnt += skb_queue_len(&l->backlogq);
1784 	msg_set_msgcnt(&tnlhdr, pktcnt);
1785 	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1786 tnl:
1787 	/* Wrap each packet into a tunnel packet */
1788 	skb_queue_walk(queue, skb) {
1789 		hdr = buf_msg(skb);
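		/* Backlog packets have not been assigned seqnos yet,
		 * so number them here as they are wrapped.
		 */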
1790 		if (queue == &l->backlogq)
1791 			msg_set_seqno(hdr, seqno++);
1792 		pktlen = msg_size(hdr);
1793 
1794 		/* Tunnel link MTU is not large enough? This could be
1795 		 * due to:
1796 		 * 1) The link MTU has just changed or is set differently;
1797 		 * 2) A FAILOVER arriving on top of a SYNCH message.
1798 		 *
1799 		 * The 2nd case should not happen if the peer supports
1800 		 * TIPC_TUNNEL_ENHANCED.
1801 		 */
1802 		if (pktlen > tnl->mtu - INT_H_SIZE) {
1803 			if (mtyp == FAILOVER_MSG &&
1804 			    (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
1805 				rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
1806 						       &frags);
1807 				if (rc) {
1808 					pr_warn("%sunable to frag msg: rc %d\n",
1809 						link_co_err, rc);
1810 					return;
1811 				}
1812 				pktcnt += skb_queue_len(&frags) - 1;
1813 				pktcnt_need_update = true;
1814 				skb_queue_splice_tail_init(&frags, &tnlq);
1815 				continue;
1816 			}
1817 			/* Peer does not support TIPC_TUNNEL_ENHANCED, so the
1818 			 * oversized packet cannot be fragmented => warn and return!
1819 			 */
1820 			pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
1821 					    link_co_err, msg_user(hdr),
1822 					    msg_type(hdr), msg_size(hdr));
1823 			return;
1824 		}
1825 
1826 		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1827 		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
1828 		if (!tnlskb) {
1829 			pr_warn("%sunable to send packet\n", link_co_err);
1830 			return;
1831 		}
1832 		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1833 		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1834 		__skb_queue_tail(&tnlq, tnlskb);
1835 	}
1836 	if (queue != &l->backlogq) {
1837 		queue = &l->backlogq;
1838 		goto tnl;
1839 	}
1840 
1841 	if (pktcnt_need_update)
1842 		skb_queue_walk(&tnlq, skb) {
1843 			hdr = buf_msg(skb);
1844 			msg_set_msgcnt(hdr, pktcnt);
1845 		}
1846 
1847 	tipc_link_xmit(tnl, &tnlq, xmitq);
1848 
1849 	if (mtyp == FAILOVER_MSG) {
1850 		tnl->drop_point = l->rcv_nxt;
1851 		tnl->failover_reasm_skb = l->reasm_buf;
1852 		l->reasm_buf = NULL;
1853 
1854 		/* Failover the link's deferdq */
1855 		if (unlikely(!skb_queue_empty(fdefq))) {
1856 			pr_warn("Link failover deferdq not empty: %d!\n",
1857 				skb_queue_len(fdefq));
1858 			__skb_queue_purge(fdefq);
1859 		}
1860 		skb_queue_splice_init(&l->deferdq, fdefq);
1861 	}
1862 }
1863 
1864 /**
1865  * tipc_link_failover_prepare() - prepare tnl for link failover
1866  *
1867  * This is a special version of its precursor, tipc_link_tnl_prepare();
1868  * see tipc_node_link_failover() for details.
1869  *
1870  * @l: failover link
1871  * @tnl: tunnel link
1872  * @xmitq: queue for messages to be transmitted
1873  */
1874 void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
1875 				struct sk_buff_head *xmitq)
1876 {
1877 	struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1878 
1879 	tipc_link_create_dummy_tnl_msg(tnl, xmitq);
1880 
1881 	/* This failover link endpoint was never established before,
1882 	 * so it has not received anything from the peer.
1883 	 * Otherwise this would be a normal failover situation, or the
1884 	 * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
1885 	 * would have to start over from scratch instead.
1886 	 */
1887 	tnl->drop_point = 1;
1888 	tnl->failover_reasm_skb = NULL;
1889 
1890 	/* Initiate the link's failover deferdq */
1891 	if (unlikely(!skb_queue_empty(fdefq))) {
1892 		pr_warn("Link failover deferdq not empty: %d!\n",
1893 			skb_queue_len(fdefq));
1894 		__skb_queue_purge(fdefq);
1895 	}
1896 }
1897 
1898 /* tipc_link_validate_msg(): validate message against current link state
1899  * Returns true if message should be accepted, otherwise false
1900  */
1901 bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
1902 {
1903 	u16 curr_session = l->peer_session;
1904 	u16 session = msg_session(hdr);
1905 	int mtyp = msg_type(hdr);
1906 
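	/* Session and state numbers are 16 bits wide and wrap, so
	 * more()/less() compare them in circular (mod 2^16) order.
	 */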
1907 	if (msg_user(hdr) != LINK_PROTOCOL)
1908 		return true;
1909 
1910 	switch (mtyp) {
1911 	case RESET_MSG:
1912 		if (!l->in_session)
1913 			return true;
1914 		/* Accept only RESET with new session number */
1915 		return more(session, curr_session);
1916 	case ACTIVATE_MSG:
1917 		if (!l->in_session)
1918 			return true;
1919 		/* Accept only ACTIVATE with new or current session number */
1920 		return !less(session, curr_session);
1921 	case STATE_MSG:
1922 		/* Accept only STATE with current session number */
1923 		if (!l->in_session)
1924 			return false;
1925 		if (session != curr_session)
1926 			return false;
1927 		/* Extra sanity check */
1928 		if (!link_is_up(l) && msg_ack(hdr))
1929 			return false;
1930 		if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
1931 			return true;
1932 		/* Accept only STATE with new sequence number */
1933 		return !less(msg_seqno(hdr), l->rcv_nxt_state);
1934 	default:
1935 		return false;
1936 	}
1937 }
1938 
1939 /* tipc_link_proto_rcv(): receive link level protocol message :
1940  * Note that network plane id propagates through the network, and may
1941  * change at any time. The node with lowest numerical id determines
1942  * network plane
1943  */
1944 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1945 			       struct sk_buff_head *xmitq)
1946 {
1947 	struct tipc_msg *hdr = buf_msg(skb);
1948 	struct tipc_gap_ack_blks *ga = NULL;
1949 	u16 rcvgap = 0;
1950 	u16 ack = msg_ack(hdr);
1951 	u16 gap = msg_seq_gap(hdr);
1952 	u16 peers_snd_nxt = msg_next_sent(hdr);
1953 	u16 peers_tol = msg_link_tolerance(hdr);
1954 	u16 peers_prio = msg_linkprio(hdr);
1955 	u16 rcv_nxt = l->rcv_nxt;
1956 	u32 dlen = msg_data_sz(hdr), glen = 0;
1957 	int mtyp = msg_type(hdr);
1958 	bool reply = msg_probe(hdr);
1959 	void *data;
1960 	char *if_name;
1961 	int rc = 0;
1962 
1963 	trace_tipc_proto_rcv(skb, false, l->name);
1964 
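	/* Discard messages claiming a data area larger than a 16-bit
	 * length field can describe.
	 */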
1965 	if (dlen > U16_MAX)
1966 		goto exit;
1967 
1968 	if (tipc_link_is_blocked(l) || !xmitq)
1969 		goto exit;
1970 
1971 	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
1972 		l->net_plane = msg_net_plane(hdr);
1973 
1974 	if (skb_linearize(skb))
1975 		goto exit;
1976 
1977 	hdr = buf_msg(skb);
1978 	data = msg_data(hdr);
1979 
1980 	if (!tipc_link_validate_msg(l, hdr)) {
1981 		trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
1982 		trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
1983 		goto exit;
1984 	}
1985 
1986 	switch (mtyp) {
1987 	case RESET_MSG:
1988 	case ACTIVATE_MSG:
1989 		/* Complete own link name with peer's interface name */
1990 		if_name = strrchr(l->name, ':') + 1;
1991 		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1992 			break;
1993 		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1994 			break;
1995 		strncpy(if_name, data, TIPC_MAX_IF_NAME);
1996 
1997 		/* Update own tolerance if peer indicates a non-zero value */
1998 		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
1999 			l->tolerance = peers_tol;
2000 			l->bc_rcvlink->tolerance = peers_tol;
2001 		}
2002 		/* Update own priority if peer's priority is higher */
2003 		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
2004 			l->priority = peers_prio;
2005 
2006 		/* If peer is going down we want full re-establish cycle */
2007 		if (msg_peer_stopping(hdr)) {
2008 			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2009 			break;
2010 		}
2011 
2012 		/* If this endpoint was re-created while peer was ESTABLISHING
2013 		 * it doesn't know current session number. Force re-synch.
2014 		 */
2015 		if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
2016 		    l->session != msg_dest_session(hdr)) {
2017 			if (less(l->session, msg_dest_session(hdr)))
2018 				l->session = msg_dest_session(hdr) + 1;
2019 			break;
2020 		}
2021 
2022 		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
2023 		if (mtyp == RESET_MSG || !link_is_up(l))
2024 			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
2025 
2026 		/* ACTIVATE_MSG takes up link if it was already locally reset */
2027 		if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
2028 			rc = TIPC_LINK_UP_EVT;
2029 
2030 		l->peer_session = msg_session(hdr);
2031 		l->in_session = true;
2032 		l->peer_bearer_id = msg_bearer_id(hdr);
2033 		if (l->mtu > msg_max_pkt(hdr))
2034 			l->mtu = msg_max_pkt(hdr);
2035 		break;
2036 
2037 	case STATE_MSG:
2038 		/* Receive Gap ACK blocks from peer if any */
2039 		if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
2040 			ga = (struct tipc_gap_ack_blks *)data;
2041 			glen = ntohs(ga->len);
2042 			/* sanity check: if failed, ignore Gap ACK blocks */
2043 			if (glen != tipc_gap_ack_blks_sz(ga->gack_cnt))
2044 				ga = NULL;
2045 		}
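		/* A Gap ACK area larger than the data area means the
		 * message is malformed; ignore it.
		 */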
2046 		if (glen > dlen)
2047 			break;
2048 
2049 		l->rcv_nxt_state = msg_seqno(hdr) + 1;
2050 
2051 		/* Update own tolerance if peer indicates a non-zero value */
2052 		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
2053 			l->tolerance = peers_tol;
2054 			l->bc_rcvlink->tolerance = peers_tol;
2055 		}
2056 		/* Update own prio if peer indicates a different value */
2057 		if ((peers_prio != l->priority) &&
2058 		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
2059 			l->priority = peers_prio;
2060 			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2061 		}
2062 
2063 		l->silent_intv_cnt = 0;
2064 		l->stats.recv_states++;
2065 		if (msg_probe(hdr))
2066 			l->stats.recv_probes++;
2067 
2068 		if (!link_is_up(l)) {
2069 			if (l->state == LINK_ESTABLISHING)
2070 				rc = TIPC_LINK_UP_EVT;
2071 			break;
2072 		}
2073 
2074 		tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
2075 			     &l->mon_state, l->bearer_id);
2076 
2077 		/* Send NACK if peer has sent pkts we haven't received yet */
2078 		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
2079 			rcvgap = peers_snd_nxt - l->rcv_nxt;
2080 		if (rcvgap || reply)
2081 			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
2082 						  rcvgap, 0, 0, xmitq);
2083 
2084 		rc |= tipc_link_advance_transmq(l, ack, gap, ga, xmitq);
2085 
2086 		/* If NACK, retransmit will now start at right position */
2087 		if (gap)
2088 			l->stats.recv_nacks++;
2089 
2090 		tipc_link_advance_backlog(l, xmitq);
2091 		if (unlikely(!skb_queue_empty(&l->wakeupq)))
2092 			link_prepare_wakeup(l);
2093 	}
2094 exit:
2095 	kfree_skb(skb);
2096 	return rc;
2097 }
2098 
2099 /* tipc_link_build_bc_proto_msg() - create broadcast protocol message
2100  */
2101 static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
2102 					 u16 peers_snd_nxt,
2103 					 struct sk_buff_head *xmitq)
2104 {
2105 	struct sk_buff *skb;
2106 	struct tipc_msg *hdr;
2107 	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
2108 	u16 ack = l->rcv_nxt - 1;
2109 	u16 gap_to = peers_snd_nxt - 1;
2110 
2111 	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
2112 			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
2113 	if (!skb)
2114 		return false;
2115 	hdr = buf_msg(skb);
2116 	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
2117 	msg_set_bcast_ack(hdr, ack);
2118 	msg_set_bcgap_after(hdr, ack);
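	/* NACK range runs from the last in-sequence packet up to the
	 * first deferred packet, or to the peer's send position if
	 * nothing has been deferred.
	 */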
2119 	if (dfrd_skb)
2120 		gap_to = buf_seqno(dfrd_skb) - 1;
2121 	msg_set_bcgap_to(hdr, gap_to);
2122 	msg_set_non_seq(hdr, bcast);
2123 	__skb_queue_tail(xmitq, skb);
2124 	return true;
2125 }
2126 
2127 /* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
2128  *
2129  * Give a newly added peer node the sequence number where it should
2130  * start receiving and acking broadcast packets.
2131  */
2132 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
2133 					struct sk_buff_head *xmitq)
2134 {
2135 	struct sk_buff_head list;
2136 
2137 	__skb_queue_head_init(&list);
2138 	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
2139 		return;
2140 	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
2141 	tipc_link_xmit(l, &list, xmitq);
2142 }
2143 
2144 /* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
2145  */
2146 void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
2147 {
2148 	int mtyp = msg_type(hdr);
2149 	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2150 
2151 	if (link_is_up(l))
2152 		return;
2153 
2154 	if (msg_user(hdr) == BCAST_PROTOCOL) {
2155 		l->rcv_nxt = peers_snd_nxt;
2156 		l->state = LINK_ESTABLISHED;
2157 		return;
2158 	}
2159 
2160 	if (l->peer_caps & TIPC_BCAST_SYNCH)
2161 		return;
2162 
2163 	if (msg_peer_node_is_up(hdr))
2164 		return;
2165 
2166 	/* Compatibility: accept older, less safe initial synch data */
2167 	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
2168 		l->rcv_nxt = peers_snd_nxt;
2169 }
2170 
2171 /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
2172  */
2173 int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
2174 			  struct sk_buff_head *xmitq)
2175 {
2176 	struct tipc_link *snd_l = l->bc_sndlink;
2177 	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2178 	u16 from = msg_bcast_ack(hdr) + 1;
2179 	u16 to = from + msg_bc_gap(hdr) - 1;
2180 	int rc = 0;
2181 
2182 	if (!link_is_up(l))
2183 		return rc;
2184 
2185 	if (!msg_peer_node_is_up(hdr))
2186 		return rc;
2187 
2188 	/* Open when peer acknowledges our bcast init msg (pkt #1) */
2189 	if (msg_ack(hdr))
2190 		l->bc_peer_is_up = true;
2191 
2192 	if (!l->bc_peer_is_up)
2193 		return rc;
2194 
2195 	l->stats.recv_nacks++;
2196 
2197 	/* Ignore if peers_snd_nxt goes beyond receive window */
2198 	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
2199 		return rc;
2200 
2201 	rc = tipc_link_bc_retrans(snd_l, l, from, to, xmitq);
2202 
2203 	l->snd_nxt = peers_snd_nxt;
2204 	if (link_bc_rcv_gap(l))
2205 		rc |= TIPC_LINK_SND_STATE;
2206 
2207 	/* Return now if sender supports nack via STATE messages */
2208 	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
2209 		return rc;
2210 
2211 	/* Otherwise, be backwards compatible */
2212 
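	/* Legacy NACK throttling: after a NACK is sent, the next
	 * opportunity is suppressed, so roughly every other sync from
	 * the peer can trigger one.
	 */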
2213 	if (!more(peers_snd_nxt, l->rcv_nxt)) {
2214 		l->nack_state = BC_NACK_SND_CONDITIONAL;
2215 		return 0;
2216 	}
2217 
2218 	/* Don't NACK if one was recently sent or peeked */
2219 	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
2220 		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2221 		return 0;
2222 	}
2223 
2224 	/* Conditionally delay NACK sending until next synch rcv */
2225 	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
2226 		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2227 		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
2228 			return 0;
2229 	}
2230 
2231 	/* Send NACK now but suppress next one */
2232 	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
2233 	l->nack_state = BC_NACK_SND_SUPPRESS;
2234 	return 0;
2235 }
2236 
2237 void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
2238 			  struct sk_buff_head *xmitq)
2239 {
2240 	struct sk_buff *skb, *tmp;
2241 	struct tipc_link *snd_l = l->bc_sndlink;
2242 
2243 	if (!link_is_up(l) || !l->bc_peer_is_up)
2244 		return;
2245 
2246 	if (!more(acked, l->acked))
2247 		return;
2248 
2249 	trace_tipc_link_bc_ack(l, l->acked, acked, &snd_l->transmq);
2250 	/* Skip over packets peer has already acked */
2251 	skb_queue_walk(&snd_l->transmq, skb) {
2252 		if (more(buf_seqno(skb), l->acked))
2253 			break;
2254 	}
2255 
2256 	/* Update/release the packets peer is acking now */
2257 	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
2258 		if (more(buf_seqno(skb), acked))
2259 			break;
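		/* 'ackers' counts peers still expected to ack this buffer;
		 * free it only when this peer is the last one.
		 */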
2260 		if (!--TIPC_SKB_CB(skb)->ackers) {
2261 			__skb_unlink(skb, &snd_l->transmq);
2262 			kfree_skb(skb);
2263 		}
2264 	}
2265 	l->acked = acked;
2266 	tipc_link_advance_backlog(snd_l, xmitq);
2267 	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
2268 		link_prepare_wakeup(snd_l);
2269 }
2270 
2271 /* tipc_link_bc_nack_rcv(): receive broadcast nack message
2272  * This function is here for backwards compatibility, since
2273  * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
2274  */
2275 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
2276 			  struct sk_buff_head *xmitq)
2277 {
2278 	struct tipc_msg *hdr = buf_msg(skb);
2279 	u32 dnode = msg_destnode(hdr);
2280 	int mtyp = msg_type(hdr);
2281 	u16 acked = msg_bcast_ack(hdr);
2282 	u16 from = acked + 1;
2283 	u16 to = msg_bcgap_to(hdr);
2284 	u16 peers_snd_nxt = to + 1;
2285 	int rc = 0;
2286 
2287 	kfree_skb(skb);
2288 
2289 	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
2290 		return 0;
2291 
2292 	if (mtyp != STATE_MSG)
2293 		return 0;
2294 
2295 	if (dnode == tipc_own_addr(l->net)) {
2296 		tipc_link_bc_ack_rcv(l, acked, xmitq);
2297 		rc = tipc_link_bc_retrans(l->bc_sndlink, l, from, to, xmitq);
2298 		l->stats.recv_nacks++;
2299 		return rc;
2300 	}
2301 
2302 	/* Msg for other node => suppress own NACK at next sync if applicable */
2303 	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
2304 		l->nack_state = BC_NACK_SND_SUPPRESS;
2305 
2306 	return 0;
2307 }
2308 
2309 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
2310 {
2311 	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
2312 
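	/* Backlog limits scale with both importance level and the
	 * configured window; the SYSTEM limit is sized so that a full
	 * name table bulk distribution (TIPC_MAX_PUBL items) fits.
	 */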
2313 	l->window = win;
2314 	l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
2315 	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
2316 	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
2317 	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
2318 	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
2319 }
2320 
2321 /**
2322  * tipc_link_reset_stats - reset link statistics
2323  * @l: pointer to link
2324  */
2325 void tipc_link_reset_stats(struct tipc_link *l)
2326 {
2327 	memset(&l->stats, 0, sizeof(l->stats));
2328 }
2329 
2330 static void link_print(struct tipc_link *l, const char *str)
2331 {
2332 	struct sk_buff *hskb = skb_peek(&l->transmq);
2333 	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
2334 	u16 tail = l->snd_nxt - 1;
2335 
2336 	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
2337 	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
2338 		skb_queue_len(&l->transmq), head, tail,
2339 		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
2340 }
2341 
2342 /* Parse and validate nested (link) properties valid for media, bearer and link
2343  */
2344 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
2345 {
2346 	int err;
2347 
2348 	err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
2349 					  tipc_nl_prop_policy, NULL);
2350 	if (err)
2351 		return err;
2352 
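	/* Range-check only the properties that were supplied; absent
	 * attributes are simply not validated here.
	 */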
2353 	if (props[TIPC_NLA_PROP_PRIO]) {
2354 		u32 prio;
2355 
2356 		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2357 		if (prio > TIPC_MAX_LINK_PRI)
2358 			return -EINVAL;
2359 	}
2360 
2361 	if (props[TIPC_NLA_PROP_TOL]) {
2362 		u32 tol;
2363 
2364 		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2365 		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
2366 			return -EINVAL;
2367 	}
2368 
2369 	if (props[TIPC_NLA_PROP_WIN]) {
2370 		u32 win;
2371 
2372 		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2373 		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
2374 			return -EINVAL;
2375 	}
2376 
2377 	return 0;
2378 }
2379 
2380 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
2381 {
2382 	int i;
2383 	struct nlattr *stats;
2384 
2385 	struct nla_map {
2386 		u32 key;
2387 		u32 val;
2388 	};
2389 
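	/* Map each netlink attribute id to its counter value; the loop
	 * below emits every entry as a u32 attribute.
	 */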
2390 	struct nla_map map[] = {
2391 		{TIPC_NLA_STATS_RX_INFO, 0},
2392 		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
2393 		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
2394 		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
2395 		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
2396 		{TIPC_NLA_STATS_TX_INFO, 0},
2397 		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
2398 		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
2399 		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
2400 		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
2401 		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
2402 			s->msg_length_counts : 1},
2403 		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
2404 		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
2405 		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
2406 		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
2407 		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
2408 		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
2409 		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
2410 		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
2411 		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
2412 		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
2413 		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
2414 		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
2415 		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
2416 		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
2417 		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
2418 		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
2419 		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
2420 		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
2421 		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
2422 		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
2423 		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
2424 		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
2425 			(s->accu_queue_sz / s->queue_sz_counts) : 0}
2426 	};
2427 
2428 	stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2429 	if (!stats)
2430 		return -EMSGSIZE;
2431 
2432 	for (i = 0; i <  ARRAY_SIZE(map); i++)
2433 		if (nla_put_u32(skb, map[i].key, map[i].val))
2434 			goto msg_full;
2435 
2436 	nla_nest_end(skb, stats);
2437 
2438 	return 0;
2439 msg_full:
2440 	nla_nest_cancel(skb, stats);
2441 
2442 	return -EMSGSIZE;
2443 }
2444 
2445 /* Caller should hold appropriate locks to protect the link */
2446 int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2447 		       struct tipc_link *link, int nlflags)
2448 {
2449 	u32 self = tipc_own_addr(net);
2450 	struct nlattr *attrs;
2451 	struct nlattr *prop;
2452 	void *hdr;
2453 	int err;
2454 
2455 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2456 			  nlflags, TIPC_NL_LINK_GET);
2457 	if (!hdr)
2458 		return -EMSGSIZE;
2459 
2460 	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2461 	if (!attrs)
2462 		goto msg_full;
2463 
2464 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2465 		goto attr_msg_full;
2466 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
2467 		goto attr_msg_full;
2468 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
2469 		goto attr_msg_full;
2470 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
2471 		goto attr_msg_full;
2472 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
2473 		goto attr_msg_full;
2474 
2475 	if (tipc_link_is_up(link))
2476 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2477 			goto attr_msg_full;
2478 	if (link->active)
2479 		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2480 			goto attr_msg_full;
2481 
2482 	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2483 	if (!prop)
2484 		goto attr_msg_full;
2485 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2486 		goto prop_msg_full;
2487 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2488 		goto prop_msg_full;
2489 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2490 			link->window))
2491 		goto prop_msg_full;
2492 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2493 		goto prop_msg_full;
2494 	nla_nest_end(msg->skb, prop);
2495 
2496 	err = __tipc_nl_add_stats(msg->skb, &link->stats);
2497 	if (err)
2498 		goto attr_msg_full;
2499 
2500 	nla_nest_end(msg->skb, attrs);
2501 	genlmsg_end(msg->skb, hdr);
2502 
2503 	return 0;
2504 
2505 prop_msg_full:
2506 	nla_nest_cancel(msg->skb, prop);
2507 attr_msg_full:
2508 	nla_nest_cancel(msg->skb, attrs);
2509 msg_full:
2510 	genlmsg_cancel(msg->skb, hdr);
2511 
2512 	return -EMSGSIZE;
2513 }
2514 
2515 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
2516 				      struct tipc_stats *stats)
2517 {
2518 	int i;
2519 	struct nlattr *nest;
2520 
2521 	struct nla_map {
2522 		__u32 key;
2523 		__u32 val;
2524 	};
2525 
2526 	struct nla_map map[] = {
2527 		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
2528 		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2529 		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2530 		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2531 		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
2532 		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
2533 		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2534 		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2535 		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2536 		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2537 		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2538 		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2539 		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2540 		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2541 		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2542 		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2543 		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2544 		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2545 		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2546 			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2547 	};
2548 
2549 	nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2550 	if (!nest)
2551 		return -EMSGSIZE;
2552 
2553 	for (i = 0; i <  ARRAY_SIZE(map); i++)
2554 		if (nla_put_u32(skb, map[i].key, map[i].val))
2555 			goto msg_full;
2556 
2557 	nla_nest_end(skb, nest);
2558 
2559 	return 0;
2560 msg_full:
2561 	nla_nest_cancel(skb, nest);
2562 
2563 	return -EMSGSIZE;
2564 }
2565 
2566 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
2567 {
2568 	int err;
2569 	void *hdr;
2570 	struct nlattr *attrs;
2571 	struct nlattr *prop;
2572 	struct tipc_net *tn = net_generic(net, tipc_net_id);
2573 	u32 bc_mode = tipc_bcast_get_broadcast_mode(net);
2574 	u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
2575 	struct tipc_link *bcl = tn->bcl;
2576 
2577 	if (!bcl)
2578 		return 0;
2579 
2580 	tipc_bcast_lock(net);
2581 
2582 	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2583 			  NLM_F_MULTI, TIPC_NL_LINK_GET);
2584 	if (!hdr) {
2585 		tipc_bcast_unlock(net);
2586 		return -EMSGSIZE;
2587 	}
2588 
2589 	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2590 	if (!attrs)
2591 		goto msg_full;
2592 
2593 	/* The broadcast link is always up */
2594 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2595 		goto attr_msg_full;
2596 
2597 	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2598 		goto attr_msg_full;
2599 	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2600 		goto attr_msg_full;
2601 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
2602 		goto attr_msg_full;
2603 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
2604 		goto attr_msg_full;
2605 
2606 	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2607 	if (!prop)
2608 		goto attr_msg_full;
2609 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
2610 		goto prop_msg_full;
2611 	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
2612 		goto prop_msg_full;
2613 	if (bc_mode & BCLINK_MODE_SEL)
2614 		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
2615 				bc_ratio))
2616 			goto prop_msg_full;
2617 	nla_nest_end(msg->skb, prop);
2618 
2619 	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2620 	if (err)
2621 		goto attr_msg_full;
2622 
2623 	tipc_bcast_unlock(net);
2624 	nla_nest_end(msg->skb, attrs);
2625 	genlmsg_end(msg->skb, hdr);
2626 
2627 	return 0;
2628 
2629 prop_msg_full:
2630 	nla_nest_cancel(msg->skb, prop);
2631 attr_msg_full:
2632 	nla_nest_cancel(msg->skb, attrs);
2633 msg_full:
2634 	tipc_bcast_unlock(net);
2635 	genlmsg_cancel(msg->skb, hdr);
2636 
2637 	return -EMSGSIZE;
2638 }
2639 
2640 void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2641 			     struct sk_buff_head *xmitq)
2642 {
2643 	l->tolerance = tol;
2644 	if (l->bc_rcvlink)
2645 		l->bc_rcvlink->tolerance = tol;
2646 	if (link_is_up(l))
2647 		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
2648 }
2649 
2650 void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2651 			struct sk_buff_head *xmitq)
2652 {
2653 	l->priority = prio;
2654 	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
2655 }
2656 
2657 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2658 {
2659 	l->abort_limit = limit;
2660 }
2661 
2662 char *tipc_link_name_ext(struct tipc_link *l, char *buf)
2663 {
2664 	if (!l)
2665 		scnprintf(buf, TIPC_MAX_LINK_NAME, "null");
2666 	else if (link_is_bc_sndlink(l))
2667 		scnprintf(buf, TIPC_MAX_LINK_NAME, "broadcast-sender");
2668 	else if (link_is_bc_rcvlink(l))
2669 		scnprintf(buf, TIPC_MAX_LINK_NAME,
2670 			  "broadcast-receiver, peer %x", l->addr);
2671 	else
2672 		memcpy(buf, l->name, TIPC_MAX_LINK_NAME);
2673 
2674 	return buf;
2675 }
2676 
2677 /**
2678  * tipc_link_dump - dump TIPC link data
2679  * @l: tipc link to be dumped
2680  * @dqueues: bitmask deciding which link queues to dump:
2681  *           - TIPC_DUMP_NONE: don't dump link queues
2682  *           - TIPC_DUMP_TRANSMQ: dump link transmq queue
2683  *           - TIPC_DUMP_BACKLOGQ: dump link backlog queue
2684  *           - TIPC_DUMP_DEFERDQ: dump link deferdq queue
2685  *           - TIPC_DUMP_INPUTQ: dump link input queue
2686  *           - TIPC_DUMP_WAKEUP: dump link wakeup queue
2687  *           - TIPC_DUMP_ALL: dump all the link queues above
2688  * @buf: buffer to which the dump data is written
2689  */
2690 int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
2691 {
2692 	int i = 0;
2693 	size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
2694 	struct sk_buff_head *list;
2695 	struct sk_buff *hskb, *tskb;
2696 	u32 len;
2697 
2698 	if (!l) {
2699 		i += scnprintf(buf, sz, "link data: (null)\n");
2700 		return i;
2701 	}
2702 
2703 	i += scnprintf(buf, sz, "link data: %x", l->addr);
2704 	i += scnprintf(buf + i, sz - i, " %x", l->state);
2705 	i += scnprintf(buf + i, sz - i, " %u", l->in_session);
2706 	i += scnprintf(buf + i, sz - i, " %u", l->session);
2707 	i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
2708 	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
2709 	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
2710 	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
2711 	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
2712 	i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
2713 	i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
2714 	i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
2715 	i += scnprintf(buf + i, sz - i, " %u", 0);
2716 	i += scnprintf(buf + i, sz - i, " %u", 0);
2717 	i += scnprintf(buf + i, sz - i, " %u", l->acked);
2718 
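	/* Each queue below contributes "| len head-seqno tail-seqno"
	 * to the same line.
	 */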
2719 	list = &l->transmq;
2720 	len = skb_queue_len(list);
2721 	hskb = skb_peek(list);
2722 	tskb = skb_peek_tail(list);
2723 	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2724 		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2725 		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2726 
2727 	list = &l->deferdq;
2728 	len = skb_queue_len(list);
2729 	hskb = skb_peek(list);
2730 	tskb = skb_peek_tail(list);
2731 	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2732 		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2733 		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2734 
2735 	list = &l->backlogq;
2736 	len = skb_queue_len(list);
2737 	hskb = skb_peek(list);
2738 	tskb = skb_peek_tail(list);
2739 	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2740 		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2741 		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2742 
2743 	list = l->inputq;
2744 	len = skb_queue_len(list);
2745 	hskb = skb_peek(list);
2746 	tskb = skb_peek_tail(list);
2747 	i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
2748 		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2749 		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2750 
2751 	if (dqueues & TIPC_DUMP_TRANSMQ) {
2752 		i += scnprintf(buf + i, sz - i, "transmq: ");
2753 		i += tipc_list_dump(&l->transmq, false, buf + i);
2754 	}
2755 	if (dqueues & TIPC_DUMP_BACKLOGQ) {
2756 		i += scnprintf(buf + i, sz - i,
2757 			       "backlogq: <%u %u %u %u %u>, ",
2758 			       l->backlog[TIPC_LOW_IMPORTANCE].len,
2759 			       l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
2760 			       l->backlog[TIPC_HIGH_IMPORTANCE].len,
2761 			       l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
2762 			       l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
2763 		i += tipc_list_dump(&l->backlogq, false, buf + i);
2764 	}
2765 	if (dqueues & TIPC_DUMP_DEFERDQ) {
2766 		i += scnprintf(buf + i, sz - i, "deferdq: ");
2767 		i += tipc_list_dump(&l->deferdq, false, buf + i);
2768 	}
2769 	if (dqueues & TIPC_DUMP_INPUTQ) {
2770 		i += scnprintf(buf + i, sz - i, "inputq: ");
2771 		i += tipc_list_dump(l->inputq, false, buf + i);
2772 	}
2773 	if (dqueues & TIPC_DUMP_WAKEUP) {
2774 		i += scnprintf(buf + i, sz - i, "wakeup: ");
2775 		i += tipc_list_dump(&l->wakeupq, false, buf + i);
2776 	}
2777 
2778 	return i;
2779 }
2780