/* Peer event handling, typically ICMP messages.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
static void rxrpc_distribute_error(struct rxrpc_peer *, int,
				   enum rxrpc_call_completion);

/*
 * Find the peer associated with an ICMP packet.
 */
static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
						     const struct sk_buff *skb,
						     struct sockaddr_rxrpc *srx)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

	_enter("");

	memset(srx, 0, sizeof(*srx));
	srx->transport_type = local->srx.transport_type;
	srx->transport_len = local->srx.transport_len;
	srx->transport.family = local->srx.transport.family;

	/* Can we see an ICMPv4 packet on an ICMPv6 listening socket, and
	 * vice versa?
	 */
	switch (srx->transport.family) {
	case AF_INET:
		srx->transport.sin.sin_port = serr->port;
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP");
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6 on v4 sock");
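			/* The ICMPv6 error quotes a 16-byte IPv6 address; for
			 * a v4-mapped address (::ffff:a.b.c.d) the IPv4 part
			 * is the last four bytes, hence the +12 offset.
			 */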
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset + 12,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
			       sizeof(struct in_addr));
			break;
		}
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		srx->transport.sin6.sin6_port = serr->port;
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6");
			memcpy(&srx->transport.sin6.sin6_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in6_addr));
			break;
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP on v6 sock");
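			/* Wrap the quoted IPv4 address in the v4-mapped IPv6
			 * prefix ::ffff:0:0/96 to fit it into sin6_addr.
			 */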
			srx->transport.sin6.sin6_addr.s6_addr32[0] = 0;
			srx->transport.sin6.sin6_addr.s6_addr32[1] = 0;
			srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
			memcpy(srx->transport.sin6.sin6_addr.s6_addr + 12,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin6.sin6_addr,
			       &ipv6_hdr(skb)->saddr,
			       sizeof(struct in6_addr));
			break;
		}
		break;
#endif

	default:
		BUG();
	}

	return rxrpc_lookup_peer_rcu(local, srx);
}


/*
 * Handle an MTU/fragmentation problem.
 */
static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
{
	u32 mtu = serr->ee.ee_info;
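	/* For ICMP_FRAG_NEEDED, ee_info carries the next-hop MTU from the
	 * ICMP error; it may be 0 if the reporting router supplied none.
	 */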

	_net("Rx ICMP Fragmentation Needed (%d)", mtu);

	/* wind down the local interface MTU */
	if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
		peer->if_mtu = mtu;
		_net("I/F MTU %u", mtu);
	}

	if (mtu == 0) {
		/* they didn't give us a size, estimate one */
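		/* Heuristic: above standard Ethernet size, halve the MTU but
		 * not below 1500; otherwise shave off 100 bytes, keeping at
		 * least hdrsize + 4.  E.g. an if_mtu of 9000 halves to 4500,
		 * while 1500 drops to 1400.
		 */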
		mtu = peer->if_mtu;
		if (mtu > 1500) {
			mtu >>= 1;
			if (mtu < 1500)
				mtu = 1500;
		} else {
			mtu -= 100;
			if (mtu < peer->hdrsize)
				mtu = peer->hdrsize + 4;
		}
	}

	if (mtu < peer->mtu) {
		spin_lock_bh(&peer->lock);
		peer->mtu = mtu;
		peer->maxdata = peer->mtu - peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)",
		     peer->mtu, peer->maxdata);
	}
}

/*
 * Handle an error received on the local endpoint.
 */
void rxrpc_error_report(struct sock *sk)
{
	struct sock_exterr_skb *serr;
	struct sockaddr_rxrpc srx;
	struct rxrpc_local *local = sk->sk_user_data;
	struct rxrpc_peer *peer;
	struct sk_buff *skb;

	if (unlikely(!local))
		return;

	_enter("%p{%d}", sk, local->debug_id);

	skb = sock_dequeue_err_skb(sk);
	if (!skb) {
		_leave("UDP socket errqueue empty");
		return;
	}
	rxrpc_new_skb(skb, rxrpc_skb_rx_received);
	serr = SKB_EXT_ERR(skb);
	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
		_leave("UDP empty message");
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		return;
	}

	rcu_read_lock();
	peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
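	/* The peer was found under RCU, so it may already be on the way to
	 * destruction; rxrpc_get_peer_maybe() takes a ref only if the usage
	 * count hasn't yet reached zero.
	 */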
	if (peer && !rxrpc_get_peer_maybe(peer))
		peer = NULL;
	if (!peer) {
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		_leave(" [no peer]");
		return;
	}

	trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);

	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	    serr->ee.ee_type == ICMP_DEST_UNREACH &&
	    serr->ee.ee_code == ICMP_FRAG_NEEDED) {
		rxrpc_adjust_mtu(peer, serr);
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		rxrpc_put_peer(peer);
		_leave(" [MTU update]");
		return;
	}

	rxrpc_store_error(peer, serr);
	rcu_read_unlock();
	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
	rxrpc_put_peer(peer);

	_leave("");
}

/*
 * Map an error report to error codes on the peer record.
 */
static void rxrpc_store_error(struct rxrpc_peer *peer,
			      struct sock_exterr_skb *serr)
{
	enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
	struct sock_extended_err *ee;
	int err;

	_enter("");

	ee = &serr->ee;

	err = ee->ee_errno;

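	/* Default to a network-error completion; the SO_EE_ORIGIN_NONE and
	 * SO_EE_ORIGIN_LOCAL cases below reclassify the error as local.
	 */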
	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_ICMP:
		switch (ee->ee_type) {
		case ICMP_DEST_UNREACH:
			switch (ee->ee_code) {
			case ICMP_NET_UNREACH:
				_net("Rx Received ICMP Network Unreachable");
				break;
			case ICMP_HOST_UNREACH:
				_net("Rx Received ICMP Host Unreachable");
				break;
			case ICMP_PORT_UNREACH:
				_net("Rx Received ICMP Port Unreachable");
				break;
			case ICMP_NET_UNKNOWN:
				_net("Rx Received ICMP Unknown Network");
				break;
			case ICMP_HOST_UNKNOWN:
				_net("Rx Received ICMP Unknown Host");
				break;
			default:
				_net("Rx Received ICMP DestUnreach code=%u",
				     ee->ee_code);
				break;
			}
			break;

		case ICMP_TIME_EXCEEDED:
			_net("Rx Received ICMP TTL Exceeded");
			break;

		default:
			_proto("Rx Received ICMP error { type=%u code=%u }",
			       ee->ee_type, ee->ee_code);
			break;
		}
		break;

	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_LOCAL:
		_proto("Rx Received local error { error=%d }", err);
		compl = RXRPC_CALL_LOCAL_ERROR;
		break;

	case SO_EE_ORIGIN_ICMP6:
	default:
		_proto("Rx Received error report { orig=%u }", ee->ee_origin);
		break;
	}

	rxrpc_distribute_error(peer, err, compl);
}

/*
 * Distribute an error that occurred on a peer.
 */
static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
				   enum rxrpc_call_completion compl)
{
	struct rxrpc_call *call;

	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
		rxrpc_see_call(call);
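		/* The positive errno from the report is negated here to match
		 * the kernel's usual negative-error convention.
		 */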
		if (call->state < RXRPC_CALL_COMPLETE &&
		    rxrpc_set_call_completion(call, compl, 0, -error))
			rxrpc_notify_socket(call);
	}
}

/*
 * Add RTT information to cache.  This is called in softirq mode; updates to
 * the RTT ring buffer are serialised by the peer's rtt_input_lock.
 */
void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
			rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
			ktime_t send_time, ktime_t resp_time)
{
	struct rxrpc_peer *peer = call->peer;
	s64 rtt;
	u64 sum = peer->rtt_sum, avg;
	u8 cursor = peer->rtt_cursor, usage = peer->rtt_usage;

	rtt = ktime_to_ns(ktime_sub(resp_time, send_time));
	if (rtt < 0)
		return;

	spin_lock(&peer->rtt_input_lock);

	/* Replace the oldest datum in the RTT buffer */
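	/* The cache is a power-of-two ring: drop the evicted sample from the
	 * running sum, add the new one, and wrap the cursor with a mask.
	 */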
	sum -= peer->rtt_cache[cursor];
	sum += rtt;
	peer->rtt_cache[cursor] = rtt;
	peer->rtt_cursor = (cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1);
	peer->rtt_sum = sum;
	if (usage < RXRPC_RTT_CACHE_SIZE) {
		usage++;
		peer->rtt_usage = usage;
	}

	spin_unlock(&peer->rtt_input_lock);

	/* Now recalculate the average */
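	/* A full buffer divides by a power-of-two constant; a partial one
	 * needs do_div() for the 64-bit division on 32-bit architectures.
	 */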
	if (usage == RXRPC_RTT_CACHE_SIZE) {
		avg = sum / RXRPC_RTT_CACHE_SIZE;
	} else {
		avg = sum;
		do_div(avg, usage);
	}

	/* Don't need to update this under lock */
	peer->rtt = avg;
	trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
			   usage, avg);
}

/*
 * Perform keep-alive pings.
 */
static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
					  struct list_head *collector,
					  time64_t base,
					  u8 cursor)
{
	struct rxrpc_peer *peer;
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
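	/* The bucket array length is a power of two, so ANDing with the mask
	 * wraps a bucket index without a modulo.
	 */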
	time64_t keepalive_at;
	int slot;

	spin_lock_bh(&rxnet->peer_hash_lock);

	while (!list_empty(collector)) {
		peer = list_entry(collector->next,
				  struct rxrpc_peer, keepalive_link);

		list_del_init(&peer->keepalive_link);
		if (!rxrpc_get_peer_maybe(peer))
			continue;

		if (__rxrpc_use_local(peer->local)) {
			spin_unlock_bh(&rxnet->peer_hash_lock);

			keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
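			/* slot is the number of whole seconds from base until
			 * this peer's next keepalive falls due.
			 */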
			slot = keepalive_at - base;
			_debug("%02x peer %u t=%d {%pISp}",
			       cursor, peer->debug_id, slot, &peer->srx.transport);

			if (keepalive_at <= base ||
			    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
				rxrpc_send_keepalive(peer);
				slot = RXRPC_KEEPALIVE_TIME;
			}

			/* A transmission to this peer occurred since we last
			 * examined it, so put it into the appropriate future
			 * bucket.
			 */
			slot += cursor;
			slot &= mask;
			spin_lock_bh(&rxnet->peer_hash_lock);
			list_add_tail(&peer->keepalive_link,
				      &rxnet->peer_keepalive[slot & mask]);
			rxrpc_unuse_local(peer->local);
		}
		rxrpc_put_peer_locked(peer);
	}

	spin_unlock_bh(&rxnet->peer_hash_lock);
}

/*
 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
 */
void rxrpc_peer_keepalive_worker(struct work_struct *work)
{
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, peer_keepalive_work);
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t base, now, delay;
	u8 cursor, stop;
	LIST_HEAD(collector);

	now = ktime_get_seconds();
	base = rxnet->peer_keepalive_base;
	cursor = rxnet->peer_keepalive_cursor;
	_enter("%lld,%u", base - now, cursor);

	if (!rxnet->live)
		return;

	/* Remove to a temporary list all the peers that are currently lodged
	 * in expired buckets plus all new peers.
	 *
	 * Everything in the bucket at the cursor is processed this
	 * second; the bucket at cursor + 1 goes at now + 1s and so
	 * on...
	 */
	spin_lock_bh(&rxnet->peer_hash_lock);
	list_splice_init(&rxnet->peer_keepalive_new, &collector);

	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
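	/* cursor and stop are u8 and may wrap past 255; casting their
	 * difference to s8 gives a correct "cursor < stop" test as long as
	 * they are fewer than 128 steps apart.
	 */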
	while (base <= now && (s8)(cursor - stop) < 0) {
		list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
				      &collector);
		base++;
		cursor++;
	}

	base = now;
	spin_unlock_bh(&rxnet->peer_hash_lock);

	rxnet->peer_keepalive_base = base;
	rxnet->peer_keepalive_cursor = cursor;
	rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
	ASSERT(list_empty(&collector));

	/* Schedule the timer for the next occupied timeslot. */
	cursor = rxnet->peer_keepalive_cursor;
	stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
	for (; (s8)(cursor - stop) < 0; cursor++) {
		if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
			break;
		base++;
	}

	now = ktime_get_seconds();
	delay = base - now;
	if (delay < 1)
		delay = 1;
	delay *= HZ;
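	/* timer_reduce() only ever moves the expiry earlier, so this cannot
	 * delay a sooner keepalive that another CPU has already scheduled.
	 */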
	if (rxnet->live)
		timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);

	_leave("");
}