// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)
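
/*
 * A worked sizing example (a sketch, assuming the usual 14-byte ethhdr,
 * 20-byte iphdr and 8-byte udphdr): MAX_SKB_SIZE is
 * 14 + 20 + 8 + 1460 = 1502 bytes, so a full pool pins at most
 * MAX_SKBS * 1502 = 48064 bytes, i.e. roughly 47 KiB.
 */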

static void zap_completion_queue(void);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct netdev_queue *txq)
{
	netdev_tx_t status = NETDEV_TX_OK;
	netdev_features_t features;

	features = netif_skb_features(skb);

	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (unlikely(!skb)) {
			/* This is actually a packet drop, but we
			 * don't want the code that calls this
			 * function to try and operate on a NULL skb.
			 */
			goto out;
		}
	}

	status = netdev_start_xmit(skb, dev, txq, false);

out:
	return status;
}

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;
		unsigned int q_index;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		/* check if skb->queue_mapping is still valid */
		q_index = skb_get_queue_mapping(skb);
		if (unlikely(q_index >= dev->real_num_tx_queues)) {
			q_index = q_index % dev->real_num_tx_queues;
			skb_set_queue_mapping(skb, q_index);
		}
		txq = netdev_get_tx_queue(dev, q_index);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}

static int netif_local_xmit_active(struct net_device *dev)
{
	int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
			return 1;
	}

	return 0;
}

static void poll_one_napi(struct napi_struct *napi)
{
	int work;

	/* If we set this bit but see that it has already been set,
	 * that indicates that napi has been disabled and we need
	 * to abort this operation.
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		return;

	/* We explicitly pass the polling call a budget of 0 to
	 * indicate that we are clearing the Tx path only.
	 */
	work = napi->poll(napi, 0);
	WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi, work, 0);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int cpu = smp_processor_id();

	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
			poll_one_napi(napi);
			smp_store_release(&napi->poll_owner, -1);
		}
	}
}

void netpoll_poll_dev(struct net_device *dev)
{
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	const struct net_device_ops *ops;

	/* Don't do any rx activity if the dev_lock semaphore is held;
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state.
	 */
	if (!ni || down_trylock(&ni->dev_lock))
		return;

	/* Some drivers will take the same locks in poll and xmit,
	 * so we can't poll if the local CPU is already in xmit.
	 */
	if (!netif_running(dev) || netif_local_xmit_active(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (ops->ndo_poll_controller)
		ops->ndo_poll_controller(dev);

	poll_napi(dev);

	up(&ni->dev_lock);

	zap_completion_queue();
}
EXPORT_SYMBOL(netpoll_poll_dev);
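
/*
 * Within this file, netpoll_poll_dev() is called from find_skb() and
 * from the transmit retry loop in __netpoll_send_skb(): when no skb or
 * no tx room is available, polling the device gives the driver a chance
 * to reap completed tx descriptors and free memory.
 */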

void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);

void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);
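
/*
 * A minimal usage sketch (hypothetical caller, mirroring what the
 * dev_open()/dev_close() paths do per the comment in
 * netpoll_poll_dev()): bracket a device state change so that no
 * netpoll poll activity can run concurrently.
 *
 *	netpoll_poll_disable(dev);	// may sleep
 *	... change device state ...
 *	netpoll_poll_enable(dev);
 */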

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				refcount_set(&skb->users, 1);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	refcount_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	netdev_tx_t status = NETDEV_TX_BUSY;
	struct net_device *dev;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	lockdep_assert_irqs_disabled();

	dev = np->dev;
	npinfo = rcu_dereference_bh(dev->npinfo);

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return NET_XMIT_DROP;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_core_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (dev_xmit_complete(status))
					break;
			}

			/* tickle the device, maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			"netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
			dev->name, dev->netdev_ops->ndo_start_xmit);
	}

	if (!dev_xmit_complete(status)) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
	return NETDEV_TX_OK;
}

netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	unsigned long flags;
	netdev_tx_t ret;

	if (unlikely(!np)) {
		dev_kfree_skb_irq(skb);
		ret = NET_XMIT_DROP;
	} else {
		local_irq_save(flags);
		ret = __netpoll_send_skb(np, skb);
		local_irq_restore(flags);
	}
	return ret;
}
EXPORT_SYMBOL(netpoll_send_skb);

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	WARN_ON_ONCE(!irqs_disabled());

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
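
/*
 * A minimal caller sketch (hedged; netconsole-style, not code from this
 * file): a client embeds a struct netpoll, configures it (e.g. via
 * netpoll_parse_options()), runs netpoll_setup() once, then ships each
 * message with netpoll_send_udp(), which expects IRQs disabled.
 *
 *	static struct netpoll np;	// configured elsewhere
 *	...
 *	netpoll_send_udp(&np, msg, strlen(msg));
 */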

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}

int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
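
/*
 * The option string parsed above has the form (fields before a
 * delimiter may be left empty to keep their defaults):
 *
 *	[src_port]@[src_ip]/[dev],[dst_port]@[dst_ip]/[dst_mac]
 *
 * e.g., with hypothetical addresses:
 *
 *	6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55
 */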

int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		refcount_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		refcount_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL, *dev = NULL;
	struct net *net = current->nsproxy->net_ns;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name[0])
		ndev = __dev_get_by_name(net, np->dev_name);

	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	/* bring up DSA management network devices first */
	for_each_netdev(net, dev) {
		if (!netdev_uses_dsa(dev))
			continue;

		err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
		if (err < 0) {
			np_err(np, "%s failed to open %s\n",
			       np->dev_name, dev->name);
			goto put;
		}
	}

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev, NULL);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */
		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			const struct in_ifaddr *ifa;

			in_dev = __in_dev_get_rtnl(ndev);
			if (!in_dev)
				goto put_noaddr;

			ifa = rtnl_dereference(in_dev->ifa_list);
			if (!ifa) {
put_noaddr:
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = ifa->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
					    !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
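
/*
 * A minimal end-to-end sketch (hypothetical client code, using only
 * helpers defined in this file): parse a config string, attach to the
 * device, send one message, then detach.
 *
 *	struct netpoll *np = kzalloc(sizeof(*np), GFP_KERNEL);
 *
 *	if (np && !netpoll_parse_options(np, cmdline) &&
 *	    !netpoll_setup(np)) {
 *		netpoll_send_udp(np, "hello\n", 6);
 *		netpoll_cleanup(np);
 *	}
 *	kfree(np);
 */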

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (refcount_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void __netpoll_free(struct netpoll *np)
{
	ASSERT_RTNL();

	/* Wait for transmitting packets to finish before freeing. */
	synchronize_rcu();
	__netpoll_cleanup(np);
	kfree(np);
}
EXPORT_SYMBOL_GPL(__netpoll_free);

void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);