// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>
#include <linux/bpf.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN	64
#define RETRY_US_LO	5000
#define RETRY_US_HI	10000
#define RETRY_MAX	2000	/* >10 sec */

#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static LIST_HEAD(netvsc_dev_list);

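/* Propagate promiscuous/allmulti flag changes to the VF (slave) device,
 * if one is bound, so both datapaths apply the same receive policy.
 */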
static void netvsc_change_rx_flags(struct net_device *net, int change)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	int inc;

	if (!vf_netdev)
		return;

	if (change & IFF_PROMISC) {
		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
		dev_set_promiscuity(vf_netdev, inc);
	}

	if (change & IFF_ALLMULTI) {
		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
		dev_set_allmulti(vf_netdev, inc);
	}
}

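/* Sync the synthetic NIC's unicast/multicast address lists to the VF
 * device (when present) and push the updated RNDIS receive filter to
 * the host.
 */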
static void netvsc_set_rx_mode(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct netvsc_device *nvdev;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev) {
		dev_uc_sync(vf_netdev, net);
		dev_mc_sync(vf_netdev, net);
	}

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)
		rndis_filter_update(nvdev);
	rcu_read_unlock();
}

static void netvsc_tx_enable(struct netvsc_device *nvscdev,
			     struct net_device *ndev)
{
	nvscdev->tx_disable = false;
	virt_wmb(); /* ensure queue wake up mechanism is on */

	netif_tx_wake_all_queues(ndev);
}

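/* Bring the synthetic device up: open the RNDIS filter, assert the
 * carrier if the host reports link up, and transparently open any
 * bound VF device as well.
 */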
static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	rdev = nvdev->extension;
	if (!rdev->link_state) {
		netif_carrier_on(net);
		netvsc_tx_enable(nvdev, net);
	}

	if (vf_netdev) {
		/* Setting the synthetic device up transparently sets
		 * the slave as up. If open fails, then the slave will
		 * still be offline (and not used).
		 */
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}

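/* Poll all channels until their inbound and outbound rings are drained,
 * sleeping RETRY_US_LO..RETRY_US_HI microseconds between passes; gives
 * up with -ETIMEDOUT after RETRY_MAX attempts (> 10 sec).
 */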
static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	/* Ensure pending bytes in ring are read */
	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;

			if (!chn)
				continue;

			/* make sure receive not running now */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > RETRY_MAX)
			return -ETIMEDOUT;

		usleep_range(RETRY_US_LO, RETRY_US_HI);
	}
}

static void netvsc_tx_disable(struct netvsc_device *nvscdev,
			      struct net_device *ndev)
{
	if (nvscdev) {
		nvscdev->tx_disable = true;
		virt_wmb(); /* ensure txq will not wake up after stop */
	}

	netif_tx_disable(ndev);
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netvsc_tx_disable(nvdev, net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		return 0;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	ret = netvsc_wait_until_empty(nvdev);
	if (ret)
		netdev_err(net, "Ring buffer not empty after closing rndis\n");

	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}

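/* Append a per-packet info (PPI) record of the given type to the RNDIS
 * message under construction and return a pointer to its payload.
 */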
static inline void *init_ppi_data(struct rndis_message *msg,
				  u32 ppi_size, u32 pkt_type)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt->data_offset += ppi_size;
	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
		+ rndis_pkt->per_pkt_info_len;

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->internal = 0;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi + 1;
}

/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
static inline u32 netvsc_get_hash(
	struct sk_buff *skb,
	const struct net_device_context *ndc)
{
	struct flow_keys flow;
	u32 hash, pkt_proto = 0;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	switch (flow.basic.ip_proto) {
	case IPPROTO_TCP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_TCP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_TCP6_L4HASH;

		break;

	case IPPROTO_UDP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_UDP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_UDP6_L4HASH;

		break;
	}

	if (pkt_proto & ndc->l4_hash) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			return 0;

		__skb_set_sw_hash(skb, hash, false);
	}

	return hash;
}

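/* Map the packet hash through the host-provided send indirection table
 * to pick a transmit queue, caching the result in the socket when it
 * changes.
 */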
static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
			      (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}

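/* Transmit queue selection: when a VF is bound, let the VF pick (and
 * record) the queue; otherwise fall back to netvsc_pick_tx(). The
 * result is folded into the synthetic device's queue range.
 */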
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;

		if (vf_ops->ndo_select_queue)
			txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
		else
			txq = netdev_pick_tx(vf_netdev, skb, NULL);

		/* Record the queue selected by VF so that it can be
		 * used for common case where VF has more queues than
		 * the synthetic device.
		 */
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

	while (txq >= ndev->real_num_tx_queues)
		txq -= ndev->real_num_tx_queues;

	return txq;
}

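/* Split a buffer described by (hvpfn, offset, len) into hv_page_buffer
 * entries, each confined to a single Hyper-V page; returns the number
 * of entries used.
 */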
static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	hvpfn += offset >> HV_HYP_PAGE_SHIFT;
	offset = offset & ~HV_HYP_PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = HV_HYP_PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = hvpfn;
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == HV_HYP_PAGE_SIZE && len) {
			hvpfn++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_hvpfn(hdr),
				  offset_in_hvpage(hdr),
				  len,
				  &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_hvpfn(data),
				  offset_in_hvpage(data),
				  skb_headlen(skb),
				  &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)),
					  skb_frag_off(frag),
					  skb_frag_size(frag),
					  &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = skb_frag_off(frag);

		/* Skip unused frames from start of page */
		offset &= ~HV_HYP_PAGE_MASK;
		pages += HVPFN_UP(offset + size);
	}
	return pages;
}

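/* Count the Hyper-V page slots needed to describe the whole skb:
 * linear area plus fragments, including page crossings.
 */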
static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_hvpage(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, HV_HYP_PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

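/* Classify the skb's network/transport protocols for checksum offload;
 * returns one of the TRANSPORT_INFO_* values.
 */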
static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
			  struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	unsigned int len = skb->len;
	int rc;

	skb->dev = vf_netdev;
	skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	rc = dev_queue_xmit(skb);
	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}

	return rc;
}

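/* Core transmit path: redirect to the VF when possible, otherwise build
 * an RNDIS packet (header, optional hash/VLAN/LSO/checksum PPIs, plus
 * the page buffer list describing the skb) and pass it to netvsc_send().
 */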
static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct net_device *vf_netdev;
	u32 rndis_msg_size;
	u32 hash;
	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

	/* If VF is present and up then redirect packets to it.
	 * Skip the VF if it is marked down or has no carrier.
	 * If netpoll is in use, then the VF cannot be used either.
	 */
	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net))
		return netvsc_vf_xmit(net, vf_netdev, skb);

	/* We will need at most two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If the skb is scattered across
	 * more pages than that, we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			sizeof_field(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;

	rndis_msg->msg.pkt = (struct rndis_packet) {
		.data_offset = sizeof(struct rndis_packet),
		.data_len = packet->total_data_buflen,
		.per_pkt_info_offset = sizeof(struct rndis_packet),
	};

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		u32 *hash_info;

		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
					  NBL_HASH_VALUE);
		*hash_info = hash;
	}

	/* When using AF_PACKET we need to drop VLAN header from
	 * the frame and update the SKB to allow the HOST OS
	 * to transmit the 802.1Q packet
	 */
	if (skb->protocol == htons(ETH_P_8021Q)) {
		u16 vlan_tci;

		skb_reset_mac_header(skb);
		if (eth_type_vlan(eth_hdr(skb)->h_proto)) {
			if (unlikely(__skb_vlan_pop(skb, &vlan_tci) != 0)) {
				++net_device_ctx->eth_stats.vlan_error;
				goto drop;
			}

			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
			/* Update the NDIS header pkt lengths */
			packet->total_data_buflen -= VLAN_HLEN;
			packet->total_bytes -= VLAN_HLEN;
			rndis_msg->msg_len = packet->total_data_buflen;
			rndis_msg->msg.pkt.data_len = packet->total_data_buflen;
		}
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				     IEEE_8021Q_INFO);

		vlan->value = 0;
		vlan->vlanid = skb_vlan_tag_get_id(skb);
		vlan->cfi = skb_vlan_tag_get_cfi(skb);
		vlan->pri = skb_vlan_tag_get_prio(skb);
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
					 TCP_LARGESEND_PKTINFO);

		lso_info->value = 0;
		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			tcp_v6_gso_csum_prep(skb);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
						  TCPIP_CHKSUM_PKTINFO);

			csum_info->value = 0;
			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	ret = netvsc_send(net, packet, rndis_msg, pb, skb, xdp_tx);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb,
				     struct net_device *ndev)
{
	return netvsc_xmit(skb, ndev, false);
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct net_device *net,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Ensure the packet is big enough to access its fields */
	if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_indicate_status)) {
		netdev_err(net, "invalid rndis_indicate_status packet, len: %u\n",
			   resp->msg_len);
		return;
	}

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate
				 + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	int rc;

	skb->queue_mapping = skb_get_rx_queue(skb);
	__skb_push(skb, ETH_HLEN);

	rc = netvsc_xmit(skb, ndev, true);

	if (dev_xmit_complete(rc))
		return;

	dev_kfree_skb_any(skb);
	ndev->stats.tx_dropped++;
}

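/* Recompute the IPv4 header checksum in place; used when the host
 * validated the checksum but did not recompute it after coalescing.
 */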
static void netvsc_comp_ipcsum(struct sk_buff *skb)
{
	struct iphdr *iph = (struct iphdr *)skb->data;

	iph->check = 0;
	iph->check = ip_fast_csum(iph, iph->ihl);
}

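/* Construct an skb for a received packet: either take ownership of the
 * XDP buffer via build_skb() or allocate and copy the receive data, then
 * apply the VLAN, checksum and hash metadata supplied by the host.
 */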
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct netvsc_channel *nvchan,
					     struct xdp_buff *xdp)
{
	struct napi_struct *napi = &nvchan->napi;
	const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
	const struct ndis_tcp_ip_checksum_info *csum_info =
						nvchan->rsc.csum_info;
	const u32 *hash_info = nvchan->rsc.hash_info;
	struct sk_buff *skb;
	void *xbuf = xdp->data_hard_start;
	int i;

	if (xbuf) {
		unsigned int hdroom = xdp->data - xdp->data_hard_start;
		unsigned int xlen = xdp->data_end - xdp->data;
		unsigned int frag_size = xdp->frame_sz;

		skb = build_skb(xbuf, frag_size);

		if (!skb) {
			__free_page(virt_to_page(xbuf));
			return NULL;
		}

		skb_reserve(skb, hdroom);
		skb_put(skb, xlen);
		skb->dev = napi->dev;
	} else {
		skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);

		if (!skb)
			return NULL;

		/* Copy to skb. This copy is needed here since the memory
		 * pointed to by hv_netvsc_packet cannot be deallocated.
		 */
		for (i = 0; i < nvchan->rsc.cnt; i++)
			skb_put_data(skb, nvchan->rsc.data[i],
				     nvchan->rsc.len[i]);
	}

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/* Incoming packets may have IP header checksum verified by the host.
	 * They may not have IP header checksum computed after coalescing.
	 * We compute it here if the flags are set, because on Linux, the IP
	 * checksum is always checked.
	 */
	if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
	    csum_info->receive.ip_checksum_succeeded &&
	    skb->protocol == htons(ETH_P_IP))
		netvsc_comp_ipcsum(skb);

	/* Do L4 checksum offload if enabled and present. */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (hash_info && (net->features & NETIF_F_RXHASH))
		skb_set_hash(skb, *hash_info, PKT_HASH_TYPE_L4);

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
			(vlan->cfi ? VLAN_CFI_MASK : 0);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct netvsc_device *net_device,
			 struct netvsc_channel *nvchan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct vmbus_channel *channel = nvchan->channel;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats = &nvchan->rx_stats;
	struct xdp_buff xdp;
	u32 act;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	act = netvsc_run_xdp(net, nvchan, &xdp);

	if (act != XDP_PASS && act != XDP_TX) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->xdp_drop++;
		u64_stats_update_end(&rx_stats->syncp);

		return NVSP_STAT_SUCCESS; /* consumed by XDP */
	}

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, nvchan, &xdp);

	if (unlikely(!skb)) {
		++net_device_ctx->eth_stats.rx_no_memory;
		return NVSP_STAT_FAIL;
	}

	skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += nvchan->rsc.pktlen;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	if (act == XDP_TX) {
		netvsc_xdp_xmit(skb, net);
		return NVSP_STAT_SUCCESS;
	}

	napi_gro_receive(&nvchan->napi, skb);
	return NVSP_STAT_SUCCESS;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined	= nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

/* Alloc struct netvsc_device_info, and initialize it from either existing
 * struct netvsc_device, or from default values.
 */
static
struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
{
	struct netvsc_device_info *dev_info;
	struct bpf_prog *prog;

	dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);

	if (!dev_info)
		return NULL;

	if (nvdev) {
		ASSERT_RTNL();

		dev_info->num_chn = nvdev->num_chn;
		dev_info->send_sections = nvdev->send_section_cnt;
		dev_info->send_section_size = nvdev->send_section_size;
		dev_info->recv_sections = nvdev->recv_section_cnt;
		dev_info->recv_section_size = nvdev->recv_section_size;

		memcpy(dev_info->rss_key, nvdev->extension->rss_key,
		       NETVSC_HASH_KEYLEN);

		prog = netvsc_xdp_get(nvdev);
		if (prog) {
			bpf_prog_inc(prog);
			dev_info->bprog = prog;
		}
	} else {
		dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
		dev_info->send_sections = NETVSC_DEFAULT_TX;
		dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
		dev_info->recv_sections = NETVSC_DEFAULT_RX;
		dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
	}

	return dev_info;
}

/* Free struct netvsc_device_info */
static void netvsc_devinfo_put(struct netvsc_device_info *dev_info)
{
	if (dev_info->bprog) {
		ASSERT_RTNL();
		bpf_prog_put(dev_info->bprog);
	}

	kfree(dev_info);
}

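/* Quiesce and tear down the synthetic datapath (XDP program, RNDIS
 * filter and ring buffers) while keeping the netdev registered, so
 * netvsc_attach() can re-create it with different parameters.
 */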
static int netvsc_detach(struct net_device *ndev,
			 struct netvsc_device *nvdev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	int ret;

	/* Don't continue trying to set up sub-channels */
	if (cancel_work_sync(&nvdev->subchan_work))
		nvdev->num_chn = 1;

	netvsc_xdp_set(ndev, NULL, NULL, nvdev);

	/* If device was up (receiving) then shutdown */
	if (netif_running(ndev)) {
		netvsc_tx_disable(nvdev, ndev);

		ret = rndis_filter_close(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "unable to close device (ret %d).\n", ret);
			return ret;
		}

		ret = netvsc_wait_until_empty(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "Ring buffer not empty after closing rndis\n");
			return ret;
		}
	}

	netif_device_detach(ndev);

	rndis_filter_device_remove(hdev, nvdev);

	return 0;
}

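/* Counterpart to netvsc_detach(): re-create the RNDIS device from the
 * saved netvsc_device_info, restore the XDP program, and reopen the
 * device if the interface was running.
 */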
static int netvsc_attach(struct net_device *ndev,
			 struct netvsc_device_info *dev_info)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	struct bpf_prog *prog;
	int ret = 0;

	nvdev = rndis_filter_device_add(hdev, dev_info);
	if (IS_ERR(nvdev))
		return PTR_ERR(nvdev);

	if (nvdev->num_chn > 1) {
		ret = rndis_set_subchannel(ndev, nvdev, dev_info);

		/* if unavailable, just proceed with one queue */
		if (ret) {
			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	prog = dev_info->bprog;
	if (prog) {
		bpf_prog_inc(prog);
		ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
		if (ret) {
			bpf_prog_put(prog);
			goto err1;
		}
	}

	/* In any case device is now ready */
	nvdev->tx_disable = false;
	netif_device_attach(ndev);

	/* Note: enable and attach happen when sub-channels are set up */
	netif_carrier_off(ndev);

	if (netif_running(ndev)) {
		ret = rndis_filter_open(nvdev);
		if (ret)
			goto err2;

		rdev = nvdev->extension;
		if (!rdev->link_state)
			netif_carrier_on(ndev);
	}

	return 0;

err2:
	netif_device_detach(ndev);

err1:
	rndis_filter_device_remove(hdev, nvdev);

	return ret;
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int orig, count = channels->combined_count;
	struct netvsc_device_info *device_info;
	int ret;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	orig = nvdev->num_chn;

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	device_info->num_chn = count;

	ret = netvsc_detach(net, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(net, device_info);
	if (ret) {
		device_info->num_chn = orig;
		if (netvsc_attach(net, device_info))
			netdev_err(net, "restoring channel setting failed\n");
	}

out:
	netvsc_devinfo_put(device_info);
	return ret;
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->l4_hash = HV_DEFAULT_L4HASH;

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_FULL;

	dev->features = NETIF_F_LRO;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct net_device *vf_netdev;

	vf_netdev = rtnl_dereference(ndc->vf_netdev);

	if (vf_netdev)
		return __ethtool_get_link_ksettings(vf_netdev, cmd);

	cmd->base.speed = ndc->speed;
	cmd->base.duplex = ndc->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);

	if (vf_netdev) {
		if (!vf_netdev->ethtool_ops->set_link_ksettings)
			return -EOPNOTSUPP;

		return vf_netdev->ethtool_ops->set_link_ksettings(vf_netdev,
								  cmd);
	}

	return ethtool_virtdev_set_link_ksettings(dev, cmd,
						  &ndc->speed, &ndc->duplex);
}

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	int orig_mtu = ndev->mtu;
	struct netvsc_device_info *device_info;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	/* Change MTU of underlying VF netdev first. */
	if (vf_netdev) {
		ret = dev_set_mtu(vf_netdev, mtu);
		if (ret)
			goto out;
	}

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto rollback_vf;

	ndev->mtu = mtu;

	ret = netvsc_attach(ndev, device_info);
	if (!ret)
		goto out;

	/* Attempt rollback to original MTU */
	ndev->mtu = orig_mtu;

	if (netvsc_attach(ndev, device_info))
		netdev_err(ndev, "restoring mtu failed\n");
rollback_vf:
	if (vf_netdev)
		dev_set_mtu(vf_netdev, orig_mtu);

out:
	netvsc_devinfo_put(device_info);
	return ret;
}

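/* Aggregate the per-cpu VF statistics into *tot, using the u64_stats
 * seqcount retry loop for consistent snapshots.
 */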
static void netvsc_get_vf_stats(struct net_device *net,
				struct netvsc_vf_pcpu_stats *tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int i;

	memset(tot, 0, sizeof(*tot));

	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats
			= per_cpu_ptr(ndev_ctx->vf_stats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
		tot->tx_dropped += stats->tx_dropped;
	}
}

static void netvsc_get_pcpu_stats(struct net_device *net,
				  struct netvsc_ethtool_pcpu_stats *pcpu_tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	int i;

	/* fetch percpu stats of vf */
	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats =
			per_cpu_ptr(ndev_ctx->vf_stats, i);
		struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			this_tot->vf_rx_packets = stats->rx_packets;
			this_tot->vf_tx_packets = stats->tx_packets;
			this_tot->vf_rx_bytes = stats->rx_bytes;
			this_tot->vf_tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		this_tot->rx_packets = this_tot->vf_rx_packets;
		this_tot->tx_packets = this_tot->vf_tx_packets;
		this_tot->rx_bytes   = this_tot->vf_rx_bytes;
		this_tot->tx_bytes   = this_tot->vf_tx_bytes;
	}

	/* fetch percpu stats of netvsc */
	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		struct netvsc_ethtool_pcpu_stats *this_tot =
			&pcpu_tot[nvchan->channel->target_cpu];
		u64 packets, bytes;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		this_tot->tx_bytes	+= bytes;
		this_tot->tx_packets	+= packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		this_tot->rx_bytes	+= bytes;
		this_tot->rx_packets	+= packets;
	}
}

static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev;
	struct netvsc_vf_pcpu_stats vf_tot;
	int i;

	rcu_read_lock();

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (!nvdev)
		goto out;

	netdev_stats_to_stats64(t, &net->stats);

	netvsc_get_vf_stats(net, &vf_tot);
	t->rx_packets += vf_tot.rx_packets;
	t->tx_packets += vf_tot.tx_packets;
	t->rx_bytes   += vf_tot.rx_bytes;
	t->tx_bytes   += vf_tot.tx_bytes;
	t->tx_dropped += vf_tot.tx_dropped;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes	+= bytes;
		t->tx_packets	+= packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes	+= bytes;
		t->rx_packets	+= packets;
		t->multicast	+= multicast;
	}
out:
	rcu_read_unlock();
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	struct sockaddr *addr = p;
	int err;

	err = eth_prepare_mac_addr_change(ndev, p);
	if (err)
		return err;

	if (!nvdev)
		return -ENODEV;

	if (vf_netdev) {
		err = dev_set_mac_address(vf_netdev, addr, NULL);
		if (err)
			return err;
	}

	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
	if (!err) {
		eth_commit_mac_addr_change(ndev, p);
	} else if (vf_netdev) {
		/* rollback change on VF */
		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
		dev_set_mac_address(vf_netdev, addr, NULL);
	}

	return err;
}

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
	{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
	{ "vlan_error", offsetof(struct netvsc_ethtool_stats, vlan_error) },
}, pcpu_stats[] = {
	{ "cpu%u_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
	{ "cpu%u_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
	{ "cpu%u_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
	{ "cpu%u_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
	{ "cpu%u_vf_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
	{ "cpu%u_vf_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
	{ "cpu%u_vf_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
	{ "cpu%u_vf_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
}, vf_stats[] = {
	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
	{ "vf_rx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
	{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
	{ "vf_tx_bytes",   offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
	{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};

#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)

/* statistics per cpu (rx/tx packets/bytes) */
1491 #define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))
1492 
1493 /* 5 statistics per queue (rx/tx packets/bytes, rx xdp_drop) */
1494 #define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 5)
1495 
netvsc_get_sset_count(struct net_device * dev,int string_set)1496 static int netvsc_get_sset_count(struct net_device *dev, int string_set)
1497 {
1498 	struct net_device_context *ndc = netdev_priv(dev);
1499 	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1500 
1501 	if (!nvdev)
1502 		return -ENODEV;
1503 
1504 	switch (string_set) {
1505 	case ETH_SS_STATS:
1506 		return NETVSC_GLOBAL_STATS_LEN
1507 			+ NETVSC_VF_STATS_LEN
1508 			+ NETVSC_QUEUE_STATS_LEN(nvdev)
1509 			+ NETVSC_PCPU_STATS_LEN;
1510 	default:
1511 		return -EINVAL;
1512 	}
1513 }
1514 
netvsc_get_ethtool_stats(struct net_device * dev,struct ethtool_stats * stats,u64 * data)1515 static void netvsc_get_ethtool_stats(struct net_device *dev,
1516 				     struct ethtool_stats *stats, u64 *data)
1517 {
1518 	struct net_device_context *ndc = netdev_priv(dev);
1519 	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1520 	const void *nds = &ndc->eth_stats;
1521 	const struct netvsc_stats *qstats;
1522 	struct netvsc_vf_pcpu_stats sum;
1523 	struct netvsc_ethtool_pcpu_stats *pcpu_sum;
1524 	unsigned int start;
1525 	u64 packets, bytes;
1526 	u64 xdp_drop;
1527 	int i, j, cpu;
1528 
1529 	if (!nvdev)
1530 		return;
1531 
1532 	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
1533 		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
1534 
1535 	netvsc_get_vf_stats(dev, &sum);
1536 	for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
1537 		data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);
1538 
1539 	for (j = 0; j < nvdev->num_chn; j++) {
1540 		qstats = &nvdev->chan_table[j].tx_stats;
1541 
1542 		do {
1543 			start = u64_stats_fetch_begin_irq(&qstats->syncp);
1544 			packets = qstats->packets;
1545 			bytes = qstats->bytes;
1546 		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
1547 		data[i++] = packets;
1548 		data[i++] = bytes;
1549 
1550 		qstats = &nvdev->chan_table[j].rx_stats;
1551 		do {
1552 			start = u64_stats_fetch_begin_irq(&qstats->syncp);
1553 			packets = qstats->packets;
1554 			bytes = qstats->bytes;
1555 			xdp_drop = qstats->xdp_drop;
1556 		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
1557 		data[i++] = packets;
1558 		data[i++] = bytes;
1559 		data[i++] = xdp_drop;
1560 	}
1561 
1562 	pcpu_sum = kvmalloc_array(num_possible_cpus(),
1563 				  sizeof(struct netvsc_ethtool_pcpu_stats),
1564 				  GFP_KERNEL);
1565 	if (!pcpu_sum)
1566 		return;
1567 
1568 	netvsc_get_pcpu_stats(dev, pcpu_sum);
1569 	for_each_present_cpu(cpu) {
1570 		struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];
1571 
1572 		for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
1573 			data[i++] = *(u64 *)((void *)this_sum
1574 					     + pcpu_stats[j].offset);
1575 	}
1576 	kvfree(pcpu_sum);
1577 }
1578 
netvsc_get_strings(struct net_device * dev,u32 stringset,u8 * data)1579 static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1580 {
1581 	struct net_device_context *ndc = netdev_priv(dev);
1582 	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1583 	u8 *p = data;
1584 	int i, cpu;
1585 
1586 	if (!nvdev)
1587 		return;
1588 
1589 	switch (stringset) {
1590 	case ETH_SS_STATS:
1591 		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
1592 			memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
1593 			p += ETH_GSTRING_LEN;
1594 		}
1595 
1596 		for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
1597 			memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
1598 			p += ETH_GSTRING_LEN;
1599 		}
1600 
1601 		for (i = 0; i < nvdev->num_chn; i++) {
1602 			sprintf(p, "tx_queue_%u_packets", i);
1603 			p += ETH_GSTRING_LEN;
1604 			sprintf(p, "tx_queue_%u_bytes", i);
1605 			p += ETH_GSTRING_LEN;
1606 			sprintf(p, "rx_queue_%u_packets", i);
1607 			p += ETH_GSTRING_LEN;
1608 			sprintf(p, "rx_queue_%u_bytes", i);
1609 			p += ETH_GSTRING_LEN;
1610 			sprintf(p, "rx_queue_%u_xdp_drop", i);
1611 			p += ETH_GSTRING_LEN;
1612 		}
1613 
1614 		for_each_present_cpu(cpu) {
1615 			for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) {
1616 				sprintf(p, pcpu_stats[i].name, cpu);
1617 				p += ETH_GSTRING_LEN;
1618 			}
1619 		}
1620 
1621 		break;
1622 	}
1623 }
1624 
1625 static int
netvsc_get_rss_hash_opts(struct net_device_context * ndc,struct ethtool_rxnfc * info)1626 netvsc_get_rss_hash_opts(struct net_device_context *ndc,
1627 			 struct ethtool_rxnfc *info)
1628 {
1629 	const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;
1630 
1631 	info->data = RXH_IP_SRC | RXH_IP_DST;
1632 
1633 	switch (info->flow_type) {
1634 	case TCP_V4_FLOW:
1635 		if (ndc->l4_hash & HV_TCP4_L4HASH)
1636 			info->data |= l4_flag;
1637 
1638 		break;
1639 
1640 	case TCP_V6_FLOW:
1641 		if (ndc->l4_hash & HV_TCP6_L4HASH)
1642 			info->data |= l4_flag;
1643 
1644 		break;
1645 
1646 	case UDP_V4_FLOW:
1647 		if (ndc->l4_hash & HV_UDP4_L4HASH)
1648 			info->data |= l4_flag;
1649 
1650 		break;
1651 
1652 	case UDP_V6_FLOW:
1653 		if (ndc->l4_hash & HV_UDP6_L4HASH)
1654 			info->data |= l4_flag;
1655 
1656 		break;
1657 
1658 	case IPV4_FLOW:
1659 	case IPV6_FLOW:
1660 		break;
1661 	default:
1662 		info->data = 0;
1663 		break;
1664 	}
1665 
1666 	return 0;
1667 }
1668 
1669 static int
netvsc_get_rxnfc(struct net_device * dev,struct ethtool_rxnfc * info,u32 * rules)1670 netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1671 		 u32 *rules)
1672 {
1673 	struct net_device_context *ndc = netdev_priv(dev);
1674 	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1675 
1676 	if (!nvdev)
1677 		return -ENODEV;
1678 
1679 	switch (info->cmd) {
1680 	case ETHTOOL_GRXRINGS:
1681 		info->data = nvdev->num_chn;
1682 		return 0;
1683 
1684 	case ETHTOOL_GRXFH:
1685 		return netvsc_get_rss_hash_opts(ndc, info);
1686 	}
1687 	return -EOPNOTSUPP;
1688 }
1689 
netvsc_set_rss_hash_opts(struct net_device_context * ndc,struct ethtool_rxnfc * info)1690 static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
1691 				    struct ethtool_rxnfc *info)
1692 {
1693 	if (info->data == (RXH_IP_SRC | RXH_IP_DST |
1694 			   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
1695 		switch (info->flow_type) {
1696 		case TCP_V4_FLOW:
1697 			ndc->l4_hash |= HV_TCP4_L4HASH;
1698 			break;
1699 
1700 		case TCP_V6_FLOW:
1701 			ndc->l4_hash |= HV_TCP6_L4HASH;
1702 			break;
1703 
1704 		case UDP_V4_FLOW:
1705 			ndc->l4_hash |= HV_UDP4_L4HASH;
1706 			break;
1707 
1708 		case UDP_V6_FLOW:
1709 			ndc->l4_hash |= HV_UDP6_L4HASH;
1710 			break;
1711 
1712 		default:
1713 			return -EOPNOTSUPP;
1714 		}
1715 
1716 		return 0;
1717 	}
1718 
1719 	if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
1720 		switch (info->flow_type) {
1721 		case TCP_V4_FLOW:
1722 			ndc->l4_hash &= ~HV_TCP4_L4HASH;
1723 			break;
1724 
1725 		case TCP_V6_FLOW:
1726 			ndc->l4_hash &= ~HV_TCP6_L4HASH;
1727 			break;
1728 
1729 		case UDP_V4_FLOW:
1730 			ndc->l4_hash &= ~HV_UDP4_L4HASH;
1731 			break;
1732 
1733 		case UDP_V6_FLOW:
1734 			ndc->l4_hash &= ~HV_UDP6_L4HASH;
1735 			break;
1736 
1737 		default:
1738 			return -EOPNOTSUPP;
1739 		}
1740 
1741 		return 0;
1742 	}
1743 
1744 	return -EOPNOTSUPP;
1745 }

static int
netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
{
	struct net_device_context *ndc = netdev_priv(ndev);

	if (info->cmd == ETHTOOL_SRXFH)
		return netvsc_set_rss_hash_opts(ndc, info);

	return -EOPNOTSUPP;
}

static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
	return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
	return ITAB_NUM;
}

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			indir[i] = ndc->rx_table[i];
	}

	if (key)
		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

	return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			if (indir[i] >= ndev->num_chn)
				return -EINVAL;

		for (i = 0; i < ITAB_NUM; i++)
			ndc->rx_table[i] = indir[i];
	}

	if (!key) {
		if (!indir)
			return 0;

		key = rndis_dev->rss_key;
	}

	return rndis_filter_set_rss_param(rndis_dev, key);
}
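
/*
 * Example (illustrative): the Toeplitz key and indirection table above are
 * reachable through the usual ethtool RSS interface, e.g.:
 *
 *   ethtool -x eth0            # show indirection table and hash key
 *   ethtool -X eth0 equal 4    # spread the table evenly over 4 channels
 *
 * Table entries >= nvdev->num_chn are rejected with -EINVAL.
 */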

/* The Hyper-V RNDIS protocol does not have a ring in the hardware sense.
 * It does have a pre-allocated receive area which is divided into sections.
 */
static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
				   struct ethtool_ringparam *ring)
{
	u32 max_buf_size;

	ring->rx_pending = nvdev->recv_section_cnt;
	ring->tx_pending = nvdev->send_section_cnt;

	if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;

	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
		/ nvdev->send_section_size;
}

static void netvsc_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev)
		return;

	__netvsc_get_ringparam(nvdev, ring);
}

static int netvsc_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct netvsc_device_info *device_info;
	struct ethtool_ringparam orig;
	u32 new_tx, new_rx;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	memset(&orig, 0, sizeof(orig));
	__netvsc_get_ringparam(nvdev, &orig);

	new_tx = clamp_t(u32, ring->tx_pending,
			 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
	new_rx = clamp_t(u32, ring->rx_pending,
			 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);

	if (new_tx == orig.tx_pending &&
	    new_rx == orig.rx_pending)
		return 0;	 /* no change */

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	device_info->send_sections = new_tx;
	device_info->recv_sections = new_rx;

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(ndev, device_info);
	if (ret) {
		device_info->send_sections = orig.tx_pending;
		device_info->recv_sections = orig.rx_pending;

		if (netvsc_attach(ndev, device_info))
			netdev_err(ndev, "restoring ringparam failed\n");
	}

out:
	netvsc_devinfo_put(device_info);
	return ret;
}
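
/*
 * Example (illustrative): resizing the section counts goes through the
 * generic ring-buffer knob and triggers a detach/attach cycle, e.g.:
 *
 *   ethtool -g eth0                 # current and maximum section counts
 *   ethtool -G eth0 rx 1024 tx 1024 # request new counts (clamped to max)
 */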

static netdev_features_t netvsc_fix_features(struct net_device *ndev,
					     netdev_features_t features)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev || nvdev->destroy)
		return features;

	if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) {
		features ^= NETIF_F_LRO;
		netdev_info(ndev, "Skip LRO - unsupported with XDP\n");
	}

	return features;
}

static int netvsc_set_features(struct net_device *ndev,
			       netdev_features_t features)
{
	netdev_features_t change = features ^ ndev->features;
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct ndis_offload_params offloads;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (!(change & NETIF_F_LRO))
		goto syncvf;

	memset(&offloads, 0, sizeof(struct ndis_offload_params));

	if (features & NETIF_F_LRO) {
		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
	} else {
		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
	}

	ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads);

	if (ret) {
		features ^= NETIF_F_LRO;
		ndev->features = features;
	}

syncvf:
	if (!vf_netdev)
		return ret;

	vf_netdev->wanted_features = features;
	netdev_update_features(vf_netdev);

	return ret;
}
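
/*
 * Example (illustrative): NETIF_F_LRO maps to RSC (receive segment
 * coalescing) on the Hyper-V side and is toggled like any other offload:
 *
 *   ethtool -K eth0 lro off   # disables RSC for both IPv4 and IPv6
 *
 * The resulting wanted_features are also propagated to a bonded VF, if any.
 */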

static int netvsc_get_regs_len(struct net_device *netdev)
{
	return VRSS_SEND_TAB_SIZE * sizeof(u32);
}

static void netvsc_get_regs(struct net_device *netdev,
			    struct ethtool_regs *regs, void *p)
{
	struct net_device_context *ndc = netdev_priv(netdev);
	u32 *regs_buff = p;

	/* Increase the version if the buffer format changes. */
	regs->version = 1;

	memcpy(regs_buff, ndc->tx_table, VRSS_SEND_TAB_SIZE * sizeof(u32));
}
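
/*
 * Example (illustrative): the "registers" exposed above are just the send
 * indirection table, VRSS_SEND_TAB_SIZE u32 entries, so a raw dump such as
 *
 *   ethtool -d eth0 raw on | hexdump -e '4/4 "%08x " "\n"'
 *
 * prints one table entry per word.
 */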

static u32 netvsc_get_msglevel(struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	return ndev_ctx->msg_enable;
}

static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	ndev_ctx->msg_enable = val;
}
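
/*
 * Example (illustrative): the message level is the standard NETIF_MSG_*
 * bitmap, e.g.:
 *
 *   ethtool -s eth0 msglvl 0x7   # NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
 */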

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_regs_len	= netvsc_get_regs_len,
	.get_regs	= netvsc_get_regs,
	.get_msglevel	= netvsc_get_msglevel,
	.set_msglevel	= netvsc_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings	= netvsc_get_strings,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_rxnfc	= netvsc_get_rxnfc,
	.set_rxnfc	= netvsc_set_rxnfc,
	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
	.get_rxfh_indir_size = netvsc_rss_indir_size,
	.get_rxfh	= netvsc_get_rxfh,
	.set_rxfh	= netvsc_set_rxfh,
	.get_link_ksettings = netvsc_get_link_ksettings,
	.set_link_ksettings = netvsc_set_link_ksettings,
	.get_ringparam	= netvsc_get_ringparam,
	.set_ringparam	= netvsc_set_ringparam,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_change_rx_flags =		netvsc_change_rx_flags,
	.ndo_set_rx_mode =		netvsc_set_rx_mode,
	.ndo_fix_features =		netvsc_fix_features,
	.ndo_set_features =		netvsc_set_features,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
	.ndo_bpf =			netvsc_bpf,
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE, emulate a
 * link down/up sequence. For RNDIS_STATUS_MEDIA_CONNECT, when the carrier
 * is already present, send a GARP packet to network peers with
 * netdev_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	/* if changes are happening, come back later */
	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
		return;
	}

	net_device = rtnl_dereference(ndev_ctx->nvdev);
	if (!net_device)
		goto out_unlock;

	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netvsc_tx_enable(net_device, net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netvsc_tx_disable(net_device, net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netvsc_tx_disable(net_device, net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct net_device *dev;

	dev = netdev_master_upper_dev_get(vf_netdev);
	if (!dev || dev->netdev_ops != &device_ops)
		return NULL;	/* not a netvsc device */

	net_device_ctx = netdev_priv(dev);
	if (!rtnl_dereference(net_device_ctx->nvdev))
		return NULL;	/* device is removed */

	return dev;
}

/* Called when the VF is injecting data into the network stack.
 * Change the associated network device from VF to netvsc.
 * Note: already called with rcu_read_lock held.
 */
static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_vf_pcpu_stats *pcpu_stats
		 = this_cpu_ptr(ndev_ctx->vf_stats);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	skb->dev = ndev;

	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	return RX_HANDLER_ANOTHER;
}

static int netvsc_vf_join(struct net_device *vf_netdev,
			  struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	int ret;

	ret = netdev_rx_handler_register(vf_netdev,
					 netvsc_vf_handle_frame, ndev);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not register netvsc VF receive handler (err = %d)\n",
			   ret);
		goto rx_handler_failed;
	}

	ret = netdev_master_upper_dev_link(vf_netdev, ndev,
					   NULL, NULL, NULL);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not set master device %s (err = %d)\n",
			   ndev->name, ret);
		goto upper_link_failed;
	}

	/* set slave flag before open to prevent IPv6 addrconf */
	vf_netdev->flags |= IFF_SLAVE;

	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);

	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);

	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
	return 0;

upper_link_failed:
	netdev_rx_handler_unregister(vf_netdev);
rx_handler_failed:
	return ret;
}

static void __netvsc_vf_setup(struct net_device *ndev,
			      struct net_device *vf_netdev)
{
	int ret;

	/* Align MTU of VF with master */
	ret = dev_set_mtu(vf_netdev, ndev->mtu);
	if (ret)
		netdev_warn(vf_netdev,
			    "unable to change mtu to %u\n", ndev->mtu);

	/* set multicast etc flags on VF */
	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL);

	/* sync address list from ndev to VF */
	netif_addr_lock_bh(ndev);
	dev_uc_sync(vf_netdev, ndev);
	dev_mc_sync(vf_netdev, ndev);
	netif_addr_unlock_bh(ndev);

	if (netif_running(ndev)) {
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(vf_netdev,
				    "unable to open: %d\n", ret);
	}
}

/* Setup VF as slave of the synthetic device.
 * Runs in workqueue to avoid recursion in netlink callbacks.
 */
static void netvsc_vf_setup(struct work_struct *w)
{
	struct net_device_context *ndev_ctx
		= container_of(w, struct net_device_context, vf_takeover.work);
	struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
	struct net_device *vf_netdev;

	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
		return;
	}

	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		__netvsc_vf_setup(ndev, vf_netdev);

	rtnl_unlock();
}

/* Find netvsc by VF serial number.
 * The PCI hyperv controller records the serial number as the slot kobj name.
 */
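/*
 * Example (assumption for illustration): a VF with serial 2 is expected to
 * appear as PCI slot "2" (/sys/bus/pci/slots/2), which the kstrtou32()
 * below parses back into the serial number.
 */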
static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
{
	struct device *parent = vf_netdev->dev.parent;
	struct net_device_context *ndev_ctx;
	struct net_device *ndev;
	struct pci_dev *pdev;
	u32 serial;

	if (!parent || !dev_is_pci(parent))
		return NULL; /* not a PCI device */

	pdev = to_pci_dev(parent);
	if (!pdev->slot) {
		netdev_notice(vf_netdev, "no PCI slot information\n");
		return NULL;
	}

	if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
		netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
			      pci_slot_name(pdev->slot));
		return NULL;
	}

	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
		if (!ndev_ctx->vf_alloc)
			continue;

		if (ndev_ctx->vf_serial == serial)
			return hv_get_drvdata(ndev_ctx->device_ctx);
	}

	/* Fallback: try to match the synthetic NIC by MAC address
	 * instead of by serial number.
	 */
	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
		ndev = hv_get_drvdata(ndev_ctx->device_ctx);
		if (ether_addr_equal(vf_netdev->perm_addr, ndev->perm_addr)) {
			netdev_notice(vf_netdev,
				      "falling back to mac addr based matching\n");
			return ndev;
		}
	}

	netdev_notice(vf_netdev,
		      "no netdev found for vf serial:%u\n", serial);
	return NULL;
}

static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct bpf_prog *prog;
	struct net_device *ndev;
	int ret;

	if (vf_netdev->addr_len != ETH_ALEN)
		return NOTIFY_DONE;

	ndev = get_netvsc_byslot(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return NOTIFY_DONE;

	/* If the synthetic interface is in a different namespace,
	 * move the VF to that namespace; the join will be done
	 * again in that context.
	 */
	if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
		ret = dev_change_net_namespace(vf_netdev,
					       dev_net(ndev), "eth%d");
		if (ret)
			netdev_err(vf_netdev,
				   "could not move to same namespace as %s: %d\n",
				   ndev->name, ret);
		else
			netdev_info(vf_netdev,
				    "VF moved to namespace with: %s\n",
				    ndev->name);
		return NOTIFY_DONE;
	}

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);

	if (netvsc_vf_join(vf_netdev, ndev) != 0)
		return NOTIFY_DONE;

	dev_hold(vf_netdev);
	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);

	vf_netdev->wanted_features = ndev->features;
	netdev_update_features(vf_netdev);

	prog = netvsc_xdp_get(netvsc_dev);
	netvsc_vf_setxdp(vf_netdev, prog);

	return NOTIFY_OK;
}

/* Change the data path when VF UP/DOWN/CHANGE events are detected.
 *
 * Typically an UP or DOWN event is followed by a CHANGE event, so
 * net_device_ctx->data_path_is_vf is used to cache the current data path
 * and avoid duplicate calls to netvsc_switch_datapath() and duplicate
 * log messages.
 *
 * During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network
 * interface, there is only the CHANGE event and no UP or DOWN event.
 */
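/*
 * Example (assumption for illustration): whether traffic currently flows
 * over the VF can be inferred from the per-cpu VF counters exported via
 * the ethtool stats, e.g.:
 *
 *   ethtool -S eth0 | grep vf_
 */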
static int netvsc_vf_changed(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct net_device *ndev;
	bool vf_is_up = netif_running(vf_netdev);

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev)
		return NOTIFY_DONE;

	if (net_device_ctx->data_path_is_vf == vf_is_up)
		return NOTIFY_OK;
	net_device_ctx->data_path_is_vf = vf_is_up;

	if (vf_is_up && !net_device_ctx->vf_alloc) {
		netdev_info(ndev, "Waiting for the VF association from host\n");
		wait_for_completion(&net_device_ctx->vf_add);
	}

	netvsc_switch_datapath(ndev, vf_is_up);
	netdev_info(ndev, "Data path switched %s VF: %s\n",
		    vf_is_up ? "to" : "from", vf_netdev->name);

	return NOTIFY_OK;
}

static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);

	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	netvsc_vf_setxdp(vf_netdev, NULL);

	reinit_completion(&net_device_ctx->vf_add);
	netdev_rx_handler_unregister(vf_netdev);
	netdev_upper_dev_unlink(vf_netdev, ndev);
	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);

	return NOTIFY_OK;
}

static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info *device_info = NULL;
	struct netvsc_device *nvdev;
	int ret = -ENOMEM;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				VRSS_CHANNEL_MAX);
	if (!net)
		goto no_net;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);

	init_completion(&net_device_ctx->vf_add);
	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);

	net_device_ctx->vf_stats
		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
	if (!net_device_ctx->vf_stats)
		goto no_stats;

	net->netdev_ops = &device_ops;
	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Initialize the number of queues to 1; it may be changed if more
	 * channels are offered later.
	 */
	netif_set_real_num_tx_queues(net, 1);
	netif_set_real_num_rx_queues(net, 1);

	/* Notify the netvsc driver of the new device */
	device_info = netvsc_devinfo_get(NULL);

	if (!device_info) {
		ret = -ENOMEM;
		goto devinfo_failed;
	}

	nvdev = rndis_filter_device_add(dev, device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		goto rndis_failed;
	}

	memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);

	/* We must take the rtnl lock before scheduling nvdev->subchan_work;
	 * otherwise netvsc_subchan_work() can take the rtnl lock first and
	 * wait for all subchannels to show up, which may never happen:
	 * netvsc_probe() can't get the rtnl lock, so vmbus_onoffer()
	 * -> ... -> device_add() -> ... -> __device_attach() can't get
	 * the device lock, none of the subchannels can be processed, and
	 * netvsc_subchan_work() hangs forever.
	 */
	rtnl_lock();

	if (nvdev->num_chn > 1)
		schedule_work(&nvdev->subchan_work);

	/* hw_features computed in rndis_netdev_set_hwcaps() */
	net->features = net->hw_features |
		NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX;
	net->vlan_features = net->features;

	netdev_lockdep_set_classes(net);

	/* MTU range: 68 - 1500 or 65521 */
	net->min_mtu = NETVSC_MTU_MIN;
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	nvdev->tx_disable = false;

	ret = register_netdevice(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		goto register_failed;
	}

	list_add(&net_device_ctx->list, &netvsc_dev_list);
	rtnl_unlock();

	netvsc_devinfo_put(device_info);
	return 0;

register_failed:
	rtnl_unlock();
	rndis_filter_device_remove(dev, nvdev);
rndis_failed:
	netvsc_devinfo_put(device_info);
devinfo_failed:
	free_percpu(net_device_ctx->vf_stats);
no_stats:
	hv_set_drvdata(dev, NULL);
	free_netdev(net);
no_net:
	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device_context *ndev_ctx;
	struct net_device *vf_netdev, *net;
	struct netvsc_device *nvdev;

	net = hv_get_drvdata(dev);
	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);

	cancel_delayed_work_sync(&ndev_ctx->dwork);

	rtnl_lock();
	nvdev = rtnl_dereference(ndev_ctx->nvdev);
	if (nvdev) {
		cancel_work_sync(&nvdev->subchan_work);
		netvsc_xdp_set(net, NULL, NULL, nvdev);
	}

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed. Also blocks mtu and channel changes.
	 */
	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		netvsc_unregister_vf(vf_netdev);

	if (nvdev)
		rndis_filter_device_remove(dev, nvdev);

	unregister_netdevice(net);
	list_del(&ndev_ctx->list);

	rtnl_unlock();

	hv_set_drvdata(dev, NULL);

	free_percpu(ndev_ctx->vf_stats);
	free_netdev(net);
	return 0;
}

static int netvsc_suspend(struct hv_device *dev)
{
	struct net_device_context *ndev_ctx;
	struct netvsc_device *nvdev;
	struct net_device *net;
	int ret;

	net = hv_get_drvdata(dev);

	ndev_ctx = netdev_priv(net);
	cancel_delayed_work_sync(&ndev_ctx->dwork);

	rtnl_lock();

	nvdev = rtnl_dereference(ndev_ctx->nvdev);
	if (nvdev == NULL) {
		ret = -ENODEV;
		goto out;
	}

	/* Save the current config info */
	ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
	if (!ndev_ctx->saved_netvsc_dev_info) {
		ret = -ENOMEM;
		goto out;
	}
	ret = netvsc_detach(net, nvdev);
out:
	rtnl_unlock();

	return ret;
}

static int netvsc_resume(struct hv_device *dev)
{
	struct net_device *net = hv_get_drvdata(dev);
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info *device_info;
	int ret;

	rtnl_lock();

	net_device_ctx = netdev_priv(net);

	/* Reset the data path to the netvsc NIC before re-opening the vmbus
	 * channel. Later netvsc_netdev_event() will switch the data path to
	 * the VF upon the UP or CHANGE event.
	 */
	net_device_ctx->data_path_is_vf = false;
	device_info = net_device_ctx->saved_netvsc_dev_info;

	ret = netvsc_attach(net, device_info);

	netvsc_devinfo_put(device_info);
	net_device_ctx->saved_netvsc_dev_info = NULL;

	rtnl_unlock();

	return ret;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only netvsc driver instance */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
	.suspend = netvsc_suspend,
	.resume = netvsc_resume,
	.driver = {
		.probe_type = PROBE_FORCE_SYNCHRONOUS,
	},
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
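/*
 * Example (assumption for illustration): once the VF has joined, tools such
 * as `ip link show` list it as a slave of the synthetic NIC, e.g. a line
 * like "enP1s1: ... master eth0", while IP configuration stays on eth0.
 */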
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Skip our own events */
	if (event_dev->netdev_ops == &device_ops)
		return NOTIFY_DONE;

	/* Avoid non-Ethernet type devices */
	if (event_dev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	/* Avoid Vlan dev with same MAC registering as VF */
	if (is_vlan_dev(event_dev))
		return NOTIFY_DONE;

	/* Avoid Bonding master dev with same MAC registering as VF */
	if ((event_dev->priv_flags & IFF_BONDING) &&
	    (event_dev->flags & IFF_MASTER))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		return netvsc_vf_changed(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %u (min allowed)\n",
			ring_size);
	}
	netvsc_ring_bytes = ring_size * PAGE_SIZE;

	ret = vmbus_driver_register(&netvsc_drv);
	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}
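
/*
 * Example (illustrative, assuming the module is built as hv_netvsc): the
 * ring_size parameter clamped above is given at load time, e.g.:
 *
 *   modprobe hv_netvsc ring_size=256   # 256 pages per VMBus ring buffer
 */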

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);