1 /*
2 * Virtual network driver for conversing with remote driver backends.
3 *
4 * Copyright (c) 2002-2005, K A Fraser
5 * Copyright (c) 2005, XenSource Ltd
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation; or, when distributed
10 * separately from the Linux kernel or incorporated into other
11 * software packages, subject to the following license:
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this source file (the "Software"), to deal in the Software without
15 * restriction, including without limitation the rights to use, copy, modify,
16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17 * and to permit persons to whom the Software is furnished to do so, subject to
18 * the following conditions:
19 *
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29 * IN THE SOFTWARE.
30 */
31
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34 #include <linux/module.h>
35 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/ethtool.h>
40 #include <linux/if_ether.h>
41 #include <net/tcp.h>
42 #include <linux/udp.h>
43 #include <linux/moduleparam.h>
44 #include <linux/mm.h>
45 #include <linux/slab.h>
46 #include <net/ip.h>
47 #include <linux/bpf.h>
48 #include <net/page_pool.h>
49 #include <linux/bpf_trace.h>
50
51 #include <xen/xen.h>
52 #include <xen/xenbus.h>
53 #include <xen/events.h>
54 #include <xen/page.h>
55 #include <xen/platform_pci.h>
56 #include <xen/grant_table.h>
57
58 #include <xen/interface/io/netif.h>
59 #include <xen/interface/memory.h>
60 #include <xen/interface/grant_table.h>
61
62 /* Module parameters */
63 #define MAX_QUEUES_DEFAULT 8
64 static unsigned int xennet_max_queues;
65 module_param_named(max_queues, xennet_max_queues, uint, 0644);
66 MODULE_PARM_DESC(max_queues,
67 "Maximum number of queues per virtual interface");
68
69 static bool __read_mostly xennet_trusted = true;
70 module_param_named(trusted, xennet_trusted, bool, 0644);
71 MODULE_PARM_DESC(trusted, "Is the backend trusted");
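/*
 * Illustrative usage (not part of the driver itself): both parameters can be
 * given at load time, e.g. "modprobe xen-netfront max_queues=4 trusted=0",
 * or adjusted later through /sys/module/xen_netfront/parameters/.
 */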
72
73 #define XENNET_TIMEOUT (5 * HZ)
74
75 static const struct ethtool_ops xennet_ethtool_ops;
76
77 struct netfront_cb {
78 int pull_to;
79 };
80
81 #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
82
83 #define RX_COPY_THRESHOLD 256
84
85 #define GRANT_INVALID_REF 0
86
87 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
88 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
89
90 /* Minimum number of Rx slots (includes slot for GSO metadata). */
91 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
92
93 /* Queue name is interface name with "-qNNN" appended */
94 #define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
95
96 /* IRQ name is queue name with "-tx" or "-rx" appended */
97 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
98
99 static DECLARE_WAIT_QUEUE_HEAD(module_wq);
100
101 struct netfront_stats {
102 u64 packets;
103 u64 bytes;
104 struct u64_stats_sync syncp;
105 };
106
107 struct netfront_info;
108
109 struct netfront_queue {
110 unsigned int id; /* Queue ID, 0-based */
111 char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
112 struct netfront_info *info;
113
114 struct bpf_prog __rcu *xdp_prog;
115
116 struct napi_struct napi;
117
118 /* Split event channels support, tx_* == rx_* when using
119 * single event channel.
120 */
121 unsigned int tx_evtchn, rx_evtchn;
122 unsigned int tx_irq, rx_irq;
123 /* Only used when split event channels support is enabled */
124 char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
125 char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
126
127 spinlock_t tx_lock;
128 struct xen_netif_tx_front_ring tx;
129 int tx_ring_ref;
130
131 /*
132 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
133 * are linked from tx_skb_freelist through tx_link.
134 */
135 struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
136 unsigned short tx_link[NET_TX_RING_SIZE];
137 #define TX_LINK_NONE 0xffff
138 #define TX_PENDING 0xfffe
139 grant_ref_t gref_tx_head;
140 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
141 struct page *grant_tx_page[NET_TX_RING_SIZE];
142 unsigned tx_skb_freelist;
143 unsigned int tx_pend_queue;
144
145 spinlock_t rx_lock ____cacheline_aligned_in_smp;
146 struct xen_netif_rx_front_ring rx;
147 int rx_ring_ref;
148
149 struct timer_list rx_refill_timer;
150
151 struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
152 grant_ref_t gref_rx_head;
153 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
154
155 unsigned int rx_rsp_unconsumed;
156 spinlock_t rx_cons_lock;
157
158 struct page_pool *page_pool;
159 struct xdp_rxq_info xdp_rxq;
160 };
161
162 struct netfront_info {
163 struct list_head list;
164 struct net_device *netdev;
165
166 struct xenbus_device *xbdev;
167
168 /* Multi-queue support */
169 struct netfront_queue *queues;
170
171 /* Statistics */
172 struct netfront_stats __percpu *rx_stats;
173 struct netfront_stats __percpu *tx_stats;
174
175 /* XDP state */
176 bool netback_has_xdp_headroom;
177 bool netfront_xdp_enabled;
178
179 /* Is the device behaving sanely? */
180 bool broken;
181
182 /* Should skbs be bounced into a zeroed buffer? */
183 bool bounce;
184
185 atomic_t rx_gso_checksum_fixup;
186 };
187
188 struct netfront_rx_info {
189 struct xen_netif_rx_response rx;
190 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
191 };
192
193 /*
194 * Access macros for acquiring/freeing slots in tx_skbs[].
195 */
196
197 static void add_id_to_list(unsigned *head, unsigned short *list,
198 unsigned short id)
199 {
200 list[id] = *head;
201 *head = id;
202 }
203
204 static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
205 {
206 unsigned int id = *head;
207
208 if (id != TX_LINK_NONE) {
209 *head = list[id];
210 list[id] = TX_LINK_NONE;
211 }
212 return id;
213 }
214
215 static int xennet_rxidx(RING_IDX idx)
216 {
217 return idx & (NET_RX_RING_SIZE - 1);
218 }
219
220 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
221 RING_IDX ri)
222 {
223 int i = xennet_rxidx(ri);
224 struct sk_buff *skb = queue->rx_skbs[i];
225 queue->rx_skbs[i] = NULL;
226 return skb;
227 }
228
229 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
230 RING_IDX ri)
231 {
232 int i = xennet_rxidx(ri);
233 grant_ref_t ref = queue->grant_rx_ref[i];
234 queue->grant_rx_ref[i] = GRANT_INVALID_REF;
235 return ref;
236 }
237
238 #ifdef CONFIG_SYSFS
239 static const struct attribute_group xennet_dev_group;
240 #endif
241
242 static bool xennet_can_sg(struct net_device *dev)
243 {
244 return dev->features & NETIF_F_SG;
245 }
246
247
248 static void rx_refill_timeout(struct timer_list *t)
249 {
250 struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
251 napi_schedule(&queue->napi);
252 }
253
254 static int netfront_tx_slot_available(struct netfront_queue *queue)
255 {
256 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
257 (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
258 }
259
260 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
261 {
262 struct net_device *dev = queue->info->netdev;
263 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
264
265 if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
266 netfront_tx_slot_available(queue) &&
267 likely(netif_running(dev)))
268 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
269 }
270
271
272 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
273 {
274 struct sk_buff *skb;
275 struct page *page;
276
277 skb = __netdev_alloc_skb(queue->info->netdev,
278 RX_COPY_THRESHOLD + NET_IP_ALIGN,
279 GFP_ATOMIC | __GFP_NOWARN);
280 if (unlikely(!skb))
281 return NULL;
282
283 page = page_pool_alloc_pages(queue->page_pool,
284 GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
285 if (unlikely(!page)) {
286 kfree_skb(skb);
287 return NULL;
288 }
289 skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
290
291 /* Align the IP header to a 16-byte boundary */
292 skb_reserve(skb, NET_IP_ALIGN);
293 skb->dev = queue->info->netdev;
294
295 return skb;
296 }
297
298
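/*
 * Refill the RX ring: allocate skb/page pairs, grant the backend access to
 * each page and publish the requests.  If an allocation fails or too few
 * slots could be filled, the refill timer is armed to retry later.
 */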
299 static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
300 {
301 RING_IDX req_prod = queue->rx.req_prod_pvt;
302 int notify;
303 int err = 0;
304
305 if (unlikely(!netif_carrier_ok(queue->info->netdev)))
306 return;
307
308 for (req_prod = queue->rx.req_prod_pvt;
309 req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
310 req_prod++) {
311 struct sk_buff *skb;
312 unsigned short id;
313 grant_ref_t ref;
314 struct page *page;
315 struct xen_netif_rx_request *req;
316
317 skb = xennet_alloc_one_rx_buffer(queue);
318 if (!skb) {
319 err = -ENOMEM;
320 break;
321 }
322
323 id = xennet_rxidx(req_prod);
324
325 BUG_ON(queue->rx_skbs[id]);
326 queue->rx_skbs[id] = skb;
327
328 ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
329 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
330 queue->grant_rx_ref[id] = ref;
331
332 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
333
334 req = RING_GET_REQUEST(&queue->rx, req_prod);
335 gnttab_page_grant_foreign_access_ref_one(ref,
336 queue->info->xbdev->otherend_id,
337 page,
338 0);
339 req->id = id;
340 req->gref = ref;
341 }
342
343 queue->rx.req_prod_pvt = req_prod;
344
345 /* Try again later if there are not enough requests or skb allocation
346 * failed.
347 * Enough requests is quantified as the sum of newly created slots and
348 * the unconsumed slots at the backend.
349 */
350 if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
351 unlikely(err)) {
352 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
353 return;
354 }
355
356 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
357 if (notify)
358 notify_remote_via_irq(queue->rx_irq);
359 }
360
361 static int xennet_open(struct net_device *dev)
362 {
363 struct netfront_info *np = netdev_priv(dev);
364 unsigned int num_queues = dev->real_num_tx_queues;
365 unsigned int i = 0;
366 struct netfront_queue *queue = NULL;
367
368 if (!np->queues || np->broken)
369 return -ENODEV;
370
371 for (i = 0; i < num_queues; ++i) {
372 queue = &np->queues[i];
373 napi_enable(&queue->napi);
374
375 spin_lock_bh(&queue->rx_lock);
376 if (netif_carrier_ok(dev)) {
377 xennet_alloc_rx_buffers(queue);
378 queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
379 if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
380 napi_schedule(&queue->napi);
381 }
382 spin_unlock_bh(&queue->rx_lock);
383 }
384
385 netif_tx_start_all_queues(dev);
386
387 return 0;
388 }
389
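/*
 * Reclaim TX slots that the backend has responded to: end foreign access on
 * the grant, return the grant reference and slot id to their free lists and
 * free the skb.  Any inconsistent response from the backend marks the device
 * as broken and disables it.
 */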
390 static bool xennet_tx_buf_gc(struct netfront_queue *queue)
391 {
392 RING_IDX cons, prod;
393 unsigned short id;
394 struct sk_buff *skb;
395 bool more_to_do;
396 bool work_done = false;
397 const struct device *dev = &queue->info->netdev->dev;
398
399 BUG_ON(!netif_carrier_ok(queue->info->netdev));
400
401 do {
402 prod = queue->tx.sring->rsp_prod;
403 if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
404 dev_alert(dev, "Illegal number of responses %u\n",
405 prod - queue->tx.rsp_cons);
406 goto err;
407 }
408 rmb(); /* Ensure we see responses up to 'rp'. */
409
410 for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
411 struct xen_netif_tx_response txrsp;
412
413 work_done = true;
414
415 RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
416 if (txrsp.status == XEN_NETIF_RSP_NULL)
417 continue;
418
419 id = txrsp.id;
420 if (id >= RING_SIZE(&queue->tx)) {
421 dev_alert(dev,
422 "Response has incorrect id (%u)\n",
423 id);
424 goto err;
425 }
426 if (queue->tx_link[id] != TX_PENDING) {
427 dev_alert(dev,
428 "Response for inactive request\n");
429 goto err;
430 }
431
432 queue->tx_link[id] = TX_LINK_NONE;
433 skb = queue->tx_skbs[id];
434 queue->tx_skbs[id] = NULL;
435 if (unlikely(!gnttab_end_foreign_access_ref(
436 queue->grant_tx_ref[id], GNTMAP_readonly))) {
437 dev_alert(dev,
438 "Grant still in use by backend domain\n");
439 goto err;
440 }
441 gnttab_release_grant_reference(
442 &queue->gref_tx_head, queue->grant_tx_ref[id]);
443 queue->grant_tx_ref[id] = GRANT_INVALID_REF;
444 queue->grant_tx_page[id] = NULL;
445 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
446 dev_kfree_skb_irq(skb);
447 }
448
449 queue->tx.rsp_cons = prod;
450
451 RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
452 } while (more_to_do);
453
454 xennet_maybe_wake_tx(queue);
455
456 return work_done;
457
458 err:
459 queue->info->broken = true;
460 dev_alert(dev, "Disabled for further use\n");
461
462 return work_done;
463 }
464
465 struct xennet_gnttab_make_txreq {
466 struct netfront_queue *queue;
467 struct sk_buff *skb;
468 struct page *page;
469 struct xen_netif_tx_request *tx; /* Last request on ring page */
470 struct xen_netif_tx_request tx_local; /* Last request local copy */
471 unsigned int size;
472 };
473
474 static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
475 unsigned int len, void *data)
476 {
477 struct xennet_gnttab_make_txreq *info = data;
478 unsigned int id;
479 struct xen_netif_tx_request *tx;
480 grant_ref_t ref;
481 /* convenient aliases */
482 struct page *page = info->page;
483 struct netfront_queue *queue = info->queue;
484 struct sk_buff *skb = info->skb;
485
486 id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
487 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
488 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
489 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
490
491 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
492 gfn, GNTMAP_readonly);
493
494 queue->tx_skbs[id] = skb;
495 queue->grant_tx_page[id] = page;
496 queue->grant_tx_ref[id] = ref;
497
498 info->tx_local.id = id;
499 info->tx_local.gref = ref;
500 info->tx_local.offset = offset;
501 info->tx_local.size = len;
502 info->tx_local.flags = 0;
503
504 *tx = info->tx_local;
505
506 /*
507 * Put the request in the pending queue; it will be marked as pending
508 * when the producer index is about to be raised.
509 */
510 add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
511
512 info->tx = tx;
513 info->size += info->tx_local.size;
514 }
515
516 static struct xen_netif_tx_request *xennet_make_first_txreq(
517 struct xennet_gnttab_make_txreq *info,
518 unsigned int offset, unsigned int len)
519 {
520 info->size = 0;
521
522 gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
523
524 return info->tx;
525 }
526
527 static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
528 unsigned int len, void *data)
529 {
530 struct xennet_gnttab_make_txreq *info = data;
531
532 info->tx->flags |= XEN_NETTXF_more_data;
533 skb_get(info->skb);
534 xennet_tx_setup_grant(gfn, offset, len, data);
535 }
536
537 static void xennet_make_txreqs(
538 struct xennet_gnttab_make_txreq *info,
539 struct page *page,
540 unsigned int offset, unsigned int len)
541 {
542 /* Skip unused frames from start of page */
543 page += offset >> PAGE_SHIFT;
544 offset &= ~PAGE_MASK;
545
546 while (len) {
547 info->page = page;
548 info->size = 0;
549
550 gnttab_foreach_grant_in_range(page, offset, len,
551 xennet_make_one_txreq,
552 info);
553
554 page++;
555 offset = 0;
556 len -= info->size;
557 }
558 }
559
560 /*
561 * Count how many ring slots are required to send this skb. Each frag
562 * might be a compound page.
563 */
564 static int xennet_count_skb_slots(struct sk_buff *skb)
565 {
566 int i, frags = skb_shinfo(skb)->nr_frags;
567 int slots;
568
569 slots = gnttab_count_grant(offset_in_page(skb->data),
570 skb_headlen(skb));
571
572 for (i = 0; i < frags; i++) {
573 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
574 unsigned long size = skb_frag_size(frag);
575 unsigned long offset = skb_frag_off(frag);
576
577 /* Skip unused frames from start of page */
578 offset &= ~PAGE_MASK;
579
580 slots += gnttab_count_grant(offset, size);
581 }
582
583 return slots;
584 }
585
586 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
587 struct net_device *sb_dev)
588 {
589 unsigned int num_queues = dev->real_num_tx_queues;
590 u32 hash;
591 u16 queue_idx;
592
593 /* First, check if there is only one queue */
594 if (num_queues == 1) {
595 queue_idx = 0;
596 } else {
597 hash = skb_get_hash(skb);
598 queue_idx = hash % num_queues;
599 }
600
601 return queue_idx;
602 }
603
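/*
 * Move all entries queued on tx_pend_queue to the TX_PENDING state; called
 * right before the producer index is pushed to the backend.
 */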
604 static void xennet_mark_tx_pending(struct netfront_queue *queue)
605 {
606 unsigned int i;
607
608 while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
609 TX_LINK_NONE)
610 queue->tx_link[i] = TX_PENDING;
611 }
612
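/*
 * Transmit a single XDP frame: build one grant request for the frame's page,
 * push it to the backend and update the per-cpu TX statistics.  The caller
 * must hold the queue's tx_lock.
 */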
613 static int xennet_xdp_xmit_one(struct net_device *dev,
614 struct netfront_queue *queue,
615 struct xdp_frame *xdpf)
616 {
617 struct netfront_info *np = netdev_priv(dev);
618 struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
619 struct xennet_gnttab_make_txreq info = {
620 .queue = queue,
621 .skb = NULL,
622 .page = virt_to_page(xdpf->data),
623 };
624 int notify;
625
626 xennet_make_first_txreq(&info,
627 offset_in_page(xdpf->data),
628 xdpf->len);
629
630 xennet_mark_tx_pending(queue);
631
632 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
633 if (notify)
634 notify_remote_via_irq(queue->tx_irq);
635
636 u64_stats_update_begin(&tx_stats->syncp);
637 tx_stats->bytes += xdpf->len;
638 tx_stats->packets++;
639 u64_stats_update_end(&tx_stats->syncp);
640
641 xennet_tx_buf_gc(queue);
642
643 return 0;
644 }
645
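/*
 * .ndo_xdp_xmit handler: transmit a batch of XDP frames on the queue picked
 * from the current CPU and return the number of frames sent.
 */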
646 static int xennet_xdp_xmit(struct net_device *dev, int n,
647 struct xdp_frame **frames, u32 flags)
648 {
649 unsigned int num_queues = dev->real_num_tx_queues;
650 struct netfront_info *np = netdev_priv(dev);
651 struct netfront_queue *queue = NULL;
652 unsigned long irq_flags;
653 int drops = 0;
654 int i, err;
655
656 if (unlikely(np->broken))
657 return -ENODEV;
658 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
659 return -EINVAL;
660
661 queue = &np->queues[smp_processor_id() % num_queues];
662
663 spin_lock_irqsave(&queue->tx_lock, irq_flags);
664 for (i = 0; i < n; i++) {
665 struct xdp_frame *xdpf = frames[i];
666
667 if (!xdpf)
668 continue;
669 err = xennet_xdp_xmit_one(dev, queue, xdpf);
670 if (err) {
671 xdp_return_frame_rx_napi(xdpf);
672 drops++;
673 }
674 }
675 spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
676
677 return n - drops;
678 }
679
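/*
 * Copy an skb into freshly allocated, zeroed, page-aligned memory.  Used when
 * the backend is not trusted, so that only the packet data itself, and no
 * adjacent kernel memory, ends up on the granted pages.
 */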
680 struct sk_buff *bounce_skb(const struct sk_buff *skb)
681 {
682 unsigned int headerlen = skb_headroom(skb);
683 /* Align size to allocate full pages and avoid contiguous data leaks */
684 unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
685 XEN_PAGE_SIZE);
686 struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
687
688 if (!n)
689 return NULL;
690
691 if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
692 WARN_ONCE(1, "misaligned skb allocated\n");
693 kfree_skb(n);
694 return NULL;
695 }
696
697 /* Set the data pointer */
698 skb_reserve(n, headerlen);
699 /* Set the tail pointer and length */
700 skb_put(n, skb->len);
701
702 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
703
704 skb_copy_header(n, skb);
705 return n;
706 }
707
708 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
709
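/*
 * Main transmit path: map the linear area and all fragments into grant
 * requests, add an optional GSO extra-info slot, then push the requests and
 * notify the backend.  The packet is dropped if it cannot be expressed in
 * the wire format or if the carrier is down.
 */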
710 static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
711 {
712 struct netfront_info *np = netdev_priv(dev);
713 struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
714 struct xen_netif_tx_request *first_tx;
715 unsigned int i;
716 int notify;
717 int slots;
718 struct page *page;
719 unsigned int offset;
720 unsigned int len;
721 unsigned long flags;
722 struct netfront_queue *queue = NULL;
723 struct xennet_gnttab_make_txreq info = { };
724 unsigned int num_queues = dev->real_num_tx_queues;
725 u16 queue_index;
726 struct sk_buff *nskb;
727
728 /* Drop the packet if no queues are set up */
729 if (num_queues < 1)
730 goto drop;
731 if (unlikely(np->broken))
732 goto drop;
733 /* Determine which queue to transmit this SKB on */
734 queue_index = skb_get_queue_mapping(skb);
735 queue = &np->queues[queue_index];
736
737 /* If skb->len is too big for wire format, drop skb and alert
738 * user about misconfiguration.
739 */
740 if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
741 net_alert_ratelimited(
742 "xennet: skb->len = %u, too big for wire format\n",
743 skb->len);
744 goto drop;
745 }
746
747 slots = xennet_count_skb_slots(skb);
748 if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
749 net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
750 slots, skb->len);
751 if (skb_linearize(skb))
752 goto drop;
753 }
754
755 page = virt_to_page(skb->data);
756 offset = offset_in_page(skb->data);
757
758 /* The first req should be at least ETH_HLEN size or the packet will be
759 * dropped by netback.
760 *
761 * If the backend is not trusted bounce all data to zeroed pages to
762 * avoid exposing contiguous data on the granted page not belonging to
763 * the skb.
764 */
765 if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
766 nskb = bounce_skb(skb);
767 if (!nskb)
768 goto drop;
769 dev_consume_skb_any(skb);
770 skb = nskb;
771 page = virt_to_page(skb->data);
772 offset = offset_in_page(skb->data);
773 }
774
775 len = skb_headlen(skb);
776
777 spin_lock_irqsave(&queue->tx_lock, flags);
778
779 if (unlikely(!netif_carrier_ok(dev) ||
780 (slots > 1 && !xennet_can_sg(dev)) ||
781 netif_needs_gso(skb, netif_skb_features(skb)))) {
782 spin_unlock_irqrestore(&queue->tx_lock, flags);
783 goto drop;
784 }
785
786 /* First request for the linear area. */
787 info.queue = queue;
788 info.skb = skb;
789 info.page = page;
790 first_tx = xennet_make_first_txreq(&info, offset, len);
791 offset += info.tx_local.size;
792 if (offset == PAGE_SIZE) {
793 page++;
794 offset = 0;
795 }
796 len -= info.tx_local.size;
797
798 if (skb->ip_summed == CHECKSUM_PARTIAL)
799 /* local packet? */
800 first_tx->flags |= XEN_NETTXF_csum_blank |
801 XEN_NETTXF_data_validated;
802 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
803 /* remote but checksummed. */
804 first_tx->flags |= XEN_NETTXF_data_validated;
805
806 /* Optional extra info after the first request. */
807 if (skb_shinfo(skb)->gso_size) {
808 struct xen_netif_extra_info *gso;
809
810 gso = (struct xen_netif_extra_info *)
811 RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
812
813 first_tx->flags |= XEN_NETTXF_extra_info;
814
815 gso->u.gso.size = skb_shinfo(skb)->gso_size;
816 gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
817 XEN_NETIF_GSO_TYPE_TCPV6 :
818 XEN_NETIF_GSO_TYPE_TCPV4;
819 gso->u.gso.pad = 0;
820 gso->u.gso.features = 0;
821
822 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
823 gso->flags = 0;
824 }
825
826 /* Requests for the rest of the linear area. */
827 xennet_make_txreqs(&info, page, offset, len);
828
829 /* Requests for all the frags. */
830 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
831 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
832 xennet_make_txreqs(&info, skb_frag_page(frag),
833 skb_frag_off(frag),
834 skb_frag_size(frag));
835 }
836
837 /* First request has the packet length. */
838 first_tx->size = skb->len;
839
840 /* timestamp packet in software */
841 skb_tx_timestamp(skb);
842
843 xennet_mark_tx_pending(queue);
844
845 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
846 if (notify)
847 notify_remote_via_irq(queue->tx_irq);
848
849 u64_stats_update_begin(&tx_stats->syncp);
850 tx_stats->bytes += skb->len;
851 tx_stats->packets++;
852 u64_stats_update_end(&tx_stats->syncp);
853
854 /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
855 xennet_tx_buf_gc(queue);
856
857 if (!netfront_tx_slot_available(queue))
858 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
859
860 spin_unlock_irqrestore(&queue->tx_lock, flags);
861
862 return NETDEV_TX_OK;
863
864 drop:
865 dev->stats.tx_dropped++;
866 dev_kfree_skb_any(skb);
867 return NETDEV_TX_OK;
868 }
869
870 static int xennet_close(struct net_device *dev)
871 {
872 struct netfront_info *np = netdev_priv(dev);
873 unsigned int num_queues = dev->real_num_tx_queues;
874 unsigned int i;
875 struct netfront_queue *queue;
876 netif_tx_stop_all_queues(np->netdev);
877 for (i = 0; i < num_queues; ++i) {
878 queue = &np->queues[i];
879 napi_disable(&queue->napi);
880 }
881 return 0;
882 }
883
884 static void xennet_destroy_queues(struct netfront_info *info)
885 {
886 unsigned int i;
887
888 for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
889 struct netfront_queue *queue = &info->queues[i];
890
891 if (netif_running(info->netdev))
892 napi_disable(&queue->napi);
893 netif_napi_del(&queue->napi);
894 }
895
896 kfree(info->queues);
897 info->queues = NULL;
898 }
899
900 static void xennet_uninit(struct net_device *dev)
901 {
902 struct netfront_info *np = netdev_priv(dev);
903 xennet_destroy_queues(np);
904 }
905
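/*
 * Update rx.rsp_cons and the count of unconsumed responses under
 * rx_cons_lock so the RX interrupt handler sees a consistent pair.
 */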
906 static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
907 {
908 unsigned long flags;
909
910 spin_lock_irqsave(&queue->rx_cons_lock, flags);
911 queue->rx.rsp_cons = val;
912 queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
913 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
914 }
915
916 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
917 grant_ref_t ref)
918 {
919 int new = xennet_rxidx(queue->rx.req_prod_pvt);
920
921 BUG_ON(queue->rx_skbs[new]);
922 queue->rx_skbs[new] = skb;
923 queue->grant_rx_ref[new] = ref;
924 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
925 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
926 queue->rx.req_prod_pvt++;
927 }
928
929 static int xennet_get_extras(struct netfront_queue *queue,
930 struct xen_netif_extra_info *extras,
931 RING_IDX rp)
932
933 {
934 struct xen_netif_extra_info extra;
935 struct device *dev = &queue->info->netdev->dev;
936 RING_IDX cons = queue->rx.rsp_cons;
937 int err = 0;
938
939 do {
940 struct sk_buff *skb;
941 grant_ref_t ref;
942
943 if (unlikely(cons + 1 == rp)) {
944 if (net_ratelimit())
945 dev_warn(dev, "Missing extra info\n");
946 err = -EBADR;
947 break;
948 }
949
950 RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
951
952 if (unlikely(!extra.type ||
953 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
954 if (net_ratelimit())
955 dev_warn(dev, "Invalid extra type: %d\n",
956 extra.type);
957 err = -EINVAL;
958 } else {
959 extras[extra.type - 1] = extra;
960 }
961
962 skb = xennet_get_rx_skb(queue, cons);
963 ref = xennet_get_rx_ref(queue, cons);
964 xennet_move_rx_slot(queue, skb, ref);
965 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
966
967 xennet_set_rx_rsp_cons(queue, cons);
968 return err;
969 }
970
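/*
 * Run the attached XDP program on a single received page and act on the
 * verdict (XDP_TX, XDP_REDIRECT, XDP_PASS, XDP_DROP or XDP_ABORTED).
 * Returns the verdict so the caller can drop non-XDP_PASS frames.
 */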
971 static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
972 struct xen_netif_rx_response *rx, struct bpf_prog *prog,
973 struct xdp_buff *xdp, bool *need_xdp_flush)
974 {
975 struct xdp_frame *xdpf;
976 u32 len = rx->status;
977 u32 act;
978 int err;
979
980 xdp->data_hard_start = page_address(pdata);
981 xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
982 xdp_set_data_meta_invalid(xdp);
983 xdp->data_end = xdp->data + len;
984 xdp->rxq = &queue->xdp_rxq;
985 xdp->frame_sz = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
986
987 act = bpf_prog_run_xdp(prog, xdp);
988 switch (act) {
989 case XDP_TX:
990 get_page(pdata);
991 xdpf = xdp_convert_buff_to_frame(xdp);
992 err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
993 if (unlikely(err < 0))
994 trace_xdp_exception(queue->info->netdev, prog, act);
995 break;
996 case XDP_REDIRECT:
997 get_page(pdata);
998 err = xdp_do_redirect(queue->info->netdev, xdp, prog);
999 *need_xdp_flush = true;
1000 if (unlikely(err))
1001 trace_xdp_exception(queue->info->netdev, prog, act);
1002 break;
1003 case XDP_PASS:
1004 case XDP_DROP:
1005 break;
1006
1007 case XDP_ABORTED:
1008 trace_xdp_exception(queue->info->netdev, prog, act);
1009 break;
1010
1011 default:
1012 bpf_warn_invalid_xdp_action(act);
1013 }
1014
1015 return act;
1016 }
1017
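/*
 * Collect all RX responses belonging to one packet (including extra-info
 * slots) and queue their skbs on @list.  Returns an error if the responses
 * are malformed, use too many slots, or an XDP program rejected the frame.
 */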
1018 static int xennet_get_responses(struct netfront_queue *queue,
1019 struct netfront_rx_info *rinfo, RING_IDX rp,
1020 struct sk_buff_head *list,
1021 bool *need_xdp_flush)
1022 {
1023 struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
1024 int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
1025 RING_IDX cons = queue->rx.rsp_cons;
1026 struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
1027 struct xen_netif_extra_info *extras = rinfo->extras;
1028 grant_ref_t ref = xennet_get_rx_ref(queue, cons);
1029 struct device *dev = &queue->info->netdev->dev;
1030 struct bpf_prog *xdp_prog;
1031 struct xdp_buff xdp;
1032 int slots = 1;
1033 int err = 0;
1034 u32 verdict;
1035
1036 if (rx->flags & XEN_NETRXF_extra_info) {
1037 err = xennet_get_extras(queue, extras, rp);
1038 if (!err) {
1039 if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
1040 struct xen_netif_extra_info *xdp;
1041
1042 xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
1043 rx->offset = xdp->u.xdp.headroom;
1044 }
1045 }
1046 cons = queue->rx.rsp_cons;
1047 }
1048
1049 for (;;) {
1050 if (unlikely(rx->status < 0 ||
1051 rx->offset + rx->status > XEN_PAGE_SIZE)) {
1052 if (net_ratelimit())
1053 dev_warn(dev, "rx->offset: %u, size: %d\n",
1054 rx->offset, rx->status);
1055 xennet_move_rx_slot(queue, skb, ref);
1056 err = -EINVAL;
1057 goto next;
1058 }
1059
1060 /*
1061 * This definitely indicates a bug, either in this driver or in
1062 * the backend driver. In future this should flag the bad
1063 * situation to the system controller to reboot the backend.
1064 */
1065 if (ref == GRANT_INVALID_REF) {
1066 if (net_ratelimit())
1067 dev_warn(dev, "Bad rx response id %d.\n",
1068 rx->id);
1069 err = -EINVAL;
1070 goto next;
1071 }
1072
1073 if (!gnttab_end_foreign_access_ref(ref, 0)) {
1074 dev_alert(dev,
1075 "Grant still in use by backend domain\n");
1076 queue->info->broken = true;
1077 dev_alert(dev, "Disabled for further use\n");
1078 return -EINVAL;
1079 }
1080
1081 gnttab_release_grant_reference(&queue->gref_rx_head, ref);
1082
1083 rcu_read_lock();
1084 xdp_prog = rcu_dereference(queue->xdp_prog);
1085 if (xdp_prog) {
1086 if (!(rx->flags & XEN_NETRXF_more_data)) {
1087 /* currently only a single page contains data */
1088 verdict = xennet_run_xdp(queue,
1089 skb_frag_page(&skb_shinfo(skb)->frags[0]),
1090 rx, xdp_prog, &xdp, need_xdp_flush);
1091 if (verdict != XDP_PASS)
1092 err = -EINVAL;
1093 } else {
1094 /* drop the frame */
1095 err = -EINVAL;
1096 }
1097 }
1098 rcu_read_unlock();
1099
1100 __skb_queue_tail(list, skb);
1101
1102 next:
1103 if (!(rx->flags & XEN_NETRXF_more_data))
1104 break;
1105
1106 if (cons + slots == rp) {
1107 if (net_ratelimit())
1108 dev_warn(dev, "Need more slots\n");
1109 err = -ENOENT;
1110 break;
1111 }
1112
1113 RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
1114 rx = &rx_local;
1115 skb = xennet_get_rx_skb(queue, cons + slots);
1116 ref = xennet_get_rx_ref(queue, cons + slots);
1117 slots++;
1118 }
1119
1120 if (unlikely(slots > max)) {
1121 if (net_ratelimit())
1122 dev_warn(dev, "Too many slots\n");
1123 err = -E2BIG;
1124 }
1125
1126 if (unlikely(err))
1127 xennet_set_rx_rsp_cons(queue, cons + slots);
1128
1129 return err;
1130 }
1131
1132 static int xennet_set_skb_gso(struct sk_buff *skb,
1133 struct xen_netif_extra_info *gso)
1134 {
1135 if (!gso->u.gso.size) {
1136 if (net_ratelimit())
1137 pr_warn("GSO size must not be zero\n");
1138 return -EINVAL;
1139 }
1140
1141 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
1142 gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
1143 if (net_ratelimit())
1144 pr_warn("Bad GSO type %d\n", gso->u.gso.type);
1145 return -EINVAL;
1146 }
1147
1148 skb_shinfo(skb)->gso_size = gso->u.gso.size;
1149 skb_shinfo(skb)->gso_type =
1150 (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
1151 SKB_GSO_TCPV4 :
1152 SKB_GSO_TCPV6;
1153
1154 /* Header must be checked, and gso_segs computed. */
1155 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1156 skb_shinfo(skb)->gso_segs = 0;
1157
1158 return 0;
1159 }
1160
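/*
 * Attach the pages of the follow-on response skbs on @list as fragments of
 * @skb, pulling data into the linear area when the fragment table is full.
 */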
1161 static int xennet_fill_frags(struct netfront_queue *queue,
1162 struct sk_buff *skb,
1163 struct sk_buff_head *list)
1164 {
1165 RING_IDX cons = queue->rx.rsp_cons;
1166 struct sk_buff *nskb;
1167
1168 while ((nskb = __skb_dequeue(list))) {
1169 struct xen_netif_rx_response rx;
1170 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
1171
1172 RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
1173
1174 if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
1175 unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1176
1177 BUG_ON(pull_to < skb_headlen(skb));
1178 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1179 }
1180 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1181 xennet_set_rx_rsp_cons(queue,
1182 ++cons + skb_queue_len(list));
1183 kfree_skb(nskb);
1184 return -ENOENT;
1185 }
1186
1187 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1188 skb_frag_page(nfrag),
1189 rx.offset, rx.status, PAGE_SIZE);
1190
1191 skb_shinfo(nskb)->nr_frags = 0;
1192 kfree_skb(nskb);
1193 }
1194
1195 xennet_set_rx_rsp_cons(queue, cons);
1196
1197 return 0;
1198 }
1199
1200 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
1201 {
1202 bool recalculate_partial_csum = false;
1203
1204 /*
1205 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1206 * peers can fail to set NETRXF_csum_blank when sending a GSO
1207 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1208 * recalculate the partial checksum.
1209 */
1210 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1211 struct netfront_info *np = netdev_priv(dev);
1212 atomic_inc(&np->rx_gso_checksum_fixup);
1213 skb->ip_summed = CHECKSUM_PARTIAL;
1214 recalculate_partial_csum = true;
1215 }
1216
1217 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1218 if (skb->ip_summed != CHECKSUM_PARTIAL)
1219 return 0;
1220
1221 return skb_checksum_setup(skb, recalculate_partial_csum);
1222 }
1223
1224 static int handle_incoming_queue(struct netfront_queue *queue,
1225 struct sk_buff_head *rxq)
1226 {
1227 struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
1228 int packets_dropped = 0;
1229 struct sk_buff *skb;
1230
1231 while ((skb = __skb_dequeue(rxq)) != NULL) {
1232 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1233
1234 if (pull_to > skb_headlen(skb))
1235 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1236
1237 /* Ethernet work: Delayed to here as it peeks the header. */
1238 skb->protocol = eth_type_trans(skb, queue->info->netdev);
1239 skb_reset_network_header(skb);
1240
1241 if (checksum_setup(queue->info->netdev, skb)) {
1242 kfree_skb(skb);
1243 packets_dropped++;
1244 queue->info->netdev->stats.rx_errors++;
1245 continue;
1246 }
1247
1248 u64_stats_update_begin(&rx_stats->syncp);
1249 rx_stats->packets++;
1250 rx_stats->bytes += skb->len;
1251 u64_stats_update_end(&rx_stats->syncp);
1252
1253 /* Pass it up. */
1254 napi_gro_receive(&queue->napi, skb);
1255 }
1256
1257 return packets_dropped;
1258 }
1259
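/*
 * NAPI poll handler: consume up to @budget responses from the RX ring,
 * reassemble them into skbs, run checksum fixups and pass them to the
 * stack, then refill the ring.
 */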
1260 static int xennet_poll(struct napi_struct *napi, int budget)
1261 {
1262 struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
1263 struct net_device *dev = queue->info->netdev;
1264 struct sk_buff *skb;
1265 struct netfront_rx_info rinfo;
1266 struct xen_netif_rx_response *rx = &rinfo.rx;
1267 struct xen_netif_extra_info *extras = rinfo.extras;
1268 RING_IDX i, rp;
1269 int work_done;
1270 struct sk_buff_head rxq;
1271 struct sk_buff_head errq;
1272 struct sk_buff_head tmpq;
1273 int err;
1274 bool need_xdp_flush = false;
1275
1276 spin_lock(&queue->rx_lock);
1277
1278 skb_queue_head_init(&rxq);
1279 skb_queue_head_init(&errq);
1280 skb_queue_head_init(&tmpq);
1281
1282 rp = queue->rx.sring->rsp_prod;
1283 if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1284 dev_alert(&dev->dev, "Illegal number of responses %u\n",
1285 rp - queue->rx.rsp_cons);
1286 queue->info->broken = true;
1287 spin_unlock(&queue->rx_lock);
1288 return 0;
1289 }
1290 rmb(); /* Ensure we see queued responses up to 'rp'. */
1291
1292 i = queue->rx.rsp_cons;
1293 work_done = 0;
1294 while ((i != rp) && (work_done < budget)) {
1295 RING_COPY_RESPONSE(&queue->rx, i, rx);
1296 memset(extras, 0, sizeof(rinfo.extras));
1297
1298 err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
1299 &need_xdp_flush);
1300
1301 if (unlikely(err)) {
1302 if (queue->info->broken) {
1303 spin_unlock(&queue->rx_lock);
1304 return 0;
1305 }
1306 err:
1307 while ((skb = __skb_dequeue(&tmpq)))
1308 __skb_queue_tail(&errq, skb);
1309 dev->stats.rx_errors++;
1310 i = queue->rx.rsp_cons;
1311 continue;
1312 }
1313
1314 skb = __skb_dequeue(&tmpq);
1315
1316 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1317 struct xen_netif_extra_info *gso;
1318 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1319
1320 if (unlikely(xennet_set_skb_gso(skb, gso))) {
1321 __skb_queue_head(&tmpq, skb);
1322 xennet_set_rx_rsp_cons(queue,
1323 queue->rx.rsp_cons +
1324 skb_queue_len(&tmpq));
1325 goto err;
1326 }
1327 }
1328
1329 NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1330 if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1331 NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1332
1333 skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
1334 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1335 skb->data_len = rx->status;
1336 skb->len += rx->status;
1337
1338 if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
1339 goto err;
1340
1341 if (rx->flags & XEN_NETRXF_csum_blank)
1342 skb->ip_summed = CHECKSUM_PARTIAL;
1343 else if (rx->flags & XEN_NETRXF_data_validated)
1344 skb->ip_summed = CHECKSUM_UNNECESSARY;
1345
1346 __skb_queue_tail(&rxq, skb);
1347
1348 i = queue->rx.rsp_cons + 1;
1349 xennet_set_rx_rsp_cons(queue, i);
1350 work_done++;
1351 }
1352 if (need_xdp_flush)
1353 xdp_do_flush();
1354
1355 __skb_queue_purge(&errq);
1356
1357 work_done -= handle_incoming_queue(queue, &rxq);
1358
1359 xennet_alloc_rx_buffers(queue);
1360
1361 if (work_done < budget) {
1362 int more_to_do = 0;
1363
1364 napi_complete_done(napi, work_done);
1365
1366 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1367 if (more_to_do)
1368 napi_schedule(napi);
1369 }
1370
1371 spin_unlock(&queue->rx_lock);
1372
1373 return work_done;
1374 }
1375
1376 static int xennet_change_mtu(struct net_device *dev, int mtu)
1377 {
1378 int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1379
1380 if (mtu > max)
1381 return -EINVAL;
1382 dev->mtu = mtu;
1383 return 0;
1384 }
1385
1386 static void xennet_get_stats64(struct net_device *dev,
1387 struct rtnl_link_stats64 *tot)
1388 {
1389 struct netfront_info *np = netdev_priv(dev);
1390 int cpu;
1391
1392 for_each_possible_cpu(cpu) {
1393 struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1394 struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1395 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1396 unsigned int start;
1397
1398 do {
1399 start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
1400 tx_packets = tx_stats->packets;
1401 tx_bytes = tx_stats->bytes;
1402 } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
1403
1404 do {
1405 start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
1406 rx_packets = rx_stats->packets;
1407 rx_bytes = rx_stats->bytes;
1408 } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
1409
1410 tot->rx_packets += rx_packets;
1411 tot->tx_packets += tx_packets;
1412 tot->rx_bytes += rx_bytes;
1413 tot->tx_bytes += tx_bytes;
1414 }
1415
1416 tot->rx_errors = dev->stats.rx_errors;
1417 tot->tx_dropped = dev->stats.tx_dropped;
1418 }
1419
1420 static void xennet_release_tx_bufs(struct netfront_queue *queue)
1421 {
1422 struct sk_buff *skb;
1423 int i;
1424
1425 for (i = 0; i < NET_TX_RING_SIZE; i++) {
1426 /* Skip over entries which are actually freelist references */
1427 if (!queue->tx_skbs[i])
1428 continue;
1429
1430 skb = queue->tx_skbs[i];
1431 queue->tx_skbs[i] = NULL;
1432 get_page(queue->grant_tx_page[i]);
1433 gnttab_end_foreign_access(queue->grant_tx_ref[i],
1434 GNTMAP_readonly,
1435 (unsigned long)page_address(queue->grant_tx_page[i]));
1436 queue->grant_tx_page[i] = NULL;
1437 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1438 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1439 dev_kfree_skb_irq(skb);
1440 }
1441 }
1442
1443 static void xennet_release_rx_bufs(struct netfront_queue *queue)
1444 {
1445 int id, ref;
1446
1447 spin_lock_bh(&queue->rx_lock);
1448
1449 for (id = 0; id < NET_RX_RING_SIZE; id++) {
1450 struct sk_buff *skb;
1451 struct page *page;
1452
1453 skb = queue->rx_skbs[id];
1454 if (!skb)
1455 continue;
1456
1457 ref = queue->grant_rx_ref[id];
1458 if (ref == GRANT_INVALID_REF)
1459 continue;
1460
1461 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1462
1463 /* gnttab_end_foreign_access() needs a page ref until
1464 * foreign access is ended (which may be deferred).
1465 */
1466 get_page(page);
1467 gnttab_end_foreign_access(ref, 0,
1468 (unsigned long)page_address(page));
1469 queue->grant_rx_ref[id] = GRANT_INVALID_REF;
1470
1471 kfree_skb(skb);
1472 }
1473
1474 spin_unlock_bh(&queue->rx_lock);
1475 }
1476
1477 static netdev_features_t xennet_fix_features(struct net_device *dev,
1478 netdev_features_t features)
1479 {
1480 struct netfront_info *np = netdev_priv(dev);
1481
1482 if (features & NETIF_F_SG &&
1483 !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1484 features &= ~NETIF_F_SG;
1485
1486 if (features & NETIF_F_IPV6_CSUM &&
1487 !xenbus_read_unsigned(np->xbdev->otherend,
1488 "feature-ipv6-csum-offload", 0))
1489 features &= ~NETIF_F_IPV6_CSUM;
1490
1491 if (features & NETIF_F_TSO &&
1492 !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1493 features &= ~NETIF_F_TSO;
1494
1495 if (features & NETIF_F_TSO6 &&
1496 !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1497 features &= ~NETIF_F_TSO6;
1498
1499 return features;
1500 }
1501
1502 static int xennet_set_features(struct net_device *dev,
1503 netdev_features_t features)
1504 {
1505 if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1506 netdev_info(dev, "Reducing MTU because no SG offload");
1507 dev->mtu = ETH_DATA_LEN;
1508 }
1509
1510 return 0;
1511 }
1512
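/*
 * Interrupt helpers.  Both return false if the device is marked broken;
 * otherwise they clear the XEN_EOI_FLAG_SPURIOUS flag whenever real work
 * was found, so the event channel is not treated as spurious.
 */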
1513 static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
1514 {
1515 unsigned long flags;
1516
1517 if (unlikely(queue->info->broken))
1518 return false;
1519
1520 spin_lock_irqsave(&queue->tx_lock, flags);
1521 if (xennet_tx_buf_gc(queue))
1522 *eoi = 0;
1523 spin_unlock_irqrestore(&queue->tx_lock, flags);
1524
1525 return true;
1526 }
1527
1528 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1529 {
1530 unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1531
1532 if (likely(xennet_handle_tx(dev_id, &eoiflag)))
1533 xen_irq_lateeoi(irq, eoiflag);
1534
1535 return IRQ_HANDLED;
1536 }
1537
1538 static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
1539 {
1540 unsigned int work_queued;
1541 unsigned long flags;
1542
1543 if (unlikely(queue->info->broken))
1544 return false;
1545
1546 spin_lock_irqsave(&queue->rx_cons_lock, flags);
1547 work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
1548 if (work_queued > queue->rx_rsp_unconsumed) {
1549 queue->rx_rsp_unconsumed = work_queued;
1550 *eoi = 0;
1551 } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
1552 const struct device *dev = &queue->info->netdev->dev;
1553
1554 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1555 dev_alert(dev, "RX producer index going backwards\n");
1556 dev_alert(dev, "Disabled for further use\n");
1557 queue->info->broken = true;
1558 return false;
1559 }
1560 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1561
1562 if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
1563 napi_schedule(&queue->napi);
1564
1565 return true;
1566 }
1567
1568 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1569 {
1570 unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1571
1572 if (likely(xennet_handle_rx(dev_id, &eoiflag)))
1573 xen_irq_lateeoi(irq, eoiflag);
1574
1575 return IRQ_HANDLED;
1576 }
1577
1578 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1579 {
1580 unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1581
1582 if (xennet_handle_tx(dev_id, &eoiflag) &&
1583 xennet_handle_rx(dev_id, &eoiflag))
1584 xen_irq_lateeoi(irq, eoiflag);
1585
1586 return IRQ_HANDLED;
1587 }
1588
1589 #ifdef CONFIG_NET_POLL_CONTROLLER
1590 static void xennet_poll_controller(struct net_device *dev)
1591 {
1592 /* Poll each queue */
1593 struct netfront_info *info = netdev_priv(dev);
1594 unsigned int num_queues = dev->real_num_tx_queues;
1595 unsigned int i;
1596
1597 if (info->broken)
1598 return;
1599
1600 for (i = 0; i < num_queues; ++i)
1601 xennet_interrupt(0, &info->queues[i]);
1602 }
1603 #endif
1604
1605 #define NETBACK_XDP_HEADROOM_DISABLE 0
1606 #define NETBACK_XDP_HEADROOM_ENABLE 1
1607
1608 static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
1609 {
1610 int err;
1611 unsigned short headroom;
1612
1613 headroom = xdp ? XDP_PACKET_HEADROOM : 0;
1614 err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
1615 "xdp-headroom", "%hu",
1616 headroom);
1617 if (err)
1618 pr_warn("Error writing xdp-headroom\n");
1619
1620 return err;
1621 }
1622
1623 static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1624 struct netlink_ext_ack *extack)
1625 {
1626 unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
1627 struct netfront_info *np = netdev_priv(dev);
1628 struct bpf_prog *old_prog;
1629 unsigned int i, err;
1630
1631 if (dev->mtu > max_mtu) {
1632 netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
1633 return -EINVAL;
1634 }
1635
1636 if (!np->netback_has_xdp_headroom)
1637 return 0;
1638
1639 xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);
1640
1641 err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
1642 NETBACK_XDP_HEADROOM_DISABLE);
1643 if (err)
1644 return err;
1645
1646 /* avoid the race with XDP headroom adjustment */
1647 wait_event(module_wq,
1648 xenbus_read_driver_state(np->xbdev->otherend) ==
1649 XenbusStateReconfigured);
1650 np->netfront_xdp_enabled = true;
1651
1652 old_prog = rtnl_dereference(np->queues[0].xdp_prog);
1653
1654 if (prog)
1655 bpf_prog_add(prog, dev->real_num_tx_queues);
1656
1657 for (i = 0; i < dev->real_num_tx_queues; ++i)
1658 rcu_assign_pointer(np->queues[i].xdp_prog, prog);
1659
1660 if (old_prog)
1661 for (i = 0; i < dev->real_num_tx_queues; ++i)
1662 bpf_prog_put(old_prog);
1663
1664 xenbus_switch_state(np->xbdev, XenbusStateConnected);
1665
1666 return 0;
1667 }
1668
1669 static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1670 {
1671 struct netfront_info *np = netdev_priv(dev);
1672
1673 if (np->broken)
1674 return -ENODEV;
1675
1676 switch (xdp->command) {
1677 case XDP_SETUP_PROG:
1678 return xennet_xdp_set(dev, xdp->prog, xdp->extack);
1679 default:
1680 return -EINVAL;
1681 }
1682 }
1683
1684 static const struct net_device_ops xennet_netdev_ops = {
1685 .ndo_uninit = xennet_uninit,
1686 .ndo_open = xennet_open,
1687 .ndo_stop = xennet_close,
1688 .ndo_start_xmit = xennet_start_xmit,
1689 .ndo_change_mtu = xennet_change_mtu,
1690 .ndo_get_stats64 = xennet_get_stats64,
1691 .ndo_set_mac_address = eth_mac_addr,
1692 .ndo_validate_addr = eth_validate_addr,
1693 .ndo_fix_features = xennet_fix_features,
1694 .ndo_set_features = xennet_set_features,
1695 .ndo_select_queue = xennet_select_queue,
1696 .ndo_bpf = xennet_xdp,
1697 .ndo_xdp_xmit = xennet_xdp_xmit,
1698 #ifdef CONFIG_NET_POLL_CONTROLLER
1699 .ndo_poll_controller = xennet_poll_controller,
1700 #endif
1701 };
1702
1703 static void xennet_free_netdev(struct net_device *netdev)
1704 {
1705 struct netfront_info *np = netdev_priv(netdev);
1706
1707 free_percpu(np->rx_stats);
1708 free_percpu(np->tx_stats);
1709 free_netdev(netdev);
1710 }
1711
1712 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1713 {
1714 int err;
1715 struct net_device *netdev;
1716 struct netfront_info *np;
1717
1718 netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1719 if (!netdev)
1720 return ERR_PTR(-ENOMEM);
1721
1722 np = netdev_priv(netdev);
1723 np->xbdev = dev;
1724
1725 np->queues = NULL;
1726
1727 err = -ENOMEM;
1728 np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1729 if (np->rx_stats == NULL)
1730 goto exit;
1731 np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1732 if (np->tx_stats == NULL)
1733 goto exit;
1734
1735 netdev->netdev_ops = &xennet_netdev_ops;
1736
1737 netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1738 NETIF_F_GSO_ROBUST;
1739 netdev->hw_features = NETIF_F_SG |
1740 NETIF_F_IPV6_CSUM |
1741 NETIF_F_TSO | NETIF_F_TSO6;
1742
1743 /*
1744 * Assume that all hw features are available for now. This set
1745 * will be adjusted by the call to netdev_update_features() in
1746 * xennet_connect() which is the earliest point where we can
1747 * negotiate with the backend regarding supported features.
1748 */
1749 netdev->features |= netdev->hw_features;
1750
1751 netdev->ethtool_ops = &xennet_ethtool_ops;
1752 netdev->min_mtu = ETH_MIN_MTU;
1753 netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1754 SET_NETDEV_DEV(netdev, &dev->dev);
1755
1756 np->netdev = netdev;
1757 np->netfront_xdp_enabled = false;
1758
1759 netif_carrier_off(netdev);
1760
1761 do {
1762 xenbus_switch_state(dev, XenbusStateInitialising);
1763 err = wait_event_timeout(module_wq,
1764 xenbus_read_driver_state(dev->otherend) !=
1765 XenbusStateClosed &&
1766 xenbus_read_driver_state(dev->otherend) !=
1767 XenbusStateUnknown, XENNET_TIMEOUT);
1768 } while (!err);
1769
1770 return netdev;
1771
1772 exit:
1773 xennet_free_netdev(netdev);
1774 return ERR_PTR(err);
1775 }
1776
1777 /**
1778 * Entry point to this code when a new device is created. Allocate the basic
1779 * structures and the ring buffers for communication with the backend, and
1780 * inform the backend of the appropriate details for those.
1781 */
1782 static int netfront_probe(struct xenbus_device *dev,
1783 const struct xenbus_device_id *id)
1784 {
1785 int err;
1786 struct net_device *netdev;
1787 struct netfront_info *info;
1788
1789 netdev = xennet_create_dev(dev);
1790 if (IS_ERR(netdev)) {
1791 err = PTR_ERR(netdev);
1792 xenbus_dev_fatal(dev, err, "creating netdev");
1793 return err;
1794 }
1795
1796 info = netdev_priv(netdev);
1797 dev_set_drvdata(&dev->dev, info);
1798 #ifdef CONFIG_SYSFS
1799 info->netdev->sysfs_groups[0] = &xennet_dev_group;
1800 #endif
1801
1802 return 0;
1803 }
1804
1805 static void xennet_end_access(int ref, void *page)
1806 {
1807 /* This frees the page as a side-effect */
1808 if (ref != GRANT_INVALID_REF)
1809 gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1810 }
1811
1812 static void xennet_disconnect_backend(struct netfront_info *info)
1813 {
1814 unsigned int i = 0;
1815 unsigned int num_queues = info->netdev->real_num_tx_queues;
1816
1817 netif_carrier_off(info->netdev);
1818
1819 for (i = 0; i < num_queues && info->queues; ++i) {
1820 struct netfront_queue *queue = &info->queues[i];
1821
1822 del_timer_sync(&queue->rx_refill_timer);
1823
1824 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1825 unbind_from_irqhandler(queue->tx_irq, queue);
1826 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1827 unbind_from_irqhandler(queue->tx_irq, queue);
1828 unbind_from_irqhandler(queue->rx_irq, queue);
1829 }
1830 queue->tx_evtchn = queue->rx_evtchn = 0;
1831 queue->tx_irq = queue->rx_irq = 0;
1832
1833 if (netif_running(info->netdev))
1834 napi_synchronize(&queue->napi);
1835
1836 xennet_release_tx_bufs(queue);
1837 xennet_release_rx_bufs(queue);
1838 gnttab_free_grant_references(queue->gref_tx_head);
1839 gnttab_free_grant_references(queue->gref_rx_head);
1840
1841 /* End access and free the pages */
1842 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1843 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1844
1845 queue->tx_ring_ref = GRANT_INVALID_REF;
1846 queue->rx_ring_ref = GRANT_INVALID_REF;
1847 queue->tx.sring = NULL;
1848 queue->rx.sring = NULL;
1849
1850 page_pool_destroy(queue->page_pool);
1851 }
1852 }
1853
1854 /**
1855 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1856 * driver restart. We tear down our netif structure and recreate it, but
1857 * leave the device-layer structures intact so that this is transparent to the
1858 * rest of the kernel.
1859 */
1860 static int netfront_resume(struct xenbus_device *dev)
1861 {
1862 struct netfront_info *info = dev_get_drvdata(&dev->dev);
1863
1864 dev_dbg(&dev->dev, "%s\n", dev->nodename);
1865
1866 netif_tx_lock_bh(info->netdev);
1867 netif_device_detach(info->netdev);
1868 netif_tx_unlock_bh(info->netdev);
1869
1870 xennet_disconnect_backend(info);
1871
1872 rtnl_lock();
1873 if (info->queues)
1874 xennet_destroy_queues(info);
1875 rtnl_unlock();
1876
1877 return 0;
1878 }
1879
1880 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1881 {
1882 char *s, *e, *macstr;
1883 int i;
1884
1885 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1886 if (IS_ERR(macstr))
1887 return PTR_ERR(macstr);
1888
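/* Parse the colon-separated MAC string one octet at a time. */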
1889 for (i = 0; i < ETH_ALEN; i++) {
1890 mac[i] = simple_strtoul(s, &e, 16);
1891 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1892 kfree(macstr);
1893 return -ENOENT;
1894 }
1895 s = e+1;
1896 }
1897
1898 kfree(macstr);
1899 return 0;
1900 }
1901
1902 static int setup_netfront_single(struct netfront_queue *queue)
1903 {
1904 int err;
1905
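/* A single event channel is shared by TX and RX; one handler serves both rings. */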
1906 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1907 if (err < 0)
1908 goto fail;
1909
1910 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1911 xennet_interrupt, 0,
1912 queue->info->netdev->name,
1913 queue);
1914 if (err < 0)
1915 goto bind_fail;
1916 queue->rx_evtchn = queue->tx_evtchn;
1917 queue->rx_irq = queue->tx_irq = err;
1918
1919 return 0;
1920
1921 bind_fail:
1922 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1923 queue->tx_evtchn = 0;
1924 fail:
1925 return err;
1926 }
1927
1928 static int setup_netfront_split(struct netfront_queue *queue)
1929 {
1930 int err;
1931
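/* Allocate separate TX and RX event channels, each bound to its own handler. */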
1932 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1933 if (err < 0)
1934 goto fail;
1935 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1936 if (err < 0)
1937 goto alloc_rx_evtchn_fail;
1938
1939 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1940 "%s-tx", queue->name);
1941 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1942 xennet_tx_interrupt, 0,
1943 queue->tx_irq_name, queue);
1944 if (err < 0)
1945 goto bind_tx_fail;
1946 queue->tx_irq = err;
1947
1948 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1949 "%s-rx", queue->name);
1950 err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
1951 xennet_rx_interrupt, 0,
1952 queue->rx_irq_name, queue);
1953 if (err < 0)
1954 goto bind_rx_fail;
1955 queue->rx_irq = err;
1956
1957 return 0;
1958
1959 bind_rx_fail:
1960 unbind_from_irqhandler(queue->tx_irq, queue);
1961 queue->tx_irq = 0;
1962 bind_tx_fail:
1963 xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1964 queue->rx_evtchn = 0;
1965 alloc_rx_evtchn_fail:
1966 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1967 queue->tx_evtchn = 0;
1968 fail:
1969 return err;
1970 }
1971
1972 static int setup_netfront(struct xenbus_device *dev,
1973 struct netfront_queue *queue, unsigned int feature_split_evtchn)
1974 {
1975 struct xen_netif_tx_sring *txs;
1976 struct xen_netif_rx_sring *rxs = NULL;
1977 grant_ref_t gref;
1978 int err;
1979
1980 queue->tx_ring_ref = GRANT_INVALID_REF;
1981 queue->rx_ring_ref = GRANT_INVALID_REF;
1982 queue->rx.sring = NULL;
1983 queue->tx.sring = NULL;
1984
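/* Allocate and initialise the shared TX and RX ring pages, then grant the
 * backend access to each of them.
 */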
1985 txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1986 if (!txs) {
1987 err = -ENOMEM;
1988 xenbus_dev_fatal(dev, err, "allocating tx ring page");
1989 goto fail;
1990 }
1991 SHARED_RING_INIT(txs);
1992 FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1993
1994 err = xenbus_grant_ring(dev, txs, 1, &gref);
1995 if (err < 0)
1996 goto fail;
1997 queue->tx_ring_ref = gref;
1998
1999 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
2000 if (!rxs) {
2001 err = -ENOMEM;
2002 xenbus_dev_fatal(dev, err, "allocating rx ring page");
2003 goto fail;
2004 }
2005 SHARED_RING_INIT(rxs);
2006 FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
2007
2008 err = xenbus_grant_ring(dev, rxs, 1, &gref);
2009 if (err < 0)
2010 goto fail;
2011 queue->rx_ring_ref = gref;
2012
2013 if (feature_split_evtchn)
2014 err = setup_netfront_split(queue);
2015 /* Set up a single event channel if
2016 * a) feature-split-event-channels == 0, or
2017 * b) feature-split-event-channels == 1 but split setup failed
2018 */
2019 if (!feature_split_evtchn || (feature_split_evtchn && err))
2020 err = setup_netfront_single(queue);
2021
2022 if (err)
2023 goto fail;
2024
2025 return 0;
2026
2027 /* If we fail to set up netfront, it is safe to just revoke access to the
2028 * granted pages because the backend is not accessing them at this point.
2029 */
2030 fail:
2031 if (queue->rx_ring_ref != GRANT_INVALID_REF) {
2032 gnttab_end_foreign_access(queue->rx_ring_ref, 0,
2033 (unsigned long)rxs);
2034 queue->rx_ring_ref = GRANT_INVALID_REF;
2035 } else {
2036 free_page((unsigned long)rxs);
2037 }
2038 if (queue->tx_ring_ref != GRANT_INVALID_REF) {
2039 gnttab_end_foreign_access(queue->tx_ring_ref, 0,
2040 (unsigned long)txs);
2041 queue->tx_ring_ref = GRANT_INVALID_REF;
2042 } else {
2043 free_page((unsigned long)txs);
2044 }
2045 return err;
2046 }
2047
2048 /* Queue-specific initialisation
2049 * This used to be done in xennet_create_dev() but must now
2050 * be run per-queue.
2051 */
2052 static int xennet_init_queue(struct netfront_queue *queue)
2053 {
2054 unsigned short i;
2055 int err = 0;
2056 char *devid;
2057
2058 spin_lock_init(&queue->tx_lock);
2059 spin_lock_init(&queue->rx_lock);
2060 spin_lock_init(&queue->rx_cons_lock);
2061
2062 timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
2063
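/* The queue name is derived from the xenbus node, e.g. "vif7-q0". */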
2064 devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
2065 snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
2066 devid, queue->id);
2067
2068 /* Initialise tx_skb_freelist as a free chain containing every entry. */
2069 queue->tx_skb_freelist = 0;
2070 queue->tx_pend_queue = TX_LINK_NONE;
2071 for (i = 0; i < NET_TX_RING_SIZE; i++) {
2072 queue->tx_link[i] = i + 1;
2073 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
2074 queue->grant_tx_page[i] = NULL;
2075 }
2076 queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
2077
2078 /* Clear out rx_skbs */
2079 for (i = 0; i < NET_RX_RING_SIZE; i++) {
2080 queue->rx_skbs[i] = NULL;
2081 queue->grant_rx_ref[i] = GRANT_INVALID_REF;
2082 }
2083
2084 /* A grant for every tx ring slot */
2085 if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
2086 &queue->gref_tx_head) < 0) {
2087 pr_alert("can't alloc tx grant refs\n");
2088 err = -ENOMEM;
2089 goto exit;
2090 }
2091
2092 /* A grant for every rx ring slot */
2093 if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
2094 &queue->gref_rx_head) < 0) {
2095 pr_alert("can't alloc rx grant refs\n");
2096 err = -ENOMEM;
2097 goto exit_free_tx;
2098 }
2099
2100 return 0;
2101
2102 exit_free_tx:
2103 gnttab_free_grant_references(queue->gref_tx_head);
2104 exit:
2105 return err;
2106 }
2107
2108 static int write_queue_xenstore_keys(struct netfront_queue *queue,
2109 struct xenbus_transaction *xbt, int write_hierarchical)
2110 {
2111 /* Write the queue-specific keys into XenStore in the traditional
2112 * way for a single queue, or in per-queue subkeys for multiple
2113 * queues.
2114 */
2115 struct xenbus_device *dev = queue->info->xbdev;
2116 int err;
2117 const char *message;
2118 char *path;
2119 size_t pathsize;
2120
2121 /* Choose the correct place to write the keys */
2122 if (write_hierarchical) {
2123 pathsize = strlen(dev->nodename) + 10;
2124 path = kzalloc(pathsize, GFP_KERNEL);
2125 if (!path) {
2126 err = -ENOMEM;
2127 message = "out of memory while writing ring references";
2128 goto error;
2129 }
2130 snprintf(path, pathsize, "%s/queue-%u",
2131 dev->nodename, queue->id);
2132 } else {
2133 path = (char *)dev->nodename;
2134 }
2135
2136 /* Write ring references */
2137 err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
2138 queue->tx_ring_ref);
2139 if (err) {
2140 message = "writing tx-ring-ref";
2141 goto error;
2142 }
2143
2144 err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
2145 queue->rx_ring_ref);
2146 if (err) {
2147 message = "writing rx-ring-ref";
2148 goto error;
2149 }
2150
2151 /* Write event channels; taking into account both shared
2152 * and split event channel scenarios.
2153 */
2154 if (queue->tx_evtchn == queue->rx_evtchn) {
2155 /* Shared event channel */
2156 err = xenbus_printf(*xbt, path,
2157 "event-channel", "%u", queue->tx_evtchn);
2158 if (err) {
2159 message = "writing event-channel";
2160 goto error;
2161 }
2162 } else {
2163 /* Split event channels */
2164 err = xenbus_printf(*xbt, path,
2165 "event-channel-tx", "%u", queue->tx_evtchn);
2166 if (err) {
2167 message = "writing event-channel-tx";
2168 goto error;
2169 }
2170
2171 err = xenbus_printf(*xbt, path,
2172 "event-channel-rx", "%u", queue->rx_evtchn);
2173 if (err) {
2174 message = "writing event-channel-rx";
2175 goto error;
2176 }
2177 }
2178
2179 if (write_hierarchical)
2180 kfree(path);
2181 return 0;
2182
2183 error:
2184 if (write_hierarchical)
2185 kfree(path);
2186 xenbus_dev_fatal(dev, err, "%s", message);
2187 return err;
2188 }
2189
2190
2191
2192 static int xennet_create_page_pool(struct netfront_queue *queue)
2193 {
2194 int err;
2195 struct page_pool_params pp_params = {
2196 .order = 0,
2197 .flags = 0,
2198 .pool_size = NET_RX_RING_SIZE,
2199 .nid = NUMA_NO_NODE,
2200 .dev = &queue->info->netdev->dev,
2201 .offset = XDP_PACKET_HEADROOM,
2202 .max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
2203 };
2204
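/* Every pool page reserves XDP_PACKET_HEADROOM up front so an attached XDP
 * program has room to grow packet headers.
 */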
2205 queue->page_pool = page_pool_create(&pp_params);
2206 if (IS_ERR(queue->page_pool)) {
2207 err = PTR_ERR(queue->page_pool);
2208 queue->page_pool = NULL;
2209 return err;
2210 }
2211
2212 err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
2213 queue->id);
2214 if (err) {
2215 netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
2216 goto err_free_pp;
2217 }
2218
2219 err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
2220 MEM_TYPE_PAGE_POOL, queue->page_pool);
2221 if (err) {
2222 netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
2223 goto err_unregister_rxq;
2224 }
2225 return 0;
2226
2227 err_unregister_rxq:
2228 xdp_rxq_info_unreg(&queue->xdp_rxq);
2229 err_free_pp:
2230 page_pool_destroy(queue->page_pool);
2231 queue->page_pool = NULL;
2232 return err;
2233 }
2234
2235 static int xennet_create_queues(struct netfront_info *info,
2236 unsigned int *num_queues)
2237 {
2238 unsigned int i;
2239 int ret;
2240
2241 info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
2242 GFP_KERNEL);
2243 if (!info->queues)
2244 return -ENOMEM;
2245
2246 for (i = 0; i < *num_queues; i++) {
2247 struct netfront_queue *queue = &info->queues[i];
2248
2249 queue->id = i;
2250 queue->info = info;
2251
2252 ret = xennet_init_queue(queue);
2253 if (ret < 0) {
2254 dev_warn(&info->xbdev->dev,
2255 "only created %d queues\n", i);
2256 *num_queues = i;
2257 break;
2258 }
2259
2260 /* use page pool recycling instead of buddy allocator */
2261 ret = xennet_create_page_pool(queue);
2262 if (ret < 0) {
2263 dev_err(&info->xbdev->dev, "can't allocate page pool\n");
2264 *num_queues = i;
2265 return ret;
2266 }
2267
2268 netif_napi_add(queue->info->netdev, &queue->napi,
2269 xennet_poll, 64);
2270 if (netif_running(info->netdev))
2271 napi_enable(&queue->napi);
2272 }
2273
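/* Report the number of queues actually created to the networking core. */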
2274 netif_set_real_num_tx_queues(info->netdev, *num_queues);
2275
2276 if (*num_queues == 0) {
2277 dev_err(&info->xbdev->dev, "no queues\n");
2278 return -EINVAL;
2279 }
2280 return 0;
2281 }
2282
2283 /* Common code used when first setting up, and when resuming. */
2284 static int talk_to_netback(struct xenbus_device *dev,
2285 struct netfront_info *info)
2286 {
2287 const char *message;
2288 struct xenbus_transaction xbt;
2289 int err;
2290 unsigned int feature_split_evtchn;
2291 unsigned int i = 0;
2292 unsigned int max_queues = 0;
2293 struct netfront_queue *queue = NULL;
2294 unsigned int num_queues = 1;
2295
2296 info->netdev->irq = 0;
2297
2298 /* Check if backend is trusted. */
2299 info->bounce = !xennet_trusted ||
2300 !xenbus_read_unsigned(dev->nodename, "trusted", 1);
2301
2302 /* Check if backend supports multiple queues */
2303 max_queues = xenbus_read_unsigned(info->xbdev->otherend,
2304 "multi-queue-max-queues", 1);
2305 num_queues = min(max_queues, xennet_max_queues);
2306
2307 /* Check feature-split-event-channels */
2308 feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
2309 "feature-split-event-channels", 0);
2310
2311 /* Read mac addr. */
2312 err = xen_net_read_mac(dev, info->netdev->dev_addr);
2313 if (err) {
2314 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
2315 goto out_unlocked;
2316 }
2317
2318 info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
2319 "feature-xdp-headroom", 0);
2320 if (info->netback_has_xdp_headroom) {
2321 /* set the current xen-netfront xdp state */
2322 err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
2323 NETBACK_XDP_HEADROOM_ENABLE :
2324 NETBACK_XDP_HEADROOM_DISABLE);
2325 if (err)
2326 goto out_unlocked;
2327 }
2328
2329 rtnl_lock();
2330 if (info->queues)
2331 xennet_destroy_queues(info);
2332
2333 /* For the case of a reconnect reset the "broken" indicator. */
2334 info->broken = false;
2335
2336 err = xennet_create_queues(info, &num_queues);
2337 if (err < 0) {
2338 xenbus_dev_fatal(dev, err, "creating queues");
2339 kfree(info->queues);
2340 info->queues = NULL;
2341 goto out;
2342 }
2343 rtnl_unlock();
2344
2345 /* Create shared ring, alloc event channel -- for each queue */
2346 for (i = 0; i < num_queues; ++i) {
2347 queue = &info->queues[i];
2348 err = setup_netfront(dev, queue, feature_split_evtchn);
2349 if (err)
2350 goto destroy_ring;
2351 }
2352
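/* Publish the frontend configuration in a single xenstore transaction;
 * -EAGAIN from xenbus_transaction_end() means a conflicting update occurred,
 * so restart from "again".
 */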
2353 again:
2354 err = xenbus_transaction_start(&xbt);
2355 if (err) {
2356 xenbus_dev_fatal(dev, err, "starting transaction");
2357 goto destroy_ring;
2358 }
2359
2360 if (xenbus_exists(XBT_NIL,
2361 info->xbdev->otherend, "multi-queue-max-queues")) {
2362 /* Write the number of queues */
2363 err = xenbus_printf(xbt, dev->nodename,
2364 "multi-queue-num-queues", "%u", num_queues);
2365 if (err) {
2366 message = "writing multi-queue-num-queues";
2367 goto abort_transaction_no_dev_fatal;
2368 }
2369 }
2370
2371 if (num_queues == 1) {
2372 err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
2373 if (err)
2374 goto abort_transaction_no_dev_fatal;
2375 } else {
2376 /* Write the keys for each queue */
2377 for (i = 0; i < num_queues; ++i) {
2378 queue = &info->queues[i];
2379 err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
2380 if (err)
2381 goto abort_transaction_no_dev_fatal;
2382 }
2383 }
2384
2385 /* The remaining keys are not queue-specific */
2386 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
2387 1);
2388 if (err) {
2389 message = "writing request-rx-copy";
2390 goto abort_transaction;
2391 }
2392
2393 err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
2394 if (err) {
2395 message = "writing feature-rx-notify";
2396 goto abort_transaction;
2397 }
2398
2399 err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
2400 if (err) {
2401 message = "writing feature-sg";
2402 goto abort_transaction;
2403 }
2404
2405 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
2406 if (err) {
2407 message = "writing feature-gso-tcpv4";
2408 goto abort_transaction;
2409 }
2410
2411 err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
2412 if (err) {
2413 message = "writing feature-gso-tcpv6";
2414 goto abort_transaction;
2415 }
2416
2417 err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
2418 "1");
2419 if (err) {
2420 message = "writing feature-ipv6-csum-offload";
2421 goto abort_transaction;
2422 }
2423
2424 err = xenbus_transaction_end(xbt, 0);
2425 if (err) {
2426 if (err == -EAGAIN)
2427 goto again;
2428 xenbus_dev_fatal(dev, err, "completing transaction");
2429 goto destroy_ring;
2430 }
2431
2432 return 0;
2433
2434 abort_transaction:
2435 xenbus_dev_fatal(dev, err, "%s", message);
2436 abort_transaction_no_dev_fatal:
2437 xenbus_transaction_end(xbt, 1);
2438 destroy_ring:
2439 xennet_disconnect_backend(info);
2440 rtnl_lock();
2441 xennet_destroy_queues(info);
2442 out:
2443 rtnl_unlock();
2444 out_unlocked:
2445 device_unregister(&dev->dev);
2446 return err;
2447 }
2448
2449 static int xennet_connect(struct net_device *dev)
2450 {
2451 struct netfront_info *np = netdev_priv(dev);
2452 unsigned int num_queues = 0;
2453 int err;
2454 unsigned int j = 0;
2455 struct netfront_queue *queue = NULL;
2456
2457 if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
2458 dev_info(&dev->dev,
2459 "backend does not support copying receive path\n");
2460 return -ENODEV;
2461 }
2462
2463 err = talk_to_netback(np->xbdev, np);
2464 if (err)
2465 return err;
2466 if (np->netback_has_xdp_headroom)
2467 pr_info("backend supports XDP headroom\n");
2468 if (np->bounce)
2469 dev_info(&np->xbdev->dev,
2470 "bouncing transmitted data to zeroed pages\n");
2471
2472 /* talk_to_netback() sets the correct number of queues */
2473 num_queues = dev->real_num_tx_queues;
2474
2475 if (dev->reg_state == NETREG_UNINITIALIZED) {
2476 err = register_netdev(dev);
2477 if (err) {
2478 pr_warn("%s: register_netdev err=%d\n", __func__, err);
2479 device_unregister(&np->xbdev->dev);
2480 return err;
2481 }
2482 }
2483
2484 rtnl_lock();
2485 netdev_update_features(dev);
2486 rtnl_unlock();
2487
2488 /*
2489 * All public and private state should now be sane. Get
2490 * ready to start sending and receiving packets and give the driver
2491 * domain a kick because we've probably just requeued some
2492 * packets.
2493 */
2494 netif_tx_lock_bh(np->netdev);
2495 netif_device_attach(np->netdev);
2496 netif_tx_unlock_bh(np->netdev);
2497
2498 netif_carrier_on(np->netdev);
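/* Kick every queue: notify the backend, reclaim completed TX slots and
 * refill the RX ring.
 */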
2499 for (j = 0; j < num_queues; ++j) {
2500 queue = &np->queues[j];
2501
2502 notify_remote_via_irq(queue->tx_irq);
2503 if (queue->tx_irq != queue->rx_irq)
2504 notify_remote_via_irq(queue->rx_irq);
2505
2506 spin_lock_irq(&queue->tx_lock);
2507 xennet_tx_buf_gc(queue);
2508 spin_unlock_irq(&queue->tx_lock);
2509
2510 spin_lock_bh(&queue->rx_lock);
2511 xennet_alloc_rx_buffers(queue);
2512 spin_unlock_bh(&queue->rx_lock);
2513 }
2514
2515 return 0;
2516 }
2517
2518 /**
2519 * Callback received when the backend's state changes.
2520 */
2521 static void netback_changed(struct xenbus_device *dev,
2522 enum xenbus_state backend_state)
2523 {
2524 struct netfront_info *np = dev_get_drvdata(&dev->dev);
2525 struct net_device *netdev = np->netdev;
2526
2527 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2528
2529 wake_up_all(&module_wq);
2530
2531 switch (backend_state) {
2532 case XenbusStateInitialising:
2533 case XenbusStateInitialised:
2534 case XenbusStateReconfiguring:
2535 case XenbusStateReconfigured:
2536 case XenbusStateUnknown:
2537 break;
2538
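/* The backend is waiting for the frontend: connect, then advertise Connected. */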
2539 case XenbusStateInitWait:
2540 if (dev->state != XenbusStateInitialising)
2541 break;
2542 if (xennet_connect(netdev) != 0)
2543 break;
2544 xenbus_switch_state(dev, XenbusStateConnected);
2545 break;
2546
2547 case XenbusStateConnected:
2548 netdev_notify_peers(netdev);
2549 break;
2550
2551 case XenbusStateClosed:
2552 if (dev->state == XenbusStateClosed)
2553 break;
2554 fallthrough; /* Missed the backend's CLOSING state */
2555 case XenbusStateClosing:
2556 xenbus_frontend_closed(dev);
2557 break;
2558 }
2559 }
2560
2561 static const struct xennet_stat {
2562 char name[ETH_GSTRING_LEN];
2563 u16 offset;
2564 } xennet_stats[] = {
2565 {
2566 "rx_gso_checksum_fixup",
2567 offsetof(struct netfront_info, rx_gso_checksum_fixup)
2568 },
2569 };
2570
2571 static int xennet_get_sset_count(struct net_device *dev, int string_set)
2572 {
2573 switch (string_set) {
2574 case ETH_SS_STATS:
2575 return ARRAY_SIZE(xennet_stats);
2576 default:
2577 return -EINVAL;
2578 }
2579 }
2580
2581 static void xennet_get_ethtool_stats(struct net_device *dev,
2582 struct ethtool_stats *stats, u64 * data)
2583 {
2584 void *np = netdev_priv(dev);
2585 int i;
2586
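/* Each statistic is an atomic_t stored at a fixed offset within netfront_info. */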
2587 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2588 data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2589 }
2590
2591 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
2592 {
2593 int i;
2594
2595 switch (stringset) {
2596 case ETH_SS_STATS:
2597 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2598 memcpy(data + i * ETH_GSTRING_LEN,
2599 xennet_stats[i].name, ETH_GSTRING_LEN);
2600 break;
2601 }
2602 }
2603
2604 static const struct ethtool_ops xennet_ethtool_ops =
2605 {
2606 .get_link = ethtool_op_get_link,
2607
2608 .get_sset_count = xennet_get_sset_count,
2609 .get_ethtool_stats = xennet_get_ethtool_stats,
2610 .get_strings = xennet_get_strings,
2611 .get_ts_info = ethtool_op_get_ts_info,
2612 };
2613
2614 #ifdef CONFIG_SYSFS
2615 static ssize_t show_rxbuf(struct device *dev,
2616 struct device_attribute *attr, char *buf)
2617 {
2618 return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2619 }
2620
2621 static ssize_t store_rxbuf(struct device *dev,
2622 struct device_attribute *attr,
2623 const char *buf, size_t len)
2624 {
2625 char *endp;
2626 unsigned long target;
2627
2628 if (!capable(CAP_NET_ADMIN))
2629 return -EPERM;
2630
2631 target = simple_strtoul(buf, &endp, 0);
2632 if (endp == buf)
2633 return -EBADMSG;
2634
2635 /* rxbuf_min and rxbuf_max are no longer configurable. */
2636
2637 return len;
2638 }
2639
2640 static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
2641 static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
2642 static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
2643
2644 static struct attribute *xennet_dev_attrs[] = {
2645 &dev_attr_rxbuf_min.attr,
2646 &dev_attr_rxbuf_max.attr,
2647 &dev_attr_rxbuf_cur.attr,
2648 NULL
2649 };
2650
2651 static const struct attribute_group xennet_dev_group = {
2652 .attrs = xennet_dev_attrs
2653 };
2654 #endif /* CONFIG_SYSFS */
2655
2656 static void xennet_bus_close(struct xenbus_device *dev)
2657 {
2658 int ret;
2659
2660 if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2661 return;
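/* Walk the backend through Closing and then Closed, waiting (and retrying
 * on timeout) for it to follow each state change.
 */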
2662 do {
2663 xenbus_switch_state(dev, XenbusStateClosing);
2664 ret = wait_event_timeout(module_wq,
2665 xenbus_read_driver_state(dev->otherend) ==
2666 XenbusStateClosing ||
2667 xenbus_read_driver_state(dev->otherend) ==
2668 XenbusStateClosed ||
2669 xenbus_read_driver_state(dev->otherend) ==
2670 XenbusStateUnknown,
2671 XENNET_TIMEOUT);
2672 } while (!ret);
2673
2674 if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2675 return;
2676
2677 do {
2678 xenbus_switch_state(dev, XenbusStateClosed);
2679 ret = wait_event_timeout(module_wq,
2680 xenbus_read_driver_state(dev->otherend) ==
2681 XenbusStateClosed ||
2682 xenbus_read_driver_state(dev->otherend) ==
2683 XenbusStateUnknown,
2684 XENNET_TIMEOUT);
2685 } while (!ret);
2686 }
2687
2688 static int xennet_remove(struct xenbus_device *dev)
2689 {
2690 struct netfront_info *info = dev_get_drvdata(&dev->dev);
2691
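/* Close the xenbus connection first, then tear down rings and buffers. */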
2692 xennet_bus_close(dev);
2693 xennet_disconnect_backend(info);
2694
2695 if (info->netdev->reg_state == NETREG_REGISTERED)
2696 unregister_netdev(info->netdev);
2697
2698 if (info->queues) {
2699 rtnl_lock();
2700 xennet_destroy_queues(info);
2701 rtnl_unlock();
2702 }
2703 xennet_free_netdev(info->netdev);
2704
2705 return 0;
2706 }
2707
2708 static const struct xenbus_device_id netfront_ids[] = {
2709 { "vif" },
2710 { "" }
2711 };
2712
2713 static struct xenbus_driver netfront_driver = {
2714 .ids = netfront_ids,
2715 .probe = netfront_probe,
2716 .remove = xennet_remove,
2717 .resume = netfront_resume,
2718 .otherend_changed = netback_changed,
2719 };
2720
2721 static int __init netif_init(void)
2722 {
2723 if (!xen_domain())
2724 return -ENODEV;
2725
2726 if (!xen_has_pv_nic_devices())
2727 return -ENODEV;
2728
2729 pr_info("Initialising Xen virtual ethernet driver\n");
2730
2731 /* Allow as many queues as there are CPUs but max. 8 if user has not
2732 * specified a value.
2733 */
2734 if (xennet_max_queues == 0)
2735 xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
2736 num_online_cpus());
2737
2738 return xenbus_register_frontend(&netfront_driver);
2739 }
2740 module_init(netif_init);
2741
2742
2743 static void __exit netif_exit(void)
2744 {
2745 xenbus_unregister_driver(&netfront_driver);
2746 }
2747 module_exit(netif_exit);
2748
2749 MODULE_DESCRIPTION("Xen virtual network device frontend");
2750 MODULE_LICENSE("GPL");
2751 MODULE_ALIAS("xen:vif");
2752 MODULE_ALIAS("xennet");
2753