1 /*
2 * Virtual network driver for conversing with remote driver backends.
3 *
4 * Copyright (c) 2002-2005, K A Fraser
5 * Copyright (c) 2005, XenSource Ltd
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation; or, when distributed
10 * separately from the Linux kernel or incorporated into other
11 * software packages, subject to the following license:
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this source file (the "Software"), to deal in the Software without
15 * restriction, including without limitation the rights to use, copy, modify,
16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17 * and to permit persons to whom the Software is furnished to do so, subject to
18 * the following conditions:
19 *
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29 * IN THE SOFTWARE.
30 */
31
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34 #include <linux/module.h>
35 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/ethtool.h>
40 #include <linux/if_ether.h>
41 #include <net/tcp.h>
42 #include <linux/udp.h>
43 #include <linux/moduleparam.h>
44 #include <linux/mm.h>
45 #include <linux/slab.h>
46 #include <net/ip.h>
47 #include <linux/bpf.h>
48 #include <net/page_pool.h>
49 #include <linux/bpf_trace.h>
50
51 #include <xen/xen.h>
52 #include <xen/xenbus.h>
53 #include <xen/events.h>
54 #include <xen/page.h>
55 #include <xen/platform_pci.h>
56 #include <xen/grant_table.h>
57
58 #include <xen/interface/io/netif.h>
59 #include <xen/interface/memory.h>
60 #include <xen/interface/grant_table.h>
61
62 /* Module parameters */
63 #define MAX_QUEUES_DEFAULT 8
64 static unsigned int xennet_max_queues;
65 module_param_named(max_queues, xennet_max_queues, uint, 0644);
66 MODULE_PARM_DESC(max_queues,
67 "Maximum number of queues per virtual interface");
68
69 #define XENNET_TIMEOUT (5 * HZ)
70
71 static const struct ethtool_ops xennet_ethtool_ops;
72
73 struct netfront_cb {
74 int pull_to;
75 };
76
77 #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
78
79 #define RX_COPY_THRESHOLD 256
80
81 #define GRANT_INVALID_REF 0
82
83 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
84 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
85
86 /* Minimum number of Rx slots (includes slot for GSO metadata). */
87 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
88
89 /* Queue name is interface name with "-qNNN" appended */
90 #define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
91
92 /* IRQ name is queue name with "-tx" or "-rx" appended */
93 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
94
95 static DECLARE_WAIT_QUEUE_HEAD(module_wq);
96
97 struct netfront_stats {
98 u64 packets;
99 u64 bytes;
100 struct u64_stats_sync syncp;
101 };
102
103 struct netfront_info;
104
105 struct netfront_queue {
106 unsigned int id; /* Queue ID, 0-based */
107 char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
108 struct netfront_info *info;
109
110 struct bpf_prog __rcu *xdp_prog;
111
112 struct napi_struct napi;
113
114 /* Split event channels support, tx_* == rx_* when using
115 * single event channel.
116 */
117 unsigned int tx_evtchn, rx_evtchn;
118 unsigned int tx_irq, rx_irq;
119 /* Only used when split event channels support is enabled */
120 char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
121 char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
122
123 spinlock_t tx_lock;
124 struct xen_netif_tx_front_ring tx;
125 int tx_ring_ref;
126
127 /*
128 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
129 * are linked from tx_skb_freelist through tx_link.
130 */
131 struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
132 unsigned short tx_link[NET_TX_RING_SIZE];
133 #define TX_LINK_NONE 0xffff
134 #define TX_PENDING 0xfffe
135 grant_ref_t gref_tx_head;
136 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
137 struct page *grant_tx_page[NET_TX_RING_SIZE];
138 unsigned tx_skb_freelist;
139 unsigned int tx_pend_queue;
140
141 spinlock_t rx_lock ____cacheline_aligned_in_smp;
142 struct xen_netif_rx_front_ring rx;
143 int rx_ring_ref;
144
145 struct timer_list rx_refill_timer;
146
147 struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
148 grant_ref_t gref_rx_head;
149 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
150
151 unsigned int rx_rsp_unconsumed;
152 spinlock_t rx_cons_lock;
153
154 struct page_pool *page_pool;
155 struct xdp_rxq_info xdp_rxq;
156 };
157
158 struct netfront_info {
159 struct list_head list;
160 struct net_device *netdev;
161
162 struct xenbus_device *xbdev;
163
164 /* Multi-queue support */
165 struct netfront_queue *queues;
166
167 /* Statistics */
168 struct netfront_stats __percpu *rx_stats;
169 struct netfront_stats __percpu *tx_stats;
170
171 /* XDP state */
172 bool netback_has_xdp_headroom;
173 bool netfront_xdp_enabled;
174
175 /* Is the device behaving sanely? */
176 bool broken;
177
178 atomic_t rx_gso_checksum_fixup;
179 };
180
181 struct netfront_rx_info {
182 struct xen_netif_rx_response rx;
183 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
184 };
185
186 /*
187 * Helpers for acquiring and freeing slots in tx_skbs[].
188 */
189
190 static void add_id_to_list(unsigned *head, unsigned short *list,
191 unsigned short id)
192 {
193 list[id] = *head;
194 *head = id;
195 }
196
197 static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
198 {
199 unsigned int id = *head;
200
201 if (id != TX_LINK_NONE) {
202 *head = list[id];
203 list[id] = TX_LINK_NONE;
204 }
205 return id;
206 }
207
208 static int xennet_rxidx(RING_IDX idx)
209 {
210 return idx & (NET_RX_RING_SIZE - 1);
211 }
212
213 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
214 RING_IDX ri)
215 {
216 int i = xennet_rxidx(ri);
217 struct sk_buff *skb = queue->rx_skbs[i];
218 queue->rx_skbs[i] = NULL;
219 return skb;
220 }
221
222 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
223 RING_IDX ri)
224 {
225 int i = xennet_rxidx(ri);
226 grant_ref_t ref = queue->grant_rx_ref[i];
227 queue->grant_rx_ref[i] = GRANT_INVALID_REF;
228 return ref;
229 }
230
231 #ifdef CONFIG_SYSFS
232 static const struct attribute_group xennet_dev_group;
233 #endif
234
235 static bool xennet_can_sg(struct net_device *dev)
236 {
237 return dev->features & NETIF_F_SG;
238 }
239
240
241 static void rx_refill_timeout(struct timer_list *t)
242 {
243 struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
244 napi_schedule(&queue->napi);
245 }
246
247 static int netfront_tx_slot_available(struct netfront_queue *queue)
248 {
249 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
250 (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
251 }
252
253 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
254 {
255 struct net_device *dev = queue->info->netdev;
256 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
257
258 if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
259 netfront_tx_slot_available(queue) &&
260 likely(netif_running(dev)))
261 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
262 }
263
264
265 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
266 {
267 struct sk_buff *skb;
268 struct page *page;
269
270 skb = __netdev_alloc_skb(queue->info->netdev,
271 RX_COPY_THRESHOLD + NET_IP_ALIGN,
272 GFP_ATOMIC | __GFP_NOWARN);
273 if (unlikely(!skb))
274 return NULL;
275
276 page = page_pool_dev_alloc_pages(queue->page_pool);
277 if (unlikely(!page)) {
278 kfree_skb(skb);
279 return NULL;
280 }
281 skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
282
283 /* Align the IP header to a 16-byte boundary */
284 skb_reserve(skb, NET_IP_ALIGN);
285 skb->dev = queue->info->netdev;
286
287 return skb;
288 }
289
290
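/*
 * Refill the rx ring with freshly granted pages, up to the ring size.
 * If skb/page allocation fails or too few slots could be filled, the
 * refill timer is re-armed instead of notifying the backend.
 */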
291 static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
292 {
293 RING_IDX req_prod = queue->rx.req_prod_pvt;
294 int notify;
295 int err = 0;
296
297 if (unlikely(!netif_carrier_ok(queue->info->netdev)))
298 return;
299
300 for (req_prod = queue->rx.req_prod_pvt;
301 req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
302 req_prod++) {
303 struct sk_buff *skb;
304 unsigned short id;
305 grant_ref_t ref;
306 struct page *page;
307 struct xen_netif_rx_request *req;
308
309 skb = xennet_alloc_one_rx_buffer(queue);
310 if (!skb) {
311 err = -ENOMEM;
312 break;
313 }
314
315 id = xennet_rxidx(req_prod);
316
317 BUG_ON(queue->rx_skbs[id]);
318 queue->rx_skbs[id] = skb;
319
320 ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
321 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
322 queue->grant_rx_ref[id] = ref;
323
324 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
325
326 req = RING_GET_REQUEST(&queue->rx, req_prod);
327 gnttab_page_grant_foreign_access_ref_one(ref,
328 queue->info->xbdev->otherend_id,
329 page,
330 0);
331 req->id = id;
332 req->gref = ref;
333 }
334
335 queue->rx.req_prod_pvt = req_prod;
336
337 /* Try again later if there are not enough requests or skb allocation
338 * failed.
339 * Enough requests is quantified as the sum of newly created slots and
340 * the unconsumed slots at the backend.
341 */
342 if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
343 unlikely(err)) {
344 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
345 return;
346 }
347
348 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
349 if (notify)
350 notify_remote_via_irq(queue->rx_irq);
351 }
352
353 static int xennet_open(struct net_device *dev)
354 {
355 struct netfront_info *np = netdev_priv(dev);
356 unsigned int num_queues = dev->real_num_tx_queues;
357 unsigned int i = 0;
358 struct netfront_queue *queue = NULL;
359
360 if (!np->queues || np->broken)
361 return -ENODEV;
362
363 for (i = 0; i < num_queues; ++i) {
364 queue = &np->queues[i];
365 napi_enable(&queue->napi);
366
367 spin_lock_bh(&queue->rx_lock);
368 if (netif_carrier_ok(dev)) {
369 xennet_alloc_rx_buffers(queue);
370 queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
371 if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
372 napi_schedule(&queue->napi);
373 }
374 spin_unlock_bh(&queue->rx_lock);
375 }
376
377 netif_tx_start_all_queues(dev);
378
379 return 0;
380 }
381
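/*
 * Reclaim slots for transmit requests the backend has completed:
 * validate each response id and its grant state, release the grant
 * reference, free the skb and return the slot to the free list.
 * Any malformed response marks the device as broken.
 */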
382 static bool xennet_tx_buf_gc(struct netfront_queue *queue)
383 {
384 RING_IDX cons, prod;
385 unsigned short id;
386 struct sk_buff *skb;
387 bool more_to_do;
388 bool work_done = false;
389 const struct device *dev = &queue->info->netdev->dev;
390
391 BUG_ON(!netif_carrier_ok(queue->info->netdev));
392
393 do {
394 prod = queue->tx.sring->rsp_prod;
395 if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
396 dev_alert(dev, "Illegal number of responses %u\n",
397 prod - queue->tx.rsp_cons);
398 goto err;
399 }
400 rmb(); /* Ensure we see responses up to 'rp'. */
401
402 for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
403 struct xen_netif_tx_response txrsp;
404
405 work_done = true;
406
407 RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
408 if (txrsp.status == XEN_NETIF_RSP_NULL)
409 continue;
410
411 id = txrsp.id;
412 if (id >= RING_SIZE(&queue->tx)) {
413 dev_alert(dev,
414 "Response has incorrect id (%u)\n",
415 id);
416 goto err;
417 }
418 if (queue->tx_link[id] != TX_PENDING) {
419 dev_alert(dev,
420 "Response for inactive request\n");
421 goto err;
422 }
423
424 queue->tx_link[id] = TX_LINK_NONE;
425 skb = queue->tx_skbs[id];
426 queue->tx_skbs[id] = NULL;
427 if (unlikely(gnttab_query_foreign_access(
428 queue->grant_tx_ref[id]) != 0)) {
429 dev_alert(dev,
430 "Grant still in use by backend domain\n");
431 goto err;
432 }
433 gnttab_end_foreign_access_ref(
434 queue->grant_tx_ref[id], GNTMAP_readonly);
435 gnttab_release_grant_reference(
436 &queue->gref_tx_head, queue->grant_tx_ref[id]);
437 queue->grant_tx_ref[id] = GRANT_INVALID_REF;
438 queue->grant_tx_page[id] = NULL;
439 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
440 dev_kfree_skb_irq(skb);
441 }
442
443 queue->tx.rsp_cons = prod;
444
445 RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
446 } while (more_to_do);
447
448 xennet_maybe_wake_tx(queue);
449
450 return work_done;
451
452 err:
453 queue->info->broken = true;
454 dev_alert(dev, "Disabled for further use\n");
455
456 return work_done;
457 }
458
459 struct xennet_gnttab_make_txreq {
460 struct netfront_queue *queue;
461 struct sk_buff *skb;
462 struct page *page;
463 struct xen_netif_tx_request *tx; /* Last request on ring page */
464 struct xen_netif_tx_request tx_local; /* Last request local copy */
465 unsigned int size;
466 };
467
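/*
 * Grant the backend read-only access to one page fragment and fill in a
 * single tx request slot on the shared ring; the request id is queued on
 * tx_pend_queue until the producer index is pushed.
 */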
468 static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
469 unsigned int len, void *data)
470 {
471 struct xennet_gnttab_make_txreq *info = data;
472 unsigned int id;
473 struct xen_netif_tx_request *tx;
474 grant_ref_t ref;
475 /* convenient aliases */
476 struct page *page = info->page;
477 struct netfront_queue *queue = info->queue;
478 struct sk_buff *skb = info->skb;
479
480 id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
481 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
482 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
483 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
484
485 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
486 gfn, GNTMAP_readonly);
487
488 queue->tx_skbs[id] = skb;
489 queue->grant_tx_page[id] = page;
490 queue->grant_tx_ref[id] = ref;
491
492 info->tx_local.id = id;
493 info->tx_local.gref = ref;
494 info->tx_local.offset = offset;
495 info->tx_local.size = len;
496 info->tx_local.flags = 0;
497
498 *tx = info->tx_local;
499
500 /*
501 * Put the request on the pending list; it will be marked as pending
502 * just before the producer index is pushed to the backend.
503 */
504 add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
505
506 info->tx = tx;
507 info->size += info->tx_local.size;
508 }
509
510 static struct xen_netif_tx_request *xennet_make_first_txreq(
511 struct xennet_gnttab_make_txreq *info,
512 unsigned int offset, unsigned int len)
513 {
514 info->size = 0;
515
516 gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
517
518 return info->tx;
519 }
520
521 static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
522 unsigned int len, void *data)
523 {
524 struct xennet_gnttab_make_txreq *info = data;
525
526 info->tx->flags |= XEN_NETTXF_more_data;
527 skb_get(info->skb);
528 xennet_tx_setup_grant(gfn, offset, len, data);
529 }
530
531 static void xennet_make_txreqs(
532 struct xennet_gnttab_make_txreq *info,
533 struct page *page,
534 unsigned int offset, unsigned int len)
535 {
536 /* Skip unused frames from start of page */
537 page += offset >> PAGE_SHIFT;
538 offset &= ~PAGE_MASK;
539
540 while (len) {
541 info->page = page;
542 info->size = 0;
543
544 gnttab_foreach_grant_in_range(page, offset, len,
545 xennet_make_one_txreq,
546 info);
547
548 page++;
549 offset = 0;
550 len -= info->size;
551 }
552 }
553
554 /*
555 * Count how many ring slots are required to send this skb. Each frag
556 * might be a compound page.
557 */
558 static int xennet_count_skb_slots(struct sk_buff *skb)
559 {
560 int i, frags = skb_shinfo(skb)->nr_frags;
561 int slots;
562
563 slots = gnttab_count_grant(offset_in_page(skb->data),
564 skb_headlen(skb));
565
566 for (i = 0; i < frags; i++) {
567 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
568 unsigned long size = skb_frag_size(frag);
569 unsigned long offset = skb_frag_off(frag);
570
571 /* Skip unused frames from start of page */
572 offset &= ~PAGE_MASK;
573
574 slots += gnttab_count_grant(offset, size);
575 }
576
577 return slots;
578 }
579
580 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
581 struct net_device *sb_dev)
582 {
583 unsigned int num_queues = dev->real_num_tx_queues;
584 u32 hash;
585 u16 queue_idx;
586
587 /* First, check if there is only one queue */
588 if (num_queues == 1) {
589 queue_idx = 0;
590 } else {
591 hash = skb_get_hash(skb);
592 queue_idx = hash % num_queues;
593 }
594
595 return queue_idx;
596 }
597
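/*
 * Mark all requests queued since the last push as pending; called right
 * before the producer index is made visible to the backend.
 */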
598 static void xennet_mark_tx_pending(struct netfront_queue *queue)
599 {
600 unsigned int i;
601
602 while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
603 TX_LINK_NONE)
604 queue->tx_link[i] = TX_PENDING;
605 }
606
607 static int xennet_xdp_xmit_one(struct net_device *dev,
608 struct netfront_queue *queue,
609 struct xdp_frame *xdpf)
610 {
611 struct netfront_info *np = netdev_priv(dev);
612 struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
613 struct xennet_gnttab_make_txreq info = {
614 .queue = queue,
615 .skb = NULL,
616 .page = virt_to_page(xdpf->data),
617 };
618 int notify;
619
620 xennet_make_first_txreq(&info,
621 offset_in_page(xdpf->data),
622 xdpf->len);
623
624 xennet_mark_tx_pending(queue);
625
626 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
627 if (notify)
628 notify_remote_via_irq(queue->tx_irq);
629
630 u64_stats_update_begin(&tx_stats->syncp);
631 tx_stats->bytes += xdpf->len;
632 tx_stats->packets++;
633 u64_stats_update_end(&tx_stats->syncp);
634
635 xennet_tx_buf_gc(queue);
636
637 return 0;
638 }
639
640 static int xennet_xdp_xmit(struct net_device *dev, int n,
641 struct xdp_frame **frames, u32 flags)
642 {
643 unsigned int num_queues = dev->real_num_tx_queues;
644 struct netfront_info *np = netdev_priv(dev);
645 struct netfront_queue *queue = NULL;
646 unsigned long irq_flags;
647 int drops = 0;
648 int i, err;
649
650 if (unlikely(np->broken))
651 return -ENODEV;
652 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
653 return -EINVAL;
654
655 queue = &np->queues[smp_processor_id() % num_queues];
656
657 spin_lock_irqsave(&queue->tx_lock, irq_flags);
658 for (i = 0; i < n; i++) {
659 struct xdp_frame *xdpf = frames[i];
660
661 if (!xdpf)
662 continue;
663 err = xennet_xdp_xmit_one(dev, queue, xdpf);
664 if (err) {
665 xdp_return_frame_rx_napi(xdpf);
666 drops++;
667 }
668 }
669 spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
670
671 return n - drops;
672 }
673
674
675 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
676
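/*
 * ndo_start_xmit handler: split the skb's linear area and frags into
 * grant-backed tx requests, push them to the backend and reclaim any
 * slots the backend has already completed.
 */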
677 static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
678 {
679 struct netfront_info *np = netdev_priv(dev);
680 struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
681 struct xen_netif_tx_request *first_tx;
682 unsigned int i;
683 int notify;
684 int slots;
685 struct page *page;
686 unsigned int offset;
687 unsigned int len;
688 unsigned long flags;
689 struct netfront_queue *queue = NULL;
690 struct xennet_gnttab_make_txreq info = { };
691 unsigned int num_queues = dev->real_num_tx_queues;
692 u16 queue_index;
693 struct sk_buff *nskb;
694
695 /* Drop the packet if no queues are set up */
696 if (num_queues < 1)
697 goto drop;
698 if (unlikely(np->broken))
699 goto drop;
700 /* Determine which queue to transmit this SKB on */
701 queue_index = skb_get_queue_mapping(skb);
702 queue = &np->queues[queue_index];
703
704 /* If skb->len is too big for wire format, drop skb and alert
705 * user about misconfiguration.
706 */
707 if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
708 net_alert_ratelimited(
709 "xennet: skb->len = %u, too big for wire format\n",
710 skb->len);
711 goto drop;
712 }
713
714 slots = xennet_count_skb_slots(skb);
715 if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
716 net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
717 slots, skb->len);
718 if (skb_linearize(skb))
719 goto drop;
720 }
721
722 page = virt_to_page(skb->data);
723 offset = offset_in_page(skb->data);
724
725 /* The first req should be at least ETH_HLEN size or the packet will be
726 * dropped by netback.
727 */
728 if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
729 nskb = skb_copy(skb, GFP_ATOMIC);
730 if (!nskb)
731 goto drop;
732 dev_consume_skb_any(skb);
733 skb = nskb;
734 page = virt_to_page(skb->data);
735 offset = offset_in_page(skb->data);
736 }
737
738 len = skb_headlen(skb);
739
740 spin_lock_irqsave(&queue->tx_lock, flags);
741
742 if (unlikely(!netif_carrier_ok(dev) ||
743 (slots > 1 && !xennet_can_sg(dev)) ||
744 netif_needs_gso(skb, netif_skb_features(skb)))) {
745 spin_unlock_irqrestore(&queue->tx_lock, flags);
746 goto drop;
747 }
748
749 /* First request for the linear area. */
750 info.queue = queue;
751 info.skb = skb;
752 info.page = page;
753 first_tx = xennet_make_first_txreq(&info, offset, len);
754 offset += info.tx_local.size;
755 if (offset == PAGE_SIZE) {
756 page++;
757 offset = 0;
758 }
759 len -= info.tx_local.size;
760
761 if (skb->ip_summed == CHECKSUM_PARTIAL)
762 /* local packet? */
763 first_tx->flags |= XEN_NETTXF_csum_blank |
764 XEN_NETTXF_data_validated;
765 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
766 /* remote but checksummed. */
767 first_tx->flags |= XEN_NETTXF_data_validated;
768
769 /* Optional extra info after the first request. */
770 if (skb_shinfo(skb)->gso_size) {
771 struct xen_netif_extra_info *gso;
772
773 gso = (struct xen_netif_extra_info *)
774 RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
775
776 first_tx->flags |= XEN_NETTXF_extra_info;
777
778 gso->u.gso.size = skb_shinfo(skb)->gso_size;
779 gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
780 XEN_NETIF_GSO_TYPE_TCPV6 :
781 XEN_NETIF_GSO_TYPE_TCPV4;
782 gso->u.gso.pad = 0;
783 gso->u.gso.features = 0;
784
785 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
786 gso->flags = 0;
787 }
788
789 /* Requests for the rest of the linear area. */
790 xennet_make_txreqs(&info, page, offset, len);
791
792 /* Requests for all the frags. */
793 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
794 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
795 xennet_make_txreqs(&info, skb_frag_page(frag),
796 skb_frag_off(frag),
797 skb_frag_size(frag));
798 }
799
800 /* First request has the packet length. */
801 first_tx->size = skb->len;
802
803 /* timestamp packet in software */
804 skb_tx_timestamp(skb);
805
806 xennet_mark_tx_pending(queue);
807
808 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
809 if (notify)
810 notify_remote_via_irq(queue->tx_irq);
811
812 u64_stats_update_begin(&tx_stats->syncp);
813 tx_stats->bytes += skb->len;
814 tx_stats->packets++;
815 u64_stats_update_end(&tx_stats->syncp);
816
817 /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
818 xennet_tx_buf_gc(queue);
819
820 if (!netfront_tx_slot_available(queue))
821 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
822
823 spin_unlock_irqrestore(&queue->tx_lock, flags);
824
825 return NETDEV_TX_OK;
826
827 drop:
828 dev->stats.tx_dropped++;
829 dev_kfree_skb_any(skb);
830 return NETDEV_TX_OK;
831 }
832
833 static int xennet_close(struct net_device *dev)
834 {
835 struct netfront_info *np = netdev_priv(dev);
836 unsigned int num_queues = dev->real_num_tx_queues;
837 unsigned int i;
838 struct netfront_queue *queue;
839 netif_tx_stop_all_queues(np->netdev);
840 for (i = 0; i < num_queues; ++i) {
841 queue = &np->queues[i];
842 napi_disable(&queue->napi);
843 }
844 return 0;
845 }
846
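/*
 * Update the rx response consumer index under rx_cons_lock so the
 * interrupt handler sees a consistent count of unconsumed responses.
 */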
847 static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
848 {
849 unsigned long flags;
850
851 spin_lock_irqsave(&queue->rx_cons_lock, flags);
852 queue->rx.rsp_cons = val;
853 queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
854 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
855 }
856
857 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
858 grant_ref_t ref)
859 {
860 int new = xennet_rxidx(queue->rx.req_prod_pvt);
861
862 BUG_ON(queue->rx_skbs[new]);
863 queue->rx_skbs[new] = skb;
864 queue->grant_rx_ref[new] = ref;
865 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
866 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
867 queue->rx.req_prod_pvt++;
868 }
869
870 static int xennet_get_extras(struct netfront_queue *queue,
871 struct xen_netif_extra_info *extras,
872 RING_IDX rp)
873
874 {
875 struct xen_netif_extra_info extra;
876 struct device *dev = &queue->info->netdev->dev;
877 RING_IDX cons = queue->rx.rsp_cons;
878 int err = 0;
879
880 do {
881 struct sk_buff *skb;
882 grant_ref_t ref;
883
884 if (unlikely(cons + 1 == rp)) {
885 if (net_ratelimit())
886 dev_warn(dev, "Missing extra info\n");
887 err = -EBADR;
888 break;
889 }
890
891 RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
892
893 if (unlikely(!extra.type ||
894 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
895 if (net_ratelimit())
896 dev_warn(dev, "Invalid extra type: %d\n",
897 extra.type);
898 err = -EINVAL;
899 } else {
900 extras[extra.type - 1] = extra;
901 }
902
903 skb = xennet_get_rx_skb(queue, cons);
904 ref = xennet_get_rx_ref(queue, cons);
905 xennet_move_rx_slot(queue, skb, ref);
906 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
907
908 xennet_set_rx_rsp_cons(queue, cons);
909 return err;
910 }
911
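/*
 * Build an xdp_buff around the receive page and run the attached XDP
 * program; XDP_TX and XDP_REDIRECT verdicts are handled here, while the
 * caller drops the frame for anything other than XDP_PASS.
 */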
912 static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
913 struct xen_netif_rx_response *rx, struct bpf_prog *prog,
914 struct xdp_buff *xdp, bool *need_xdp_flush)
915 {
916 struct xdp_frame *xdpf;
917 u32 len = rx->status;
918 u32 act;
919 int err;
920
921 xdp->data_hard_start = page_address(pdata);
922 xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
923 xdp_set_data_meta_invalid(xdp);
924 xdp->data_end = xdp->data + len;
925 xdp->rxq = &queue->xdp_rxq;
926 xdp->frame_sz = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
927
928 act = bpf_prog_run_xdp(prog, xdp);
929 switch (act) {
930 case XDP_TX:
931 get_page(pdata);
932 xdpf = xdp_convert_buff_to_frame(xdp);
933 err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
934 if (unlikely(err < 0))
935 trace_xdp_exception(queue->info->netdev, prog, act);
936 break;
937 case XDP_REDIRECT:
938 get_page(pdata);
939 err = xdp_do_redirect(queue->info->netdev, xdp, prog);
940 *need_xdp_flush = true;
941 if (unlikely(err))
942 trace_xdp_exception(queue->info->netdev, prog, act);
943 break;
944 case XDP_PASS:
945 case XDP_DROP:
946 break;
947
948 case XDP_ABORTED:
949 trace_xdp_exception(queue->info->netdev, prog, act);
950 break;
951
952 default:
953 bpf_warn_invalid_xdp_action(act);
954 }
955
956 return act;
957 }
958
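/*
 * Collect all slots making up one received packet (following
 * XEN_NETRXF_more_data), ending foreign access on each grant and
 * queueing the per-slot skbs on @list for xennet_fill_frags().
 */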
959 static int xennet_get_responses(struct netfront_queue *queue,
960 struct netfront_rx_info *rinfo, RING_IDX rp,
961 struct sk_buff_head *list,
962 bool *need_xdp_flush)
963 {
964 struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
965 int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
966 RING_IDX cons = queue->rx.rsp_cons;
967 struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
968 struct xen_netif_extra_info *extras = rinfo->extras;
969 grant_ref_t ref = xennet_get_rx_ref(queue, cons);
970 struct device *dev = &queue->info->netdev->dev;
971 struct bpf_prog *xdp_prog;
972 struct xdp_buff xdp;
973 unsigned long ret;
974 int slots = 1;
975 int err = 0;
976 u32 verdict;
977
978 if (rx->flags & XEN_NETRXF_extra_info) {
979 err = xennet_get_extras(queue, extras, rp);
980 if (!err) {
981 if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
982 struct xen_netif_extra_info *xdp;
983
984 xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
985 rx->offset = xdp->u.xdp.headroom;
986 }
987 }
988 cons = queue->rx.rsp_cons;
989 }
990
991 for (;;) {
992 if (unlikely(rx->status < 0 ||
993 rx->offset + rx->status > XEN_PAGE_SIZE)) {
994 if (net_ratelimit())
995 dev_warn(dev, "rx->offset: %u, size: %d\n",
996 rx->offset, rx->status);
997 xennet_move_rx_slot(queue, skb, ref);
998 err = -EINVAL;
999 goto next;
1000 }
1001
1002 /*
1003 * This definitely indicates a bug, either in this driver or in
1004 * the backend driver. In future this should flag the bad
1005 * situation to the system controller to reboot the backend.
1006 */
1007 if (ref == GRANT_INVALID_REF) {
1008 if (net_ratelimit())
1009 dev_warn(dev, "Bad rx response id %d.\n",
1010 rx->id);
1011 err = -EINVAL;
1012 goto next;
1013 }
1014
1015 ret = gnttab_end_foreign_access_ref(ref, 0);
1016 BUG_ON(!ret);
1017
1018 gnttab_release_grant_reference(&queue->gref_rx_head, ref);
1019
1020 rcu_read_lock();
1021 xdp_prog = rcu_dereference(queue->xdp_prog);
1022 if (xdp_prog) {
1023 if (!(rx->flags & XEN_NETRXF_more_data)) {
1024 /* currently only a single page contains data */
1025 verdict = xennet_run_xdp(queue,
1026 skb_frag_page(&skb_shinfo(skb)->frags[0]),
1027 rx, xdp_prog, &xdp, need_xdp_flush);
1028 if (verdict != XDP_PASS)
1029 err = -EINVAL;
1030 } else {
1031 /* drop the frame */
1032 err = -EINVAL;
1033 }
1034 }
1035 rcu_read_unlock();
1036 next:
1037 __skb_queue_tail(list, skb);
1038 if (!(rx->flags & XEN_NETRXF_more_data))
1039 break;
1040
1041 if (cons + slots == rp) {
1042 if (net_ratelimit())
1043 dev_warn(dev, "Need more slots\n");
1044 err = -ENOENT;
1045 break;
1046 }
1047
1048 RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
1049 rx = &rx_local;
1050 skb = xennet_get_rx_skb(queue, cons + slots);
1051 ref = xennet_get_rx_ref(queue, cons + slots);
1052 slots++;
1053 }
1054
1055 if (unlikely(slots > max)) {
1056 if (net_ratelimit())
1057 dev_warn(dev, "Too many slots\n");
1058 err = -E2BIG;
1059 }
1060
1061 if (unlikely(err))
1062 xennet_set_rx_rsp_cons(queue, cons + slots);
1063
1064 return err;
1065 }
1066
1067 static int xennet_set_skb_gso(struct sk_buff *skb,
1068 struct xen_netif_extra_info *gso)
1069 {
1070 if (!gso->u.gso.size) {
1071 if (net_ratelimit())
1072 pr_warn("GSO size must not be zero\n");
1073 return -EINVAL;
1074 }
1075
1076 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
1077 gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
1078 if (net_ratelimit())
1079 pr_warn("Bad GSO type %d\n", gso->u.gso.type);
1080 return -EINVAL;
1081 }
1082
1083 skb_shinfo(skb)->gso_size = gso->u.gso.size;
1084 skb_shinfo(skb)->gso_type =
1085 (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
1086 SKB_GSO_TCPV4 :
1087 SKB_GSO_TCPV6;
1088
1089 /* Header must be checked, and gso_segs computed. */
1090 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1091 skb_shinfo(skb)->gso_segs = 0;
1092
1093 return 0;
1094 }
1095
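/*
 * Append the data carried by the remaining per-slot skbs on @list to
 * @skb as page fragments, consuming one rx response per slot.
 */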
1096 static int xennet_fill_frags(struct netfront_queue *queue,
1097 struct sk_buff *skb,
1098 struct sk_buff_head *list)
1099 {
1100 RING_IDX cons = queue->rx.rsp_cons;
1101 struct sk_buff *nskb;
1102
1103 while ((nskb = __skb_dequeue(list))) {
1104 struct xen_netif_rx_response rx;
1105 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
1106
1107 RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
1108
1109 if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
1110 unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1111
1112 BUG_ON(pull_to < skb_headlen(skb));
1113 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1114 }
1115 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1116 xennet_set_rx_rsp_cons(queue,
1117 ++cons + skb_queue_len(list));
1118 kfree_skb(nskb);
1119 return -ENOENT;
1120 }
1121
1122 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1123 skb_frag_page(nfrag),
1124 rx.offset, rx.status, PAGE_SIZE);
1125
1126 skb_shinfo(nskb)->nr_frags = 0;
1127 kfree_skb(nskb);
1128 }
1129
1130 xennet_set_rx_rsp_cons(queue, cons);
1131
1132 return 0;
1133 }
1134
1135 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
1136 {
1137 bool recalculate_partial_csum = false;
1138
1139 /*
1140 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1141 * peers can fail to set NETRXF_csum_blank when sending a GSO
1142 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1143 * recalculate the partial checksum.
1144 */
1145 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1146 struct netfront_info *np = netdev_priv(dev);
1147 atomic_inc(&np->rx_gso_checksum_fixup);
1148 skb->ip_summed = CHECKSUM_PARTIAL;
1149 recalculate_partial_csum = true;
1150 }
1151
1152 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1153 if (skb->ip_summed != CHECKSUM_PARTIAL)
1154 return 0;
1155
1156 return skb_checksum_setup(skb, recalculate_partial_csum);
1157 }
1158
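/*
 * Hand the assembled skbs to the network stack, fixing up checksum
 * state first; returns the number of packets dropped.
 */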
1159 static int handle_incoming_queue(struct netfront_queue *queue,
1160 struct sk_buff_head *rxq)
1161 {
1162 struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
1163 int packets_dropped = 0;
1164 struct sk_buff *skb;
1165
1166 while ((skb = __skb_dequeue(rxq)) != NULL) {
1167 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1168
1169 if (pull_to > skb_headlen(skb))
1170 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1171
1172 /* Ethernet work: Delayed to here as it peeks the header. */
1173 skb->protocol = eth_type_trans(skb, queue->info->netdev);
1174 skb_reset_network_header(skb);
1175
1176 if (checksum_setup(queue->info->netdev, skb)) {
1177 kfree_skb(skb);
1178 packets_dropped++;
1179 queue->info->netdev->stats.rx_errors++;
1180 continue;
1181 }
1182
1183 u64_stats_update_begin(&rx_stats->syncp);
1184 rx_stats->packets++;
1185 rx_stats->bytes += skb->len;
1186 u64_stats_update_end(&rx_stats->syncp);
1187
1188 /* Pass it up. */
1189 napi_gro_receive(&queue->napi, skb);
1190 }
1191
1192 return packets_dropped;
1193 }
1194
1195 static int xennet_poll(struct napi_struct *napi, int budget)
1196 {
1197 struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
1198 struct net_device *dev = queue->info->netdev;
1199 struct sk_buff *skb;
1200 struct netfront_rx_info rinfo;
1201 struct xen_netif_rx_response *rx = &rinfo.rx;
1202 struct xen_netif_extra_info *extras = rinfo.extras;
1203 RING_IDX i, rp;
1204 int work_done;
1205 struct sk_buff_head rxq;
1206 struct sk_buff_head errq;
1207 struct sk_buff_head tmpq;
1208 int err;
1209 bool need_xdp_flush = false;
1210
1211 spin_lock(&queue->rx_lock);
1212
1213 skb_queue_head_init(&rxq);
1214 skb_queue_head_init(&errq);
1215 skb_queue_head_init(&tmpq);
1216
1217 rp = queue->rx.sring->rsp_prod;
1218 if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1219 dev_alert(&dev->dev, "Illegal number of responses %u\n",
1220 rp - queue->rx.rsp_cons);
1221 queue->info->broken = true;
1222 spin_unlock(&queue->rx_lock);
1223 return 0;
1224 }
1225 rmb(); /* Ensure we see queued responses up to 'rp'. */
1226
1227 i = queue->rx.rsp_cons;
1228 work_done = 0;
1229 while ((i != rp) && (work_done < budget)) {
1230 RING_COPY_RESPONSE(&queue->rx, i, rx);
1231 memset(extras, 0, sizeof(rinfo.extras));
1232
1233 err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
1234 &need_xdp_flush);
1235
1236 if (unlikely(err)) {
1237 err:
1238 while ((skb = __skb_dequeue(&tmpq)))
1239 __skb_queue_tail(&errq, skb);
1240 dev->stats.rx_errors++;
1241 i = queue->rx.rsp_cons;
1242 continue;
1243 }
1244
1245 skb = __skb_dequeue(&tmpq);
1246
1247 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1248 struct xen_netif_extra_info *gso;
1249 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1250
1251 if (unlikely(xennet_set_skb_gso(skb, gso))) {
1252 __skb_queue_head(&tmpq, skb);
1253 xennet_set_rx_rsp_cons(queue,
1254 queue->rx.rsp_cons +
1255 skb_queue_len(&tmpq));
1256 goto err;
1257 }
1258 }
1259
1260 NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1261 if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1262 NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1263
1264 skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
1265 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1266 skb->data_len = rx->status;
1267 skb->len += rx->status;
1268
1269 if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
1270 goto err;
1271
1272 if (rx->flags & XEN_NETRXF_csum_blank)
1273 skb->ip_summed = CHECKSUM_PARTIAL;
1274 else if (rx->flags & XEN_NETRXF_data_validated)
1275 skb->ip_summed = CHECKSUM_UNNECESSARY;
1276
1277 __skb_queue_tail(&rxq, skb);
1278
1279 i = queue->rx.rsp_cons + 1;
1280 xennet_set_rx_rsp_cons(queue, i);
1281 work_done++;
1282 }
1283 if (need_xdp_flush)
1284 xdp_do_flush();
1285
1286 __skb_queue_purge(&errq);
1287
1288 work_done -= handle_incoming_queue(queue, &rxq);
1289
1290 xennet_alloc_rx_buffers(queue);
1291
1292 if (work_done < budget) {
1293 int more_to_do = 0;
1294
1295 napi_complete_done(napi, work_done);
1296
1297 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1298 if (more_to_do)
1299 napi_schedule(napi);
1300 }
1301
1302 spin_unlock(&queue->rx_lock);
1303
1304 return work_done;
1305 }
1306
1307 static int xennet_change_mtu(struct net_device *dev, int mtu)
1308 {
1309 int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1310
1311 if (mtu > max)
1312 return -EINVAL;
1313 dev->mtu = mtu;
1314 return 0;
1315 }
1316
1317 static void xennet_get_stats64(struct net_device *dev,
1318 struct rtnl_link_stats64 *tot)
1319 {
1320 struct netfront_info *np = netdev_priv(dev);
1321 int cpu;
1322
1323 for_each_possible_cpu(cpu) {
1324 struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1325 struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1326 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1327 unsigned int start;
1328
1329 do {
1330 start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
1331 tx_packets = tx_stats->packets;
1332 tx_bytes = tx_stats->bytes;
1333 } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
1334
1335 do {
1336 start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
1337 rx_packets = rx_stats->packets;
1338 rx_bytes = rx_stats->bytes;
1339 } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
1340
1341 tot->rx_packets += rx_packets;
1342 tot->tx_packets += tx_packets;
1343 tot->rx_bytes += rx_bytes;
1344 tot->tx_bytes += tx_bytes;
1345 }
1346
1347 tot->rx_errors = dev->stats.rx_errors;
1348 tot->tx_dropped = dev->stats.tx_dropped;
1349 }
1350
1351 static void xennet_release_tx_bufs(struct netfront_queue *queue)
1352 {
1353 struct sk_buff *skb;
1354 int i;
1355
1356 for (i = 0; i < NET_TX_RING_SIZE; i++) {
1357 /* Skip over unused entries (a NULL skb means the slot is on the free list) */
1358 if (!queue->tx_skbs[i])
1359 continue;
1360
1361 skb = queue->tx_skbs[i];
1362 queue->tx_skbs[i] = NULL;
1363 get_page(queue->grant_tx_page[i]);
1364 gnttab_end_foreign_access(queue->grant_tx_ref[i],
1365 GNTMAP_readonly,
1366 (unsigned long)page_address(queue->grant_tx_page[i]));
1367 queue->grant_tx_page[i] = NULL;
1368 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1369 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1370 dev_kfree_skb_irq(skb);
1371 }
1372 }
1373
1374 static void xennet_release_rx_bufs(struct netfront_queue *queue)
1375 {
1376 int id, ref;
1377
1378 spin_lock_bh(&queue->rx_lock);
1379
1380 for (id = 0; id < NET_RX_RING_SIZE; id++) {
1381 struct sk_buff *skb;
1382 struct page *page;
1383
1384 skb = queue->rx_skbs[id];
1385 if (!skb)
1386 continue;
1387
1388 ref = queue->grant_rx_ref[id];
1389 if (ref == GRANT_INVALID_REF)
1390 continue;
1391
1392 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1393
1394 /* gnttab_end_foreign_access() needs a page ref until
1395 * foreign access is ended (which may be deferred).
1396 */
1397 get_page(page);
1398 gnttab_end_foreign_access(ref, 0,
1399 (unsigned long)page_address(page));
1400 queue->grant_rx_ref[id] = GRANT_INVALID_REF;
1401
1402 kfree_skb(skb);
1403 }
1404
1405 spin_unlock_bh(&queue->rx_lock);
1406 }
1407
1408 static netdev_features_t xennet_fix_features(struct net_device *dev,
1409 netdev_features_t features)
1410 {
1411 struct netfront_info *np = netdev_priv(dev);
1412
1413 if (features & NETIF_F_SG &&
1414 !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1415 features &= ~NETIF_F_SG;
1416
1417 if (features & NETIF_F_IPV6_CSUM &&
1418 !xenbus_read_unsigned(np->xbdev->otherend,
1419 "feature-ipv6-csum-offload", 0))
1420 features &= ~NETIF_F_IPV6_CSUM;
1421
1422 if (features & NETIF_F_TSO &&
1423 !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1424 features &= ~NETIF_F_TSO;
1425
1426 if (features & NETIF_F_TSO6 &&
1427 !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1428 features &= ~NETIF_F_TSO6;
1429
1430 return features;
1431 }
1432
1433 static int xennet_set_features(struct net_device *dev,
1434 netdev_features_t features)
1435 {
1436 if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1437 netdev_info(dev, "Reducing MTU because no SG offload");
1438 dev->mtu = ETH_DATA_LEN;
1439 }
1440
1441 return 0;
1442 }
1443
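/*
 * Interrupt-time tx processing: reclaim completed slots and report via
 * *eoi whether real work was found, so spurious events can be flagged
 * when the event channel is EOI'd.
 */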
1444 static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
1445 {
1446 unsigned long flags;
1447
1448 if (unlikely(queue->info->broken))
1449 return false;
1450
1451 spin_lock_irqsave(&queue->tx_lock, flags);
1452 if (xennet_tx_buf_gc(queue))
1453 *eoi = 0;
1454 spin_unlock_irqrestore(&queue->tx_lock, flags);
1455
1456 return true;
1457 }
1458
1459 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1460 {
1461 unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1462
1463 if (likely(xennet_handle_tx(dev_id, &eoiflag)))
1464 xen_irq_lateeoi(irq, eoiflag);
1465
1466 return IRQ_HANDLED;
1467 }
1468
1469 static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
1470 {
1471 unsigned int work_queued;
1472 unsigned long flags;
1473
1474 if (unlikely(queue->info->broken))
1475 return false;
1476
1477 spin_lock_irqsave(&queue->rx_cons_lock, flags);
1478 work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
1479 if (work_queued > queue->rx_rsp_unconsumed) {
1480 queue->rx_rsp_unconsumed = work_queued;
1481 *eoi = 0;
1482 } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
1483 const struct device *dev = &queue->info->netdev->dev;
1484
1485 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1486 dev_alert(dev, "RX producer index going backwards\n");
1487 dev_alert(dev, "Disabled for further use\n");
1488 queue->info->broken = true;
1489 return false;
1490 }
1491 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1492
1493 if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
1494 napi_schedule(&queue->napi);
1495
1496 return true;
1497 }
1498
1499 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1500 {
1501 unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1502
1503 if (likely(xennet_handle_rx(dev_id, &eoiflag)))
1504 xen_irq_lateeoi(irq, eoiflag);
1505
1506 return IRQ_HANDLED;
1507 }
1508
1509 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1510 {
1511 unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1512
1513 if (xennet_handle_tx(dev_id, &eoiflag) &&
1514 xennet_handle_rx(dev_id, &eoiflag))
1515 xen_irq_lateeoi(irq, eoiflag);
1516
1517 return IRQ_HANDLED;
1518 }
1519
1520 #ifdef CONFIG_NET_POLL_CONTROLLER
1521 static void xennet_poll_controller(struct net_device *dev)
1522 {
1523 /* Poll each queue */
1524 struct netfront_info *info = netdev_priv(dev);
1525 unsigned int num_queues = dev->real_num_tx_queues;
1526 unsigned int i;
1527
1528 if (info->broken)
1529 return;
1530
1531 for (i = 0; i < num_queues; ++i)
1532 xennet_interrupt(0, &info->queues[i]);
1533 }
1534 #endif
1535
1536 #define NETBACK_XDP_HEADROOM_DISABLE 0
1537 #define NETBACK_XDP_HEADROOM_ENABLE 1
1538
1539 static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
1540 {
1541 int err;
1542 unsigned short headroom;
1543
1544 headroom = xdp ? XDP_PACKET_HEADROOM : 0;
1545 err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
1546 "xdp-headroom", "%hu",
1547 headroom);
1548 if (err)
1549 pr_warn("Error writing xdp-headroom\n");
1550
1551 return err;
1552 }
1553
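/*
 * Install or remove an XDP program: renegotiate the packet headroom
 * with the backend over xenbus, then swap the per-queue program
 * pointers under RTNL.
 */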
1554 static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1555 struct netlink_ext_ack *extack)
1556 {
1557 unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
1558 struct netfront_info *np = netdev_priv(dev);
1559 struct bpf_prog *old_prog;
1560 unsigned int i, err;
1561
1562 if (dev->mtu > max_mtu) {
1563 netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
1564 return -EINVAL;
1565 }
1566
1567 if (!np->netback_has_xdp_headroom)
1568 return 0;
1569
1570 xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);
1571
1572 err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
1573 NETBACK_XDP_HEADROOM_DISABLE);
1574 if (err)
1575 return err;
1576
1577 /* avoid the race with XDP headroom adjustment */
1578 wait_event(module_wq,
1579 xenbus_read_driver_state(np->xbdev->otherend) ==
1580 XenbusStateReconfigured);
1581 np->netfront_xdp_enabled = true;
1582
1583 old_prog = rtnl_dereference(np->queues[0].xdp_prog);
1584
1585 if (prog)
1586 bpf_prog_add(prog, dev->real_num_tx_queues);
1587
1588 for (i = 0; i < dev->real_num_tx_queues; ++i)
1589 rcu_assign_pointer(np->queues[i].xdp_prog, prog);
1590
1591 if (old_prog)
1592 for (i = 0; i < dev->real_num_tx_queues; ++i)
1593 bpf_prog_put(old_prog);
1594
1595 xenbus_switch_state(np->xbdev, XenbusStateConnected);
1596
1597 return 0;
1598 }
1599
1600 static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1601 {
1602 struct netfront_info *np = netdev_priv(dev);
1603
1604 if (np->broken)
1605 return -ENODEV;
1606
1607 switch (xdp->command) {
1608 case XDP_SETUP_PROG:
1609 return xennet_xdp_set(dev, xdp->prog, xdp->extack);
1610 default:
1611 return -EINVAL;
1612 }
1613 }
1614
1615 static const struct net_device_ops xennet_netdev_ops = {
1616 .ndo_open = xennet_open,
1617 .ndo_stop = xennet_close,
1618 .ndo_start_xmit = xennet_start_xmit,
1619 .ndo_change_mtu = xennet_change_mtu,
1620 .ndo_get_stats64 = xennet_get_stats64,
1621 .ndo_set_mac_address = eth_mac_addr,
1622 .ndo_validate_addr = eth_validate_addr,
1623 .ndo_fix_features = xennet_fix_features,
1624 .ndo_set_features = xennet_set_features,
1625 .ndo_select_queue = xennet_select_queue,
1626 .ndo_bpf = xennet_xdp,
1627 .ndo_xdp_xmit = xennet_xdp_xmit,
1628 #ifdef CONFIG_NET_POLL_CONTROLLER
1629 .ndo_poll_controller = xennet_poll_controller,
1630 #endif
1631 };
1632
1633 static void xennet_free_netdev(struct net_device *netdev)
1634 {
1635 struct netfront_info *np = netdev_priv(netdev);
1636
1637 free_percpu(np->rx_stats);
1638 free_percpu(np->tx_stats);
1639 free_netdev(netdev);
1640 }
1641
1642 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1643 {
1644 int err;
1645 struct net_device *netdev;
1646 struct netfront_info *np;
1647
1648 netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1649 if (!netdev)
1650 return ERR_PTR(-ENOMEM);
1651
1652 np = netdev_priv(netdev);
1653 np->xbdev = dev;
1654
1655 np->queues = NULL;
1656
1657 err = -ENOMEM;
1658 np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1659 if (np->rx_stats == NULL)
1660 goto exit;
1661 np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1662 if (np->tx_stats == NULL)
1663 goto exit;
1664
1665 netdev->netdev_ops = &xennet_netdev_ops;
1666
1667 netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1668 NETIF_F_GSO_ROBUST;
1669 netdev->hw_features = NETIF_F_SG |
1670 NETIF_F_IPV6_CSUM |
1671 NETIF_F_TSO | NETIF_F_TSO6;
1672
1673 /*
1674 * Assume that all hw features are available for now. This set
1675 * will be adjusted by the call to netdev_update_features() in
1676 * xennet_connect() which is the earliest point where we can
1677 * negotiate with the backend regarding supported features.
1678 */
1679 netdev->features |= netdev->hw_features;
1680
1681 netdev->ethtool_ops = &xennet_ethtool_ops;
1682 netdev->min_mtu = ETH_MIN_MTU;
1683 netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1684 SET_NETDEV_DEV(netdev, &dev->dev);
1685
1686 np->netdev = netdev;
1687 np->netfront_xdp_enabled = false;
1688
1689 netif_carrier_off(netdev);
1690
1691 do {
1692 xenbus_switch_state(dev, XenbusStateInitialising);
1693 err = wait_event_timeout(module_wq,
1694 xenbus_read_driver_state(dev->otherend) !=
1695 XenbusStateClosed &&
1696 xenbus_read_driver_state(dev->otherend) !=
1697 XenbusStateUnknown, XENNET_TIMEOUT);
1698 } while (!err);
1699
1700 return netdev;
1701
1702 exit:
1703 xennet_free_netdev(netdev);
1704 return ERR_PTR(err);
1705 }
1706
1707 /**
1708 * Entry point to this code when a new device is created. Allocate the basic
1709 * structures and the ring buffers for communication with the backend, and
1710 * inform the backend of the appropriate details for those.
1711 */
1712 static int netfront_probe(struct xenbus_device *dev,
1713 const struct xenbus_device_id *id)
1714 {
1715 int err;
1716 struct net_device *netdev;
1717 struct netfront_info *info;
1718
1719 netdev = xennet_create_dev(dev);
1720 if (IS_ERR(netdev)) {
1721 err = PTR_ERR(netdev);
1722 xenbus_dev_fatal(dev, err, "creating netdev");
1723 return err;
1724 }
1725
1726 info = netdev_priv(netdev);
1727 dev_set_drvdata(&dev->dev, info);
1728 #ifdef CONFIG_SYSFS
1729 info->netdev->sysfs_groups[0] = &xennet_dev_group;
1730 #endif
1731
1732 return 0;
1733 }
1734
1735 static void xennet_end_access(int ref, void *page)
1736 {
1737 /* This frees the page as a side-effect */
1738 if (ref != GRANT_INVALID_REF)
1739 gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1740 }
1741
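/*
 * Tear down the connection to the backend: unbind the event channel
 * irqs, release all outstanding tx/rx buffers and grant references, and
 * free the shared ring pages for every queue.
 */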
1742 static void xennet_disconnect_backend(struct netfront_info *info)
1743 {
1744 unsigned int i = 0;
1745 unsigned int num_queues = info->netdev->real_num_tx_queues;
1746
1747 netif_carrier_off(info->netdev);
1748
1749 for (i = 0; i < num_queues && info->queues; ++i) {
1750 struct netfront_queue *queue = &info->queues[i];
1751
1752 del_timer_sync(&queue->rx_refill_timer);
1753
1754 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1755 unbind_from_irqhandler(queue->tx_irq, queue);
1756 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1757 unbind_from_irqhandler(queue->tx_irq, queue);
1758 unbind_from_irqhandler(queue->rx_irq, queue);
1759 }
1760 queue->tx_evtchn = queue->rx_evtchn = 0;
1761 queue->tx_irq = queue->rx_irq = 0;
1762
1763 if (netif_running(info->netdev))
1764 napi_synchronize(&queue->napi);
1765
1766 xennet_release_tx_bufs(queue);
1767 xennet_release_rx_bufs(queue);
1768 gnttab_free_grant_references(queue->gref_tx_head);
1769 gnttab_free_grant_references(queue->gref_rx_head);
1770
1771 /* End access and free the pages */
1772 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1773 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1774
1775 queue->tx_ring_ref = GRANT_INVALID_REF;
1776 queue->rx_ring_ref = GRANT_INVALID_REF;
1777 queue->tx.sring = NULL;
1778 queue->rx.sring = NULL;
1779
1780 page_pool_destroy(queue->page_pool);
1781 }
1782 }
1783
1784 /**
1785 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1786 * driver restart. We tear down our netif structure and recreate it, but
1787 * leave the device-layer structures intact so that this is transparent to the
1788 * rest of the kernel.
1789 */
1790 static int netfront_resume(struct xenbus_device *dev)
1791 {
1792 struct netfront_info *info = dev_get_drvdata(&dev->dev);
1793
1794 dev_dbg(&dev->dev, "%s\n", dev->nodename);
1795
1796 netif_tx_lock_bh(info->netdev);
1797 netif_device_detach(info->netdev);
1798 netif_tx_unlock_bh(info->netdev);
1799
1800 xennet_disconnect_backend(info);
1801 return 0;
1802 }
1803
1804 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1805 {
1806 char *s, *e, *macstr;
1807 int i;
1808
1809 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1810 if (IS_ERR(macstr))
1811 return PTR_ERR(macstr);
1812
1813 for (i = 0; i < ETH_ALEN; i++) {
1814 mac[i] = simple_strtoul(s, &e, 16);
1815 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1816 kfree(macstr);
1817 return -ENOENT;
1818 }
1819 s = e+1;
1820 }
1821
1822 kfree(macstr);
1823 return 0;
1824 }
1825
1826 static int setup_netfront_single(struct netfront_queue *queue)
1827 {
1828 int err;
1829
1830 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1831 if (err < 0)
1832 goto fail;
1833
1834 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1835 xennet_interrupt, 0,
1836 queue->info->netdev->name,
1837 queue);
1838 if (err < 0)
1839 goto bind_fail;
1840 queue->rx_evtchn = queue->tx_evtchn;
1841 queue->rx_irq = queue->tx_irq = err;
1842
1843 return 0;
1844
1845 bind_fail:
1846 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1847 queue->tx_evtchn = 0;
1848 fail:
1849 return err;
1850 }
1851
1852 static int setup_netfront_split(struct netfront_queue *queue)
1853 {
1854 int err;
1855
1856 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1857 if (err < 0)
1858 goto fail;
1859 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1860 if (err < 0)
1861 goto alloc_rx_evtchn_fail;
1862
1863 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1864 "%s-tx", queue->name);
1865 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1866 xennet_tx_interrupt, 0,
1867 queue->tx_irq_name, queue);
1868 if (err < 0)
1869 goto bind_tx_fail;
1870 queue->tx_irq = err;
1871
1872 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1873 "%s-rx", queue->name);
1874 err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
1875 xennet_rx_interrupt, 0,
1876 queue->rx_irq_name, queue);
1877 if (err < 0)
1878 goto bind_rx_fail;
1879 queue->rx_irq = err;
1880
1881 return 0;
1882
1883 bind_rx_fail:
1884 unbind_from_irqhandler(queue->tx_irq, queue);
1885 queue->tx_irq = 0;
1886 bind_tx_fail:
1887 xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1888 queue->rx_evtchn = 0;
1889 alloc_rx_evtchn_fail:
1890 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1891 queue->tx_evtchn = 0;
1892 fail:
1893 return err;
1894 }
1895
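/*
 * Allocate and grant the shared tx/rx ring pages for one queue and set up its
 * event channel(s), preferring split event channels when the backend offers
 * them and falling back to a single shared channel otherwise.
 */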
static int setup_netfront(struct xenbus_device *dev,
			  struct netfront_queue *queue, unsigned int feature_split_evtchn)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	grant_ref_t gref;
	int err;

	queue->tx_ring_ref = GRANT_INVALID_REF;
	queue->rx_ring_ref = GRANT_INVALID_REF;
	queue->rx.sring = NULL;
	queue->tx.sring = NULL;

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);

	err = xenbus_grant_ring(dev, txs, 1, &gref);
	if (err < 0)
		goto grant_tx_ring_fail;
	queue->tx_ring_ref = gref;

	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto alloc_rx_ring_fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);

	err = xenbus_grant_ring(dev, rxs, 1, &gref);
	if (err < 0)
		goto grant_rx_ring_fail;
	queue->rx_ring_ref = gref;

	if (feature_split_evtchn)
		err = setup_netfront_split(queue);
	/* setup single event channel if
	 *  a) feature-split-event-channels == 0
	 *  b) feature-split-event-channels == 1 but failed to setup
	 */
	if (!feature_split_evtchn || (feature_split_evtchn && err))
		err = setup_netfront_single(queue);

	if (err)
		goto alloc_evtchn_fail;

	return 0;

	/* If we fail to setup netfront, it is safe to just revoke access to
	 * granted pages because backend is not accessing it at this point.
	 */
alloc_evtchn_fail:
	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
grant_rx_ring_fail:
	free_page((unsigned long)rxs);
alloc_rx_ring_fail:
	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
grant_tx_ring_fail:
	free_page((unsigned long)txs);
fail:
	return err;
}


/* Queue-specific initialisation
 * This used to be done in xennet_create_dev() but must now
 * be run per-queue.
 */
static int xennet_init_queue(struct netfront_queue *queue)
{
	unsigned short i;
	int err = 0;
	char *devid;

	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->rx_lock);
	spin_lock_init(&queue->rx_cons_lock);

	timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);

	devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
	snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
		 devid, queue->id);

	/* Initialise tx_skb_freelist as a free chain containing every entry. */
	queue->tx_skb_freelist = 0;
	queue->tx_pend_queue = TX_LINK_NONE;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		queue->tx_link[i] = i + 1;
		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
		queue->grant_tx_page[i] = NULL;
	}
	queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		queue->rx_skbs[i] = NULL;
		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
					  &queue->gref_tx_head) < 0) {
		pr_alert("can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit;
	}

	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
					  &queue->gref_rx_head) < 0) {
		pr_alert("can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	return 0;

exit_free_tx:
	gnttab_free_grant_references(queue->gref_tx_head);
exit:
	return err;
}

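/*
 * Resulting xenstore layout (keys relative to the frontend's nodename):
 *   flat (single queue): tx-ring-ref, rx-ring-ref, event-channel
 *                        (or event-channel-tx/-rx when split)
 *   hierarchical:        queue-<id>/tx-ring-ref, queue-<id>/rx-ring-ref, ...
 */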
static int write_queue_xenstore_keys(struct netfront_queue *queue,
				     struct xenbus_transaction *xbt, int write_hierarchical)
{
	/* Write the queue-specific keys into XenStore in the traditional
	 * way for a single queue, or in per-queue subkeys for multiple
	 * queues.
	 */
	struct xenbus_device *dev = queue->info->xbdev;
	int err;
	const char *message;
	char *path;
	size_t pathsize;

	/* Choose the correct place to write the keys */
	if (write_hierarchical) {
		pathsize = strlen(dev->nodename) + 10;
		path = kzalloc(pathsize, GFP_KERNEL);
		if (!path) {
			err = -ENOMEM;
			message = "out of memory while writing ring references";
			goto error;
		}
		snprintf(path, pathsize, "%s/queue-%u",
			 dev->nodename, queue->id);
	} else {
		path = (char *)dev->nodename;
	}

	/* Write ring references */
	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
			    queue->tx_ring_ref);
	if (err) {
		message = "writing tx-ring-ref";
		goto error;
	}

	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
			    queue->rx_ring_ref);
	if (err) {
		message = "writing rx-ring-ref";
		goto error;
	}

	/* Write event channels; taking into account both shared
	 * and split event channel scenarios.
	 */
	if (queue->tx_evtchn == queue->rx_evtchn) {
		/* Shared event channel */
		err = xenbus_printf(*xbt, path,
				    "event-channel", "%u", queue->tx_evtchn);
		if (err) {
			message = "writing event-channel";
			goto error;
		}
	} else {
		/* Split event channels */
		err = xenbus_printf(*xbt, path,
				    "event-channel-tx", "%u", queue->tx_evtchn);
		if (err) {
			message = "writing event-channel-tx";
			goto error;
		}

		err = xenbus_printf(*xbt, path,
				    "event-channel-rx", "%u", queue->rx_evtchn);
		if (err) {
			message = "writing event-channel-rx";
			goto error;
		}
	}

	if (write_hierarchical)
		kfree(path);
	return 0;

error:
	if (write_hierarchical)
		kfree(path);
	xenbus_dev_fatal(dev, err, "%s", message);
	return err;
}


static void xennet_destroy_queues(struct netfront_info *info)
{
	unsigned int i;

	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
		struct netfront_queue *queue = &info->queues[i];

		if (netif_running(info->netdev))
			napi_disable(&queue->napi);
		netif_napi_del(&queue->napi);
	}

	kfree(info->queues);
	info->queues = NULL;
}

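/*
 * Create a page pool for one RX queue and register it as the memory model of
 * the queue's xdp_rxq.  XDP_PACKET_HEADROOM is reserved at the start of every
 * page as headroom for XDP programs.
 */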
static int xennet_create_page_pool(struct netfront_queue *queue)
{
	int err;
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.pool_size = NET_RX_RING_SIZE,
		.nid = NUMA_NO_NODE,
		.dev = &queue->info->netdev->dev,
		.offset = XDP_PACKET_HEADROOM,
		.max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
	};

	queue->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(queue->page_pool)) {
		err = PTR_ERR(queue->page_pool);
		queue->page_pool = NULL;
		return err;
	}

	err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
			       queue->id);
	if (err) {
		netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
		goto err_free_pp;
	}

	err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
					 MEM_TYPE_PAGE_POOL, queue->page_pool);
	if (err) {
		netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
		goto err_unregister_rxq;
	}
	return 0;

err_unregister_rxq:
	xdp_rxq_info_unreg(&queue->xdp_rxq);
err_free_pp:
	page_pool_destroy(queue->page_pool);
	queue->page_pool = NULL;
	return err;
}

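/*
 * Allocate and initialise *num_queues queues: per-queue locks, grant
 * references, page pool and NAPI context.  If a queue's basic initialisation
 * fails, *num_queues is trimmed to the queues created so far; a page pool
 * failure aborts with an error.
 */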
static int xennet_create_queues(struct netfront_info *info,
				unsigned int *num_queues)
{
	unsigned int i;
	int ret;

	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
			       GFP_KERNEL);
	if (!info->queues)
		return -ENOMEM;

	for (i = 0; i < *num_queues; i++) {
		struct netfront_queue *queue = &info->queues[i];

		queue->id = i;
		queue->info = info;

		ret = xennet_init_queue(queue);
		if (ret < 0) {
			dev_warn(&info->xbdev->dev,
				 "only created %d queues\n", i);
			*num_queues = i;
			break;
		}

		/* use page pool recycling instead of buddy allocator */
		ret = xennet_create_page_pool(queue);
		if (ret < 0) {
			dev_err(&info->xbdev->dev, "can't allocate page pool\n");
			*num_queues = i;
			return ret;
		}

		netif_napi_add(queue->info->netdev, &queue->napi,
			       xennet_poll, 64);
		if (netif_running(info->netdev))
			napi_enable(&queue->napi);
	}

	netif_set_real_num_tx_queues(info->netdev, *num_queues);

	if (*num_queues == 0) {
		dev_err(&info->xbdev->dev, "no queues\n");
		return -EINVAL;
	}
	return 0;
}

/* Common code used when first setting up, and when resuming. */
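/*
 * Handshake with the backend: read the backend's advertised features from
 * xenstore, (re)create the queues, set up shared rings and event channels for
 * each queue, then publish ring references, event channels and frontend
 * features in a single xenbus transaction (retried on -EAGAIN).
 */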
static int talk_to_netback(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;
	unsigned int feature_split_evtchn;
	unsigned int i = 0;
	unsigned int max_queues = 0;
	struct netfront_queue *queue = NULL;
	unsigned int num_queues = 1;

	info->netdev->irq = 0;

	/* Check if backend supports multiple queues */
	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
					  "multi-queue-max-queues", 1);
	num_queues = min(max_queues, xennet_max_queues);

	/* Check feature-split-event-channels */
	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
						    "feature-split-event-channels", 0);

	/* Read mac addr. */
	err = xen_net_read_mac(dev, info->netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto out_unlocked;
	}

	info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
							      "feature-xdp-headroom", 0);
	if (info->netback_has_xdp_headroom) {
		/* set the current xen-netfront xdp state */
		err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
					  NETBACK_XDP_HEADROOM_ENABLE :
					  NETBACK_XDP_HEADROOM_DISABLE);
		if (err)
			goto out_unlocked;
	}

	rtnl_lock();
	if (info->queues)
		xennet_destroy_queues(info);

	/* For the case of a reconnect reset the "broken" indicator. */
	info->broken = false;

	err = xennet_create_queues(info, &num_queues);
	if (err < 0) {
		xenbus_dev_fatal(dev, err, "creating queues");
		kfree(info->queues);
		info->queues = NULL;
		goto out;
	}
	rtnl_unlock();

	/* Create shared ring, alloc event channel -- for each queue */
	for (i = 0; i < num_queues; ++i) {
		queue = &info->queues[i];
		err = setup_netfront(dev, queue, feature_split_evtchn);
		if (err)
			goto destroy_ring;
	}

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	if (xenbus_exists(XBT_NIL,
			  info->xbdev->otherend, "multi-queue-max-queues")) {
		/* Write the number of queues */
		err = xenbus_printf(xbt, dev->nodename,
				    "multi-queue-num-queues", "%u", num_queues);
		if (err) {
			message = "writing multi-queue-num-queues";
			goto abort_transaction_no_dev_fatal;
		}
	}

	if (num_queues == 1) {
		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
		if (err)
			goto abort_transaction_no_dev_fatal;
	} else {
		/* Write the keys for each queue */
		for (i = 0; i < num_queues; ++i) {
			queue = &info->queues[i];
			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
			if (err)
				goto abort_transaction_no_dev_fatal;
		}
	}

	/* The remaining keys are not queue-specific */
	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
	if (err) {
		message = "writing feature-gso-tcpv6";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload", "1");
	if (err) {
		message = "writing feature-ipv6-csum-offload";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

abort_transaction:
	xenbus_dev_fatal(dev, err, "%s", message);
abort_transaction_no_dev_fatal:
	xenbus_transaction_end(xbt, 1);
destroy_ring:
	xennet_disconnect_backend(info);
	rtnl_lock();
	xennet_destroy_queues(info);
out:
	rtnl_unlock();
out_unlocked:
	device_unregister(&dev->dev);
	return err;
}

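/*
 * Connect to the backend: verify it supports the copying receive path,
 * perform the xenbus handshake, register the net_device on first connect,
 * and then kick every queue so anything queued while disconnected gets
 * processed.
 */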
static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = 0;
	int err;
	unsigned int j = 0;
	struct netfront_queue *queue = NULL;

	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_netback(np->xbdev, np);
	if (err)
		return err;
	if (np->netback_has_xdp_headroom)
		pr_info("backend supports XDP headroom\n");

	/* talk_to_netback() sets the correct number of queues */
	num_queues = dev->real_num_tx_queues;

	if (dev->reg_state == NETREG_UNINITIALIZED) {
		err = register_netdev(dev);
		if (err) {
			pr_warn("%s: register_netdev err=%d\n", __func__, err);
			device_unregister(&np->xbdev->dev);
			return err;
		}
	}

	rtnl_lock();
	netdev_update_features(dev);
	rtnl_unlock();

	/*
	 * All public and private state should now be sane. Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netif_tx_lock_bh(np->netdev);
	netif_device_attach(np->netdev);
	netif_tx_unlock_bh(np->netdev);

	netif_carrier_on(np->netdev);
	for (j = 0; j < num_queues; ++j) {
		queue = &np->queues[j];

		notify_remote_via_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			notify_remote_via_irq(queue->rx_irq);

		spin_lock_irq(&queue->tx_lock);
		xennet_tx_buf_gc(queue);
		spin_unlock_irq(&queue->tx_lock);

		spin_lock_bh(&queue->rx_lock);
		xennet_alloc_rx_buffers(queue);
		spin_unlock_bh(&queue->rx_lock);
	}

	return 0;
}

/*
 * Callback received when the backend's state changes.
 */
static void netback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev_get_drvdata(&dev->dev);
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	wake_up_all(&module_wq);

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		netdev_notify_peers(netdev);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		fallthrough; /* Missed the backend's CLOSING state */
	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}

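/* Driver-private statistics reported via "ethtool -S". */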
static const struct xennet_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xennet_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct netfront_info, rx_gso_checksum_fixup)
	},
};

static int xennet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xennet_stats);
	default:
		return -EINVAL;
	}
}

static void xennet_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	void *np = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
}

static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xennet_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xennet_ethtool_ops =
{
	.get_link = ethtool_op_get_link,

	.get_sset_count = xennet_get_sset_count,
	.get_ethtool_stats = xennet_get_ethtool_stats,
	.get_strings = xennet_get_strings,
	.get_ts_info = ethtool_op_get_ts_info,
};

#ifdef CONFIG_SYSFS
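/*
 * Legacy rxbuf_min/rxbuf_max/rxbuf_cur sysfs attributes.  The RX ring size is
 * fixed, so the show handler always reports NET_RX_RING_SIZE and the store
 * handler validates its input but otherwise ignores it.
 */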
static ssize_t show_rxbuf(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
}

static ssize_t store_rxbuf(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t len)
{
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	/* rxbuf_min and rxbuf_max are no longer configurable. */

	return len;
}

static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);

static struct attribute *xennet_dev_attrs[] = {
	&dev_attr_rxbuf_min.attr,
	&dev_attr_rxbuf_max.attr,
	&dev_attr_rxbuf_cur.attr,
	NULL
};

static const struct attribute_group xennet_dev_group = {
	.attrs = xennet_dev_attrs
};
#endif /* CONFIG_SYSFS */

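/*
 * Walk the xenbus state machine towards Closed: request Closing and wait
 * (retrying every XENNET_TIMEOUT) for the backend to follow, then request
 * Closed and wait for the backend to reach Closed or Unknown.
 */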
static void xennet_bus_close(struct xenbus_device *dev)
{
	int ret;

	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
		return;
	do {
		xenbus_switch_state(dev, XenbusStateClosing);
		ret = wait_event_timeout(module_wq,
					 xenbus_read_driver_state(dev->otherend) ==
					 XenbusStateClosing ||
					 xenbus_read_driver_state(dev->otherend) ==
					 XenbusStateClosed ||
					 xenbus_read_driver_state(dev->otherend) ==
					 XenbusStateUnknown,
					 XENNET_TIMEOUT);
	} while (!ret);

	if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
		return;

	do {
		xenbus_switch_state(dev, XenbusStateClosed);
		ret = wait_event_timeout(module_wq,
					 xenbus_read_driver_state(dev->otherend) ==
					 XenbusStateClosed ||
					 xenbus_read_driver_state(dev->otherend) ==
					 XenbusStateUnknown,
					 XENNET_TIMEOUT);
	} while (!ret);
}

static int xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	xennet_bus_close(dev);
	xennet_disconnect_backend(info);

	if (info->netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(info->netdev);

	if (info->queues) {
		rtnl_lock();
		xennet_destroy_queues(info);
		rtnl_unlock();
	}
	xennet_free_netdev(info->netdev);

	return 0;
}


static const struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};

static struct xenbus_driver netfront_driver = {
	.ids = netfront_ids,
	.probe = netfront_probe,
	.remove = xennet_remove,
	.resume = netfront_resume,
	.otherend_changed = netback_changed,
};

static int __init netif_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_nic_devices())
		return -ENODEV;

	pr_info("Initialising Xen virtual ethernet driver\n");

	/* Allow as many queues as there are CPUs, but at most 8
	 * (MAX_QUEUES_DEFAULT) if the user has not specified a value.
	 */
	if (xennet_max_queues == 0)
		xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
					  num_online_cpus());

	return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);

static void __exit netif_exit(void)
{
	xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);

MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");