/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because the core driver calls into xenvif_zerocopy_callback,
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
                                 struct sk_buff *skb)
{
        skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
        atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
        atomic_dec(&queue->inflight_packets);
}

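/* A vif may be scheduled for work only while its netdev is up, it has
 * reached the connected state and it has not been disabled as rogue.
 */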
int xenvif_schedulable(struct xenvif *vif)
{
        return netif_running(vif->dev) &&
                test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
                !vif->disabled;
}

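/* TX event channel interrupt: the frontend has placed new requests on the
 * TX ring, so schedule NAPI to process them.
 */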
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
                napi_schedule(&queue->napi);

        return IRQ_HANDLED;
}

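/* NAPI poll handler: process up to @budget TX requests from the frontend
 * via xenvif_tx_action(). When the budget is not exhausted, NAPI is
 * completed and events are re-enabled unless the queue is rate-limited,
 * in which case the credit timer reschedules it.
 */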
int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif_queue *queue =
                container_of(napi, struct xenvif_queue, napi);
        int work_done;

        /* This vif is rogue, we pretend there is nothing to do
         * for this vif to deschedule it from NAPI. But this interface
         * will be turned off in thread context later.
         */
        if (unlikely(queue->vif->disabled)) {
                napi_complete(napi);
                return 0;
        }

        work_done = xenvif_tx_action(queue, budget);

        if (work_done < budget) {
                napi_complete(napi);
                /* If the queue is rate-limited, it shall be
                 * rescheduled in the timer callback.
                 */
                if (likely(!queue->rate_limited))
                        xenvif_napi_schedule_or_enable_events(queue);
        }

        return work_done;
}

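/* RX event channel interrupt: wake the per-queue kthread that pushes
 * packets to the frontend's RX ring.
 */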
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        xenvif_kick_thread(queue);

        return IRQ_HANDLED;
}

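/* Combined handler used when the frontend does not support split event
 * channels and TX and RX share a single interrupt.
 */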
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
        xenvif_tx_interrupt(irq, dev_id);
        xenvif_rx_interrupt(irq, dev_id);

        return IRQ_HANDLED;
}

int xenvif_queue_stopped(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;
        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;
        netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

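/* Transmit a packet towards the guest: select the target queue from the
 * skb's queue mapping, stamp the skb with a drain deadline and hand it to
 * the guest-RX kthread. Packets are dropped (not requeued) if the queues
 * are not yet set up or the vif is not schedulable.
 */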
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        u16 index;
        struct xenvif_rx_cb *cb;

        BUG_ON(skb->dev != dev);

        /* Drop the packet if queues are not set up */
        if (num_queues < 1)
                goto drop;

        /* Obtain the queue to be used to transmit this packet */
        index = skb_get_queue_mapping(skb);
        if (index >= num_queues) {
                pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
                                    index, vif->dev->name);
                index %= num_queues;
        }
        queue = &vif->queues[index];

        /* Drop the packet if queue is not ready */
        if (queue->task == NULL ||
            queue->dealloc_task == NULL ||
            !xenvif_schedulable(vif))
                goto drop;

        cb = XENVIF_RX_CB(skb);
        cb->expires = jiffies + vif->drain_timeout;

        xenvif_rx_queue_tail(queue, skb);
        xenvif_kick_thread(queue);

        return NETDEV_TX_OK;

drop:
        vif->dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

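/* ndo_get_stats: sum the per-queue counters into the single set of netdev
 * statistics reported for the vif.
 */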
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned long rx_bytes = 0;
        unsigned long rx_packets = 0;
        unsigned long tx_bytes = 0;
        unsigned long tx_packets = 0;
        unsigned int index;

        if (vif->queues == NULL)
                goto out;

        /* Aggregate tx and rx stats from each queue */
        for (index = 0; index < num_queues; ++index) {
                queue = &vif->queues[index];
                rx_bytes += queue->stats.rx_bytes;
                rx_packets += queue->stats.rx_packets;
                tx_bytes += queue->stats.tx_bytes;
                tx_packets += queue->stats.tx_packets;
        }

out:
        vif->dev->stats.rx_bytes = rx_bytes;
        vif->dev->stats.rx_packets = rx_packets;
        vif->dev->stats.tx_bytes = tx_bytes;
        vif->dev->stats.tx_packets = tx_packets;

        return &vif->dev->stats;
}

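/* Bring every queue back to life: enable NAPI and the TX/RX interrupts,
 * then schedule NAPI in case requests arrived while they were disabled.
 */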
static void xenvif_up(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                napi_enable(&queue->napi);
                enable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        enable_irq(queue->rx_irq);
                xenvif_napi_schedule_or_enable_events(queue);
        }
}

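/* Quiesce every queue: disable NAPI and the TX/RX interrupts and stop the
 * credit (rate-limiting) timer.
 */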
static void xenvif_down(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                napi_disable(&queue->napi);
                disable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        disable_irq(queue->rx_irq);
                del_timer_sync(&queue->credit_timeout);
        }
}

static int xenvif_open(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
                xenvif_up(vif);
        netif_tx_start_all_queues(dev);
        return 0;
}

static int xenvif_close(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
                xenvif_down(vif);
        netif_tx_stop_all_queues(dev);
        return 0;
}

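/* The largest MTU we can offer depends on whether the frontend can handle
 * scatter-gather: with SG the limit is a 64KiB frame minus the VLAN
 * Ethernet header, otherwise it is the standard 1500 bytes.
 */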
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
        struct xenvif *vif = netdev_priv(dev);
        int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;
        return 0;
}

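/* Mask out offload features the frontend has not negotiated: SG, TSO/TSO6
 * and checksum offloads are only kept if the guest can accept them.
 */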
static netdev_features_t xenvif_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct xenvif *vif = netdev_priv(dev);

        if (!vif->can_sg)
                features &= ~NETIF_F_SG;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
                features &= ~NETIF_F_TSO;
        if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
                features &= ~NETIF_F_TSO6;
        if (!vif->ip_csum)
                features &= ~NETIF_F_IP_CSUM;
        if (!vif->ipv6_csum)
                features &= ~NETIF_F_IPV6_CSUM;

        return features;
}

static const struct xenvif_stat {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} xenvif_stats[] = {
        {
                "rx_gso_checksum_fixup",
                offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
        },
        /* If (sent != success + fail), there are probably packets never
         * freed up properly!
         */
        {
                "tx_zerocopy_sent",
                offsetof(struct xenvif_stats, tx_zerocopy_sent),
        },
        {
                "tx_zerocopy_success",
                offsetof(struct xenvif_stats, tx_zerocopy_success),
        },
        {
                "tx_zerocopy_fail",
                offsetof(struct xenvif_stats, tx_zerocopy_fail)
        },
        /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
         * a guest with the same MAX_SKB_FRAGS.
         */
        {
                "tx_frag_overflow",
                offsetof(struct xenvif_stats, tx_frag_overflow)
        },
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(xenvif_stats);
        default:
                return -EINVAL;
        }
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        struct xenvif *vif = netdev_priv(dev);
        unsigned int num_queues = vif->num_queues;
        int i;
        unsigned int queue_index;

        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
                unsigned long accum = 0;
                for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                        /* void * so the byte offset from offsetof() is not
                         * scaled by the structure size.
                         */
                        void *vif_stats = &vif->queues[queue_index].stats;
                        accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
                }
                data[i] = accum;
        }
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               xenvif_stats[i].name, ETH_GSTRING_LEN);
                break;
        }
}

static const struct ethtool_ops xenvif_ethtool_ops = {
        .get_link = ethtool_op_get_link,

        .get_sset_count = xenvif_get_sset_count,
        .get_ethtool_stats = xenvif_get_ethtool_stats,
        .get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_start_xmit = xenvif_start_xmit,
        .ndo_get_stats = xenvif_get_stats,
        .ndo_open = xenvif_open,
        .ndo_stop = xenvif_close,
        .ndo_change_mtu = xenvif_change_mtu,
        .ndo_fix_features = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
};

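/* Allocate and register the vifX.Y net device for a frontend. The netdev
 * is created with the maximum supported number of queues; the actual
 * number is negotiated later and the per-queue state is allocated
 * separately. Returns the new vif or an ERR_PTR() on failure.
 */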
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
                            unsigned int handle)
{
        int err;
        struct net_device *dev;
        struct xenvif *vif;
        char name[IFNAMSIZ] = {};

        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
        /* Allocate a netdev with the max. supported number of queues.
         * When the guest selects the desired number, it will be updated
         * via netif_set_real_num_*_queues().
         */
        dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
                              ether_setup, xenvif_max_queues);
        if (dev == NULL) {
                pr_warn("Could not allocate netdev for %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        SET_NETDEV_DEV(dev, parent);

        vif = netdev_priv(dev);

        vif->domid = domid;
        vif->handle = handle;
        vif->can_sg = 1;
        vif->ip_csum = 1;
        vif->dev = dev;
        vif->disabled = false;
        vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
        vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

        /* Start out with no queues. */
        vif->queues = NULL;
        vif->num_queues = 0;

        spin_lock_init(&vif->lock);

        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6;
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
        dev->ethtool_ops = &xenvif_ethtool_ops;

        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

        /*
         * Initialise a dummy MAC address. We choose the numerically
         * largest non-broadcast address to prevent the address getting
         * stolen by an Ethernet bridge for STP purposes.
         * (FE:FF:FF:FF:FF:FF)
         */
        memset(dev->dev_addr, 0xFF, ETH_ALEN);
        dev->dev_addr[0] &= ~0x01;

        netif_carrier_off(dev);

        err = register_netdev(dev);
        if (err) {
                netdev_warn(dev, "Could not register device: err=%d\n", err);
                free_netdev(dev);
                return ERR_PTR(err);
        }

        netdev_dbg(dev, "Successfully created xenvif\n");

        __module_get(THIS_MODULE);

        return vif;
}

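/* Initialise the per-queue state: credit-based rate limiting, the internal
 * RX/TX skb queues, the pending-request ring, and the ballooned pages and
 * zerocopy callback structures used for grant-mapping guest TX buffers.
 */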
int xenvif_init_queue(struct xenvif_queue *queue)
{
        int err, i;

        queue->credit_bytes = queue->remaining_credit = ~0UL;
        queue->credit_usec = 0UL;
        init_timer(&queue->credit_timeout);
        queue->credit_window_start = get_jiffies_64();

        queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

        skb_queue_head_init(&queue->rx_queue);
        skb_queue_head_init(&queue->tx_queue);

        queue->pending_cons = 0;
        queue->pending_prod = MAX_PENDING_REQS;
        for (i = 0; i < MAX_PENDING_REQS; ++i)
                queue->pending_ring[i] = i;

        spin_lock_init(&queue->callback_lock);
        spin_lock_init(&queue->response_lock);

        /* If ballooning is disabled, this will consume real memory, so you
         * had better enable it. The long-term solution would be to use just
         * a bunch of valid page descriptors, without dependency on ballooning.
         */
        err = alloc_xenballooned_pages(MAX_PENDING_REQS,
                                       queue->mmap_pages,
                                       false);
        if (err) {
                netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
                return -ENOMEM;
        }

        for (i = 0; i < MAX_PENDING_REQS; i++) {
                queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
                        { .callback = xenvif_zerocopy_callback,
                          .ctx = NULL,
                          .desc = i };
                queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
        }

        return 0;
}

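/* Mark the vif as connected and turn the carrier on. Takes the RTNL lock
 * because it may adjust the MTU and re-evaluate the netdev features before
 * bringing the interface up.
 */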
void xenvif_carrier_on(struct xenvif *vif)
{
        rtnl_lock();
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        set_bit(VIF_STATUS_CONNECTED, &vif->status);
        if (netif_running(vif->dev))
                xenvif_up(vif);
        rtnl_unlock();
}

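/* Connect a queue to the frontend: map the shared TX/RX rings, bind the
 * event channel(s) to interrupt handlers (a single combined handler is
 * used when the frontend does not support split event channels) and start
 * the guest-RX and dealloc kthreads. On failure everything set up so far
 * is torn down again.
 */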
int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
                   unsigned long rx_ring_ref, unsigned int tx_evtchn,
                   unsigned int rx_evtchn)
{
        struct task_struct *task;
        int err = -ENOMEM;

        BUG_ON(queue->tx_irq);
        BUG_ON(queue->task);
        BUG_ON(queue->dealloc_task);

        err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
        if (err < 0)
                goto err;

        init_waitqueue_head(&queue->wq);
        init_waitqueue_head(&queue->dealloc_wq);
        atomic_set(&queue->inflight_packets, 0);

        netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
                       XENVIF_NAPI_WEIGHT);

        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
                        queue->name, queue);
                if (err < 0)
                        goto err_unmap;
                queue->tx_irq = queue->rx_irq = err;
                disable_irq(queue->tx_irq);
        } else {
                /* feature-split-event-channels == 1 */
                snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
                         "%s-tx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
                        queue->tx_irq_name, queue);
                if (err < 0)
                        goto err_unmap;
                queue->tx_irq = err;
                disable_irq(queue->tx_irq);

                snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
                         "%s-rx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
                        queue->rx_irq_name, queue);
                if (err < 0)
                        goto err_tx_unbind;
                queue->rx_irq = err;
                disable_irq(queue->rx_irq);
        }

        queue->stalled = true;

        task = kthread_create(xenvif_kthread_guest_rx,
                              (void *)queue, "%s-guest-rx", queue->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", queue->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }
        queue->task = task;

        task = kthread_create(xenvif_dealloc_kthread,
                              (void *)queue, "%s-dealloc", queue->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", queue->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }
        queue->dealloc_task = task;

        wake_up_process(queue->task);
        wake_up_process(queue->dealloc_task);

        return 0;

err_rx_unbind:
        unbind_from_irqhandler(queue->rx_irq, queue);
        queue->rx_irq = 0;
err_tx_unbind:
        unbind_from_irqhandler(queue->tx_irq, queue);
        queue->tx_irq = 0;
err_unmap:
        xenvif_unmap_frontend_rings(queue);
err:
        module_put(THIS_MODULE);
        return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
        struct net_device *dev = vif->dev;

        rtnl_lock();
        if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
                netif_carrier_off(dev); /* discard queued packets */
                if (netif_running(dev))
                        xenvif_down(vif);
        }
        rtnl_unlock();
}

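/* Undo xenvif_connect() for every queue: drop the carrier, stop the
 * kthreads, unbind the event-channel interrupts and unmap the shared
 * rings. The netdev itself stays registered until xenvif_free().
 */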
void xenvif_disconnect(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        xenvif_carrier_off(vif);

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];

                netif_napi_del(&queue->napi);

                if (queue->task) {
                        kthread_stop(queue->task);
                        queue->task = NULL;
                }

                if (queue->dealloc_task) {
                        kthread_stop(queue->dealloc_task);
                        queue->dealloc_task = NULL;
                }

                if (queue->tx_irq) {
                        if (queue->tx_irq == queue->rx_irq)
                                unbind_from_irqhandler(queue->tx_irq, queue);
                        else {
                                unbind_from_irqhandler(queue->tx_irq, queue);
                                unbind_from_irqhandler(queue->rx_irq, queue);
                        }
                        queue->tx_irq = 0;
                }

                xenvif_unmap_frontend_rings(queue);
        }
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
        free_xenballooned_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

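/* Final teardown: unregister the netdev, release the per-queue resources
 * and the queue array, and drop the module reference taken in
 * xenvif_alloc().
 */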
void xenvif_free(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        unregister_netdev(vif->dev);

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                xenvif_deinit_queue(queue);
        }

        vfree(vif->queues);
        vif->queues = NULL;
        vif->num_queues = 0;

        free_netdev(vif->dev);

        module_put(THIS_MODULE);
}