Lines matching full:vi (every occurrence of the identifier vi, the driver's struct virtnet_info pointer). Each entry shows the source line number, the matched line, and the enclosing function.

309 	/* Find end of list, sew whole thing into vi->rq.pages. */  in give_pages()
328 static void enable_delayed_refill(struct virtnet_info *vi) in enable_delayed_refill() argument
330 spin_lock_bh(&vi->refill_lock); in enable_delayed_refill()
331 vi->refill_enabled = true; in enable_delayed_refill()
332 spin_unlock_bh(&vi->refill_lock); in enable_delayed_refill()
335 static void disable_delayed_refill(struct virtnet_info *vi) in disable_delayed_refill() argument
337 spin_lock_bh(&vi->refill_lock); in disable_delayed_refill()
338 vi->refill_enabled = false; in disable_delayed_refill()
339 spin_unlock_bh(&vi->refill_lock); in disable_delayed_refill()
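
The two helpers above form a lock-protected gate: refill work is only scheduled while refill_enabled is true, and the open/close paths flip that flag under refill_lock. A minimal user-space model of the same pattern, with a pthread mutex standing in for spin_lock_bh() (all names here are illustrative, not the driver's):

```c
#include <pthread.h>
#include <stdbool.h>

struct refill_state {
	pthread_mutex_t lock;
	bool enabled;
};

/* Toggle the flag only under the lock, as the driver does. */
static void enable_refill(struct refill_state *s)
{
	pthread_mutex_lock(&s->lock);
	s->enabled = true;
	pthread_mutex_unlock(&s->lock);
}

/* Readers take the same lock, mirroring the check in virtnet_receive(). */
static bool refill_allowed(struct refill_state *s)
{
	pthread_mutex_lock(&s->lock);
	bool v = s->enabled;
	pthread_mutex_unlock(&s->lock);
	return v;
}

int main(void)
{
	struct refill_state s = { PTHREAD_MUTEX_INITIALIZER, false };

	enable_refill(&s);
	return refill_allowed(&s) ? 0 : 1;	/* exits 0 */
}
```

Taking the lock even for a single boolean keeps the "is refill allowed" answer consistent against a concurrent close, which is the point of the driver's refill_lock.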
367 struct virtnet_info *vi = vq->vdev->priv; in skb_xmit_done() local
368 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi; in skb_xmit_done()
377 netif_wake_subqueue(vi->dev, vq2txq(vq)); in skb_xmit_done()
398 static struct sk_buff *page_to_skb(struct virtnet_info *vi, in page_to_skb() argument
418 hdr_len = vi->hdr_len; in page_to_skb()
419 if (vi->mergeable_rx_bufs) in page_to_skb()
449 if (vi->mergeable_rx_bufs) { in page_to_skb()
484 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, in __virtnet_xdp_xmit_one() argument
491 if (unlikely(xdpf->headroom < vi->hdr_len)) in __virtnet_xdp_xmit_one()
495 xdpf->data -= vi->hdr_len; in __virtnet_xdp_xmit_one()
498 memset(hdr, 0, vi->hdr_len); in __virtnet_xdp_xmit_one()
499 xdpf->len += vi->hdr_len; in __virtnet_xdp_xmit_one()
511 /* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
519 #define virtnet_xdp_get_sq(vi) ({ \ argument
521 typeof(vi) v = (vi); \
537 #define virtnet_xdp_put_sq(vi, q) { \ argument
539 typeof(vi) v = (vi); \
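
virtnet_xdp_get_sq() and virtnet_xdp_put_sq() are statement-expression macros: the GNU C ({ ... }) form yields a value, and typeof(vi) lets the macro evaluate its argument exactly once. A compilable sketch of the same idiom (pick_queue and struct info are hypothetical, not driver names):

```c
#include <stdio.h>

/* ({ ... }) is a GNU C statement expression: the value of the last
 * statement becomes the macro's value. typeof() copies the argument
 * into a local once, so side effects in "vi" are not repeated. */
#define pick_queue(vi) ({			\
	typeof(vi) v = (vi);			\
	v->cur < v->max ? v->cur : 0;		\
})

struct info { int cur, max; };

int main(void)
{
	struct info i = { .cur = 2, .max = 4 };
	struct info *vi = &i;

	printf("queue %d\n", pick_queue(vi));	/* prints "queue 2" */
	return 0;
}
```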
551 struct virtnet_info *vi = netdev_priv(dev); in virtnet_xdp_xmit() local
552 struct receive_queue *rq = vi->rq; in virtnet_xdp_xmit()
571 sq = virtnet_xdp_get_sq(vi); in virtnet_xdp_xmit()
598 err = __virtnet_xdp_xmit_one(vi, sq, xdpf); in virtnet_xdp_xmit()
619 virtnet_xdp_put_sq(vi, sq); in virtnet_xdp_xmit()
623 static unsigned int virtnet_get_headroom(struct virtnet_info *vi) in virtnet_get_headroom() argument
625 return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0; in virtnet_get_headroom()
697 struct virtnet_info *vi, in receive_small() argument
708 unsigned int headroom = vi->hdr_len + header_offset; in receive_small()
717 len -= vi->hdr_len; in receive_small()
738 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { in receive_small()
740 unsigned int tlen = len + vi->hdr_len; in receive_small()
743 xdp_headroom = virtnet_get_headroom(vi); in receive_small()
745 headroom = vi->hdr_len + header_offset; in receive_small()
759 xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len; in receive_small()
783 trace_xdp_exception(vi->dev, xdp_prog, act); in receive_small()
801 trace_xdp_exception(vi->dev, xdp_prog, act); in receive_small()
817 memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len); in receive_small()
837 struct virtnet_info *vi, in receive_big() argument
845 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0); in receive_big()
847 stats->bytes += len - vi->hdr_len; in receive_big()
860 struct virtnet_info *vi, in receive_mergeable() argument
869 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); in receive_mergeable()
881 stats->bytes += len - vi->hdr_len; in receive_mergeable()
917 headroom < virtnet_get_headroom(vi))) { in receive_mergeable()
936 xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len; in receive_mergeable()
937 xdp.data = data + vi->hdr_len; in receive_mergeable()
938 xdp.data_end = xdp.data + (len - vi->hdr_len); in receive_mergeable()
941 xdp.frame_sz = frame_sz - vi->hdr_len; in receive_mergeable()
956 vi->hdr_len - metasize; in receive_mergeable()
961 len = xdp.data_end - xdp.data + vi->hdr_len + metasize; in receive_mergeable()
966 head_skb = page_to_skb(vi, rq, xdp_page, offset, in receive_mergeable()
982 trace_xdp_exception(vi->dev, xdp_prog, act); in receive_mergeable()
1009 trace_xdp_exception(vi->dev, xdp_prog, act); in receive_mergeable()
1019 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog, in receive_mergeable()
1032 virtio16_to_cpu(vi->vdev, in receive_mergeable()
1106 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, in receive_buf() argument
1111 struct net_device *dev = vi->dev; in receive_buf()
1115 if (unlikely(len < vi->hdr_len + ETH_HLEN)) { in receive_buf()
1118 if (vi->mergeable_rx_bufs) { in receive_buf()
1120 } else if (vi->big_packets) { in receive_buf()
1128 if (vi->mergeable_rx_bufs) in receive_buf()
1129 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit, in receive_buf()
1131 else if (vi->big_packets) in receive_buf()
1132 skb = receive_big(dev, vi, rq, buf, len, stats); in receive_buf()
1134 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats); in receive_buf()
1145 virtio_is_little_endian(vi->vdev))) { in receive_buf()
1170 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_small() argument
1175 unsigned int xdp_headroom = virtnet_get_headroom(vi); in add_recvbuf_small()
1177 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; in add_recvbuf_small()
1189 vi->hdr_len + GOOD_PACKET_LEN); in add_recvbuf_small()
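
The small-buffer path sizes each receive buffer as virtio header plus alignment padding plus one full packet plus optional XDP headroom, but posts only the header-plus-packet span to the device. A rough model of that arithmetic; the constant values below are placeholders chosen for illustration, not the kernel's definitions:

```c
#include <stdio.h>

#define HDR_LEN		  12	/* e.g. sizeof(struct virtio_net_hdr_mrg_rxbuf) */
#define VIRTNET_RX_PAD	  64	/* placeholder for the driver's alignment pad */
#define GOOD_PACKET_LEN	1518	/* placeholder: one full Ethernet frame */
#define XDP_HEADROOM	 256	/* reserved only while an XDP program is set */

int main(void)
{
	int xdp_on = 1;
	int headroom = xdp_on ? XDP_HEADROOM : 0;
	int alloc_len = HDR_LEN + VIRTNET_RX_PAD + GOOD_PACKET_LEN + headroom;

	printf("allocate %d bytes, expose %d to the device\n",
	       alloc_len, HDR_LEN + GOOD_PACKET_LEN);
	return 0;
}
```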
1196 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, in add_recvbuf_big() argument
1229 sg_set_buf(&rq->sg[0], p, vi->hdr_len); in add_recvbuf_big()
1261 static int add_recvbuf_mergeable(struct virtnet_info *vi, in add_recvbuf_mergeable() argument
1265 unsigned int headroom = virtnet_get_headroom(vi); in add_recvbuf_mergeable()
1311 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, in try_fill_recv() argument
1318 if (vi->mergeable_rx_bufs) in try_fill_recv()
1319 err = add_recvbuf_mergeable(vi, rq, gfp); in try_fill_recv()
1320 else if (vi->big_packets) in try_fill_recv()
1321 err = add_recvbuf_big(vi, rq, gfp); in try_fill_recv()
1323 err = add_recvbuf_small(vi, rq, gfp); in try_fill_recv()
1342 struct virtnet_info *vi = rvq->vdev->priv; in skb_recv_done() local
1343 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; in skb_recv_done()
1361 static void virtnet_napi_tx_enable(struct virtnet_info *vi, in virtnet_napi_tx_enable() argument
1371 if (!vi->affinity_hint_set) { in virtnet_napi_tx_enable()
1387 struct virtnet_info *vi = in refill_work() local
1392 for (i = 0; i < vi->curr_queue_pairs; i++) { in refill_work()
1393 struct receive_queue *rq = &vi->rq[i]; in refill_work()
1396 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); in refill_work()
1403 schedule_delayed_work(&vi->refill, HZ/2); in refill_work()
1410 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_receive() local
1416 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_receive()
1421 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats); in virtnet_receive()
1427 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); in virtnet_receive()
1433 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) { in virtnet_receive()
1434 spin_lock(&vi->refill_lock); in virtnet_receive()
1435 if (vi->refill_enabled) in virtnet_receive()
1436 schedule_delayed_work(&vi->refill, 0); in virtnet_receive()
1437 spin_unlock(&vi->refill_lock); in virtnet_receive()
1490 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) in is_xdp_raw_buffer_queue() argument
1492 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) in is_xdp_raw_buffer_queue()
1494 else if (q < vi->curr_queue_pairs) in is_xdp_raw_buffer_queue()
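
is_xdp_raw_buffer_queue() classifies a queue index by range: of the curr_queue_pairs active pairs, the last xdp_queue_pairs are reserved for XDP transmit. A small model of the check (the final out-of-range branch is an assumption here, since it is not among the matched lines):

```c
#include <stdbool.h>
#include <stdio.h>

static bool is_xdp_queue(int q, int curr_qp, int xdp_qp)
{
	if (q < curr_qp - xdp_qp)
		return false;	/* regular tx queue backed by skbs */
	else if (q < curr_qp)
		return true;	/* reserved for XDP_TX raw buffers */
	return false;		/* assumed: beyond the active pairs */
}

int main(void)
{
	printf("%d %d %d\n",
	       is_xdp_queue(0, 4, 2),	/* 0: regular */
	       is_xdp_queue(3, 4, 2),	/* 1: XDP */
	       is_xdp_queue(5, 4, 2));	/* 0: inactive */
	return 0;
}
```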
1502 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll_cleantx() local
1504 struct send_queue *sq = &vi->sq[index]; in virtnet_poll_cleantx()
1505 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_cleantx()
1507 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) in virtnet_poll_cleantx()
1523 struct virtnet_info *vi = rq->vq->vdev->priv; in virtnet_poll() local
1540 sq = virtnet_xdp_get_sq(vi); in virtnet_poll()
1546 virtnet_xdp_put_sq(vi, sq); in virtnet_poll()
1554 struct virtnet_info *vi = netdev_priv(dev); in virtnet_open() local
1557 enable_delayed_refill(vi); in virtnet_open()
1559 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_open()
1560 if (i < vi->curr_queue_pairs) in virtnet_open()
1562 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) in virtnet_open()
1563 schedule_delayed_work(&vi->refill, 0); in virtnet_open()
1565 err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i); in virtnet_open()
1569 err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq, in virtnet_open()
1572 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); in virtnet_open()
1576 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_open()
1577 virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); in virtnet_open()
1586 struct virtnet_info *vi = sq->vq->vdev->priv; in virtnet_poll_tx() local
1592 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { in virtnet_poll_tx()
1598 txq = netdev_get_tx_queue(vi->dev, index); in virtnet_poll_tx()
1633 struct virtnet_info *vi = sq->vq->vdev->priv; in xmit_skb() local
1635 unsigned hdr_len = vi->hdr_len; in xmit_skb()
1638 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); in xmit_skb()
1640 can_push = vi->any_header_sg && in xmit_skb()
1651 virtio_is_little_endian(vi->vdev), false, in xmit_skb()
1655 if (vi->mergeable_rx_bufs) in xmit_skb()
1678 struct virtnet_info *vi = netdev_priv(dev); in start_xmit() local
1680 struct send_queue *sq = &vi->sq[qnum]; in start_xmit()
1755 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, in virtnet_send_command() argument
1762 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); in virtnet_send_command()
1764 vi->ctrl->status = ~0; in virtnet_send_command()
1765 vi->ctrl->hdr.class = class; in virtnet_send_command()
1766 vi->ctrl->hdr.cmd = cmd; in virtnet_send_command()
1768 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); in virtnet_send_command()
1775 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); in virtnet_send_command()
1779 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); in virtnet_send_command()
1781 if (unlikely(!virtqueue_kick(vi->cvq))) in virtnet_send_command()
1782 return vi->ctrl->status == VIRTIO_NET_OK; in virtnet_send_command()
1787 while (!virtqueue_get_buf(vi->cvq, &tmp) && in virtnet_send_command()
1788 !virtqueue_is_broken(vi->cvq)) in virtnet_send_command()
1791 return vi->ctrl->status == VIRTIO_NET_OK; in virtnet_send_command()
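
virtnet_send_command() lays out out-buffers (the class/cmd header, then any payload) followed by a single in-buffer for the status byte, adds them all with virtqueue_add_sgs(), kicks the control queue, and busy-waits on virtqueue_get_buf() until the device is done. A user-space model of that request/ack shape; fake_device() and the other names are made up:

```c
#include <stdint.h>
#include <stdio.h>

struct ctrl_hdr { uint8_t class, cmd; };

/* Stands in for the hypervisor consuming the control buffers. */
static uint8_t fake_device(const struct ctrl_hdr *hdr,
			   const void *data, unsigned len)
{
	(void)data; (void)len;
	return hdr->class <= 5 ? 0 /* VIRTIO_NET_OK */ : 1;
}

static int send_command(uint8_t class, uint8_t cmd,
			const void *data, unsigned len)
{
	struct ctrl_hdr hdr = { .class = class, .cmd = cmd };
	uint8_t status = 0xff;	/* preset to ~0, like vi->ctrl->status */

	/* In the driver: virtqueue_add_sgs() + virtqueue_kick(), then
	 * poll virtqueue_get_buf() until the device returns the buffers. */
	status = fake_device(&hdr, data, len);
	return status == 0;
}

int main(void)
{
	printf("ok=%d\n", send_command(0, 0, NULL, 0));
	return 0;
}
```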
1796 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_mac_address() local
1797 struct virtio_device *vdev = vi->vdev; in virtnet_set_mac_address()
1802 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_set_mac_address()
1815 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, in virtnet_set_mac_address()
1844 struct virtnet_info *vi = netdev_priv(dev); in virtnet_stats() local
1848 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_stats()
1850 struct receive_queue *rq = &vi->rq[i]; in virtnet_stats()
1851 struct send_queue *sq = &vi->sq[i]; in virtnet_stats()
1879 static void virtnet_ack_link_announce(struct virtnet_info *vi) in virtnet_ack_link_announce() argument
1882 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, in virtnet_ack_link_announce()
1884 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); in virtnet_ack_link_announce()
1888 static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) in _virtnet_set_queues() argument
1891 struct net_device *dev = vi->dev; in _virtnet_set_queues()
1893 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) in _virtnet_set_queues()
1896 vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); in _virtnet_set_queues()
1897 sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); in _virtnet_set_queues()
1899 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, in _virtnet_set_queues()
1905 vi->curr_queue_pairs = queue_pairs; in _virtnet_set_queues()
1908 schedule_delayed_work(&vi->refill, 0); in _virtnet_set_queues()
1914 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) in virtnet_set_queues() argument
1919 err = _virtnet_set_queues(vi, queue_pairs); in virtnet_set_queues()
1926 struct virtnet_info *vi = netdev_priv(dev); in virtnet_close() local
1930 disable_delayed_refill(vi); in virtnet_close()
1932 cancel_delayed_work_sync(&vi->refill); in virtnet_close()
1934 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_close()
1935 napi_disable(&vi->rq[i].napi); in virtnet_close()
1936 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); in virtnet_close()
1937 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_close()
1945 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_rx_mode() local
1955 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) in virtnet_set_rx_mode()
1958 vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); in virtnet_set_rx_mode()
1959 vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); in virtnet_set_rx_mode()
1961 sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); in virtnet_set_rx_mode()
1963 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, in virtnet_set_rx_mode()
1966 vi->ctrl->promisc ? "en" : "dis"); in virtnet_set_rx_mode()
1968 sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); in virtnet_set_rx_mode()
1970 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, in virtnet_set_rx_mode()
1973 vi->ctrl->allmulti ? "en" : "dis"); in virtnet_set_rx_mode()
1987 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); in virtnet_set_rx_mode()
1998 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); in virtnet_set_rx_mode()
2006 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, in virtnet_set_rx_mode()
2016 struct virtnet_info *vi = netdev_priv(dev); in virtnet_vlan_rx_add_vid() local
2019 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_add_vid()
2020 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); in virtnet_vlan_rx_add_vid()
2022 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, in virtnet_vlan_rx_add_vid()
2031 struct virtnet_info *vi = netdev_priv(dev); in virtnet_vlan_rx_kill_vid() local
2034 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); in virtnet_vlan_rx_kill_vid()
2035 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); in virtnet_vlan_rx_kill_vid()
2037 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, in virtnet_vlan_rx_kill_vid()
2043 static void virtnet_clean_affinity(struct virtnet_info *vi) in virtnet_clean_affinity() argument
2047 if (vi->affinity_hint_set) { in virtnet_clean_affinity()
2048 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_clean_affinity()
2049 virtqueue_set_affinity(vi->rq[i].vq, NULL); in virtnet_clean_affinity()
2050 virtqueue_set_affinity(vi->sq[i].vq, NULL); in virtnet_clean_affinity()
2053 vi->affinity_hint_set = false; in virtnet_clean_affinity()
2057 static void virtnet_set_affinity(struct virtnet_info *vi) in virtnet_set_affinity() argument
2067 virtnet_clean_affinity(vi); in virtnet_set_affinity()
2072 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1); in virtnet_set_affinity()
2073 stragglers = num_cpu >= vi->curr_queue_pairs ? in virtnet_set_affinity()
2074 num_cpu % vi->curr_queue_pairs : in virtnet_set_affinity()
2078 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_set_affinity()
2086 virtqueue_set_affinity(vi->rq[i].vq, mask); in virtnet_set_affinity()
2087 virtqueue_set_affinity(vi->sq[i].vq, mask); in virtnet_set_affinity()
2088 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false); in virtnet_set_affinity()
2092 vi->affinity_hint_set = true; in virtnet_set_affinity()
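
virtnet_set_affinity() spreads online CPUs over the active queue pairs: every queue gets stride CPUs, and the first stragglers queues absorb one extra when the division is uneven. A runnable model of that split (the real code walks the online-CPU mask with wrap-around, which this skips):

```c
#include <stdio.h>

int main(void)
{
	int num_cpu = 6, curr_qp = 4;
	int stride = num_cpu / curr_qp > 0 ? num_cpu / curr_qp : 1;
	int stragglers = num_cpu >= curr_qp ? num_cpu % curr_qp : 0;
	int cpu = 0;

	for (int q = 0; q < curr_qp; q++) {
		int take = stride + (q < stragglers ? 1 : 0);

		printf("queue %d <- cpus", q);
		for (int k = 0; k < take && cpu < num_cpu; k++)
			printf(" %d", cpu++);
		printf("\n");	/* queues 0,1 get two CPUs; 2,3 get one */
	}
	return 0;
}
```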
2098 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, in virtnet_cpu_online() local
2100 virtnet_set_affinity(vi); in virtnet_cpu_online()
2106 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, in virtnet_cpu_dead() local
2108 virtnet_set_affinity(vi); in virtnet_cpu_dead()
2114 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info, in virtnet_cpu_down_prep() local
2117 virtnet_clean_affinity(vi); in virtnet_cpu_down_prep()
2123 static int virtnet_cpu_notif_add(struct virtnet_info *vi) in virtnet_cpu_notif_add() argument
2127 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
2131 &vi->node_dead); in virtnet_cpu_notif_add()
2134 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_add()
2138 static void virtnet_cpu_notif_remove(struct virtnet_info *vi) in virtnet_cpu_notif_remove() argument
2140 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node); in virtnet_cpu_notif_remove()
2142 &vi->node_dead); in virtnet_cpu_notif_remove()
2148 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_ringparam() local
2150 ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); in virtnet_get_ringparam()
2151 ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); in virtnet_get_ringparam()
2160 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_drvinfo() local
2161 struct virtio_device *vdev = vi->vdev; in virtnet_get_drvinfo()
2173 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_channels() local
2183 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) in virtnet_set_channels()
2190 if (vi->rq[0].xdp_prog) in virtnet_set_channels()
2194 err = _virtnet_set_queues(vi, queue_pairs); in virtnet_set_channels()
2199 virtnet_set_affinity(vi); in virtnet_set_channels()
2210 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_strings() local
2216 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_strings()
2224 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_strings()
2237 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_sset_count() local
2241 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN + in virtnet_get_sset_count()
2251 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_ethtool_stats() local
2256 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_ethtool_stats()
2257 struct receive_queue *rq = &vi->rq[i]; in virtnet_get_ethtool_stats()
2270 for (i = 0; i < vi->curr_queue_pairs; i++) { in virtnet_get_ethtool_stats()
2271 struct send_queue *sq = &vi->sq[i]; in virtnet_get_ethtool_stats()
2288 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_channels() local
2290 channels->combined_count = vi->curr_queue_pairs; in virtnet_get_channels()
2291 channels->max_combined = vi->max_queue_pairs; in virtnet_get_channels()
2301 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_link_ksettings() local
2304 &vi->speed, &vi->duplex); in virtnet_set_link_ksettings()
2310 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_link_ksettings() local
2312 cmd->base.speed = vi->speed; in virtnet_get_link_ksettings()
2313 cmd->base.duplex = vi->duplex; in virtnet_get_link_ksettings()
2322 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_coalesce() local
2330 if (napi_weight ^ vi->sq[0].napi.weight) { in virtnet_set_coalesce()
2333 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_set_coalesce()
2334 vi->sq[i].napi.weight = napi_weight; in virtnet_set_coalesce()
2347 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_coalesce() local
2351 if (vi->sq[0].napi.weight) in virtnet_get_coalesce()
2359 struct virtnet_info *vi = netdev_priv(dev); in virtnet_init_settings() local
2361 vi->speed = SPEED_UNKNOWN; in virtnet_init_settings()
2362 vi->duplex = DUPLEX_UNKNOWN; in virtnet_init_settings()
2365 static void virtnet_update_settings(struct virtnet_info *vi) in virtnet_update_settings() argument
2370 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) in virtnet_update_settings()
2373 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); in virtnet_update_settings()
2376 vi->speed = speed; in virtnet_update_settings()
2378 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); in virtnet_update_settings()
2381 vi->duplex = duplex; in virtnet_update_settings()
2403 struct virtnet_info *vi = vdev->priv; in virtnet_freeze_down() local
2406 flush_work(&vi->config_work); in virtnet_freeze_down()
2408 netif_tx_lock_bh(vi->dev); in virtnet_freeze_down()
2409 netif_device_detach(vi->dev); in virtnet_freeze_down()
2410 netif_tx_unlock_bh(vi->dev); in virtnet_freeze_down()
2411 if (netif_running(vi->dev)) in virtnet_freeze_down()
2412 virtnet_close(vi->dev); in virtnet_freeze_down()
2415 static int init_vqs(struct virtnet_info *vi);
2419 struct virtnet_info *vi = vdev->priv; in virtnet_restore_up() local
2422 err = init_vqs(vi); in virtnet_restore_up()
2428 enable_delayed_refill(vi); in virtnet_restore_up()
2430 if (netif_running(vi->dev)) { in virtnet_restore_up()
2431 err = virtnet_open(vi->dev); in virtnet_restore_up()
2436 netif_tx_lock_bh(vi->dev); in virtnet_restore_up()
2437 netif_device_attach(vi->dev); in virtnet_restore_up()
2438 netif_tx_unlock_bh(vi->dev); in virtnet_restore_up()
2442 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) in virtnet_set_guest_offloads() argument
2445 vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); in virtnet_set_guest_offloads()
2447 sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); in virtnet_set_guest_offloads()
2449 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, in virtnet_set_guest_offloads()
2451 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n"); in virtnet_set_guest_offloads()
2458 static int virtnet_clear_guest_offloads(struct virtnet_info *vi) in virtnet_clear_guest_offloads() argument
2462 if (!vi->guest_offloads) in virtnet_clear_guest_offloads()
2465 return virtnet_set_guest_offloads(vi, offloads); in virtnet_clear_guest_offloads()
2468 static int virtnet_restore_guest_offloads(struct virtnet_info *vi) in virtnet_restore_guest_offloads() argument
2470 u64 offloads = vi->guest_offloads; in virtnet_restore_guest_offloads()
2472 if (!vi->guest_offloads) in virtnet_restore_guest_offloads()
2475 return virtnet_set_guest_offloads(vi, offloads); in virtnet_restore_guest_offloads()
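
Together these three helpers implement a save/clear/restore cycle: XDP programs need unmodified packets, so receive offloads are dropped to zero while a program is attached and re-enabled from the saved guest_offloads mask on detach. A toy model with made-up feature bits:

```c
#include <stdint.h>
#include <stdio.h>

#define OFFLOAD_CSUM	(1ULL << 0)	/* illustrative bit values only */
#define OFFLOAD_TSO4	(1ULL << 1)
#define OFFLOAD_TSO6	(1ULL << 2)

static uint64_t active;	/* what the "device" currently performs */

static void set_offloads(uint64_t mask) { active = mask; }

int main(void)
{
	uint64_t saved = OFFLOAD_CSUM | OFFLOAD_TSO4 | OFFLOAD_TSO6;

	set_offloads(0);	/* clear while XDP is attached */
	printf("during xdp: %#llx\n", (unsigned long long)active);
	set_offloads(saved);	/* restore from the saved mask */
	printf("after xdp:  %#llx\n", (unsigned long long)active);
	return 0;
}
```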
2482 struct virtnet_info *vi = netdev_priv(dev); in virtnet_xdp_set() local
2487 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) in virtnet_xdp_set()
2488 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || in virtnet_xdp_set()
2489 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || in virtnet_xdp_set()
2490 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || in virtnet_xdp_set()
2491 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || in virtnet_xdp_set()
2492 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) { in virtnet_xdp_set()
2497 if (vi->mergeable_rx_bufs && !vi->any_header_sg) { in virtnet_xdp_set()
2508 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs; in virtnet_xdp_set()
2513 if (curr_qp + xdp_qp > vi->max_queue_pairs) { in virtnet_xdp_set()
2515 curr_qp + xdp_qp, vi->max_queue_pairs); in virtnet_xdp_set()
2519 old_prog = rtnl_dereference(vi->rq[0].xdp_prog); in virtnet_xdp_set()
2524 bpf_prog_add(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
2528 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
2529 napi_disable(&vi->rq[i].napi); in virtnet_xdp_set()
2530 virtnet_napi_tx_disable(&vi->sq[i].napi); in virtnet_xdp_set()
2535 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
2536 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
2538 virtnet_restore_guest_offloads(vi); in virtnet_xdp_set()
2543 err = _virtnet_set_queues(vi, curr_qp + xdp_qp); in virtnet_xdp_set()
2547 vi->xdp_queue_pairs = xdp_qp; in virtnet_xdp_set()
2550 vi->xdp_enabled = true; in virtnet_xdp_set()
2551 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
2552 rcu_assign_pointer(vi->rq[i].xdp_prog, prog); in virtnet_xdp_set()
2554 virtnet_clear_guest_offloads(vi); in virtnet_xdp_set()
2557 vi->xdp_enabled = false; in virtnet_xdp_set()
2560 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
2564 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_xdp_set()
2565 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
2566 &vi->sq[i].napi); in virtnet_xdp_set()
2574 virtnet_clear_guest_offloads(vi); in virtnet_xdp_set()
2575 for (i = 0; i < vi->max_queue_pairs; i++) in virtnet_xdp_set()
2576 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); in virtnet_xdp_set()
2580 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_xdp_set()
2581 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); in virtnet_xdp_set()
2582 virtnet_napi_tx_enable(vi, vi->sq[i].vq, in virtnet_xdp_set()
2583 &vi->sq[i].napi); in virtnet_xdp_set()
2587 bpf_prog_sub(prog, vi->max_queue_pairs - 1); in virtnet_xdp_set()
2604 struct virtnet_info *vi = netdev_priv(dev); in virtnet_get_phys_port_name() local
2607 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY)) in virtnet_get_phys_port_name()
2620 struct virtnet_info *vi = netdev_priv(dev); in virtnet_set_features() local
2625 if (vi->xdp_enabled) in virtnet_set_features()
2629 offloads = vi->guest_offloads_capable; in virtnet_set_features()
2631 offloads = vi->guest_offloads_capable & in virtnet_set_features()
2634 err = virtnet_set_guest_offloads(vi, offloads); in virtnet_set_features()
2637 vi->guest_offloads = offloads; in virtnet_set_features()
2662 struct virtnet_info *vi = in virtnet_config_changed_work() local
2666 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, in virtnet_config_changed_work()
2671 netdev_notify_peers(vi->dev); in virtnet_config_changed_work()
2672 virtnet_ack_link_announce(vi); in virtnet_config_changed_work()
2678 if (vi->status == v) in virtnet_config_changed_work()
2681 vi->status = v; in virtnet_config_changed_work()
2683 if (vi->status & VIRTIO_NET_S_LINK_UP) { in virtnet_config_changed_work()
2684 virtnet_update_settings(vi); in virtnet_config_changed_work()
2685 netif_carrier_on(vi->dev); in virtnet_config_changed_work()
2686 netif_tx_wake_all_queues(vi->dev); in virtnet_config_changed_work()
2688 netif_carrier_off(vi->dev); in virtnet_config_changed_work()
2689 netif_tx_stop_all_queues(vi->dev); in virtnet_config_changed_work()
2695 struct virtnet_info *vi = vdev->priv; in virtnet_config_changed() local
2697 schedule_work(&vi->config_work); in virtnet_config_changed()
2700 static void virtnet_free_queues(struct virtnet_info *vi) in virtnet_free_queues() argument
2704 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_free_queues()
2705 __netif_napi_del(&vi->rq[i].napi); in virtnet_free_queues()
2706 __netif_napi_del(&vi->sq[i].napi); in virtnet_free_queues()
2710 * we need to respect an RCU grace period before freeing vi->rq in virtnet_free_queues()
2714 kfree(vi->rq); in virtnet_free_queues()
2715 kfree(vi->sq); in virtnet_free_queues()
2716 kfree(vi->ctrl); in virtnet_free_queues()
2719 static void _free_receive_bufs(struct virtnet_info *vi) in _free_receive_bufs() argument
2724 for (i = 0; i < vi->max_queue_pairs; i++) { in _free_receive_bufs()
2725 while (vi->rq[i].pages) in _free_receive_bufs()
2726 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); in _free_receive_bufs()
2728 old_prog = rtnl_dereference(vi->rq[i].xdp_prog); in _free_receive_bufs()
2729 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL); in _free_receive_bufs()
2735 static void free_receive_bufs(struct virtnet_info *vi) in free_receive_bufs() argument
2738 _free_receive_bufs(vi); in free_receive_bufs()
2742 static void free_receive_page_frags(struct virtnet_info *vi) in free_receive_page_frags() argument
2745 for (i = 0; i < vi->max_queue_pairs; i++) in free_receive_page_frags()
2746 if (vi->rq[i].alloc_frag.page) in free_receive_page_frags()
2747 put_page(vi->rq[i].alloc_frag.page); in free_receive_page_frags()
2760 struct virtnet_info *vi = vq->vdev->priv; in virtnet_rq_free_unused_buf() local
2763 if (vi->mergeable_rx_bufs) in virtnet_rq_free_unused_buf()
2765 else if (vi->big_packets) in virtnet_rq_free_unused_buf()
2766 give_pages(&vi->rq[i], buf); in virtnet_rq_free_unused_buf()
2771 static void free_unused_bufs(struct virtnet_info *vi) in free_unused_bufs() argument
2776 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
2777 struct virtqueue *vq = vi->sq[i].vq; in free_unused_bufs()
2783 for (i = 0; i < vi->max_queue_pairs; i++) { in free_unused_bufs()
2784 struct virtqueue *vq = vi->rq[i].vq; in free_unused_bufs()
2791 static void virtnet_del_vqs(struct virtnet_info *vi) in virtnet_del_vqs() argument
2793 struct virtio_device *vdev = vi->vdev; in virtnet_del_vqs()
2795 virtnet_clean_affinity(vi); in virtnet_del_vqs()
2799 virtnet_free_queues(vi); in virtnet_del_vqs()
2806 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq) in mergeable_min_buf_len() argument
2810 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu; in mergeable_min_buf_len()
2818 static int virtnet_find_vqs(struct virtnet_info *vi) in virtnet_find_vqs() argument
2832 total_vqs = vi->max_queue_pairs * 2 + in virtnet_find_vqs()
2833 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); in virtnet_find_vqs()
2845 if (!vi->big_packets || vi->mergeable_rx_bufs) { in virtnet_find_vqs()
2854 if (vi->has_cvq) { in virtnet_find_vqs()
2860 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
2863 sprintf(vi->rq[i].name, "input.%u", i); in virtnet_find_vqs()
2864 sprintf(vi->sq[i].name, "output.%u", i); in virtnet_find_vqs()
2865 names[rxq2vq(i)] = vi->rq[i].name; in virtnet_find_vqs()
2866 names[txq2vq(i)] = vi->sq[i].name; in virtnet_find_vqs()
2871 ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, in virtnet_find_vqs()
2876 if (vi->has_cvq) { in virtnet_find_vqs()
2877 vi->cvq = vqs[total_vqs - 1]; in virtnet_find_vqs()
2878 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) in virtnet_find_vqs()
2879 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in virtnet_find_vqs()
2882 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_find_vqs()
2883 vi->rq[i].vq = vqs[rxq2vq(i)]; in virtnet_find_vqs()
2884 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq); in virtnet_find_vqs()
2885 vi->sq[i].vq = vqs[txq2vq(i)]; in virtnet_find_vqs()
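
virtnet_find_vqs() depends on virtio-net's virtqueue layout: pair i's receive queue sits at index 2*i, its transmit queue at 2*i+1, and the control queue, when negotiated, comes last. A runnable model of that mapping, assuming this even/odd convention holds:

```c
#include <stdio.h>

static int rxq2vq(int rxq) { return rxq * 2; }
static int txq2vq(int txq) { return txq * 2 + 1; }

int main(void)
{
	int max_queue_pairs = 2, has_cvq = 1;
	int total_vqs = max_queue_pairs * 2 + has_cvq;

	for (int i = 0; i < max_queue_pairs; i++)
		printf("pair %d: rx=vq%d tx=vq%d\n", i, rxq2vq(i), txq2vq(i));
	printf("cvq=vq%d (total %d)\n", total_vqs - 1, total_vqs);
	return 0;
}
```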
2903 static int virtnet_alloc_queues(struct virtnet_info *vi) in virtnet_alloc_queues() argument
2907 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); in virtnet_alloc_queues()
2908 if (!vi->ctrl) in virtnet_alloc_queues()
2910 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL); in virtnet_alloc_queues()
2911 if (!vi->sq) in virtnet_alloc_queues()
2913 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL); in virtnet_alloc_queues()
2914 if (!vi->rq) in virtnet_alloc_queues()
2917 INIT_DELAYED_WORK(&vi->refill, refill_work); in virtnet_alloc_queues()
2918 for (i = 0; i < vi->max_queue_pairs; i++) { in virtnet_alloc_queues()
2919 vi->rq[i].pages = NULL; in virtnet_alloc_queues()
2920 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, in virtnet_alloc_queues()
2922 netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, in virtnet_alloc_queues()
2925 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); in virtnet_alloc_queues()
2926 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); in virtnet_alloc_queues()
2927 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); in virtnet_alloc_queues()
2929 u64_stats_init(&vi->rq[i].stats.syncp); in virtnet_alloc_queues()
2930 u64_stats_init(&vi->sq[i].stats.syncp); in virtnet_alloc_queues()
2936 kfree(vi->sq); in virtnet_alloc_queues()
2938 kfree(vi->ctrl); in virtnet_alloc_queues()
2943 static int init_vqs(struct virtnet_info *vi) in init_vqs() argument
2948 ret = virtnet_alloc_queues(vi); in init_vqs()
2952 ret = virtnet_find_vqs(vi); in init_vqs()
2957 virtnet_set_affinity(vi); in init_vqs()
2963 virtnet_free_queues(vi); in init_vqs()
2972 struct virtnet_info *vi = netdev_priv(queue->dev); in mergeable_rx_buffer_size_show() local
2974 unsigned int headroom = virtnet_get_headroom(vi); in mergeable_rx_buffer_size_show()
2978 BUG_ON(queue_index >= vi->max_queue_pairs); in mergeable_rx_buffer_size_show()
2979 avg = &vi->rq[queue_index].mrg_avg_pkt_len; in mergeable_rx_buffer_size_show()
2981 get_mergeable_buf_len(&vi->rq[queue_index], avg, in mergeable_rx_buffer_size_show()
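
mergeable_rx_buffer_size_show() reports a length derived from a moving average of recent packet sizes, which the mergeable path also uses when sizing new buffers. A minimal integer EWMA model, assuming a 1/64 weight in the spirit of the driver's DECLARE_EWMA() usage (the exact factor is an assumption):

```c
#include <stdio.h>

/* new = old + (sample - old) / 64, kept in integer arithmetic */
static unsigned long ewma_add(unsigned long avg, unsigned long sample)
{
	return avg ? (avg * 63 + sample) / 64 : sample;
}

int main(void)
{
	unsigned long avg = 0;
	unsigned long pkts[] = { 1500, 64, 64, 9000 };

	for (int i = 0; i < 4; i++) {
		avg = ewma_add(avg, pkts[i]);
		printf("len=%4lu avg=%lu\n", pkts[i], avg);
	}
	return 0;
}
```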
3062 struct virtnet_info *vi; in virtnet_probe() local
3138 vi = netdev_priv(dev); in virtnet_probe()
3139 vi->dev = dev; in virtnet_probe()
3140 vi->vdev = vdev; in virtnet_probe()
3141 vdev->priv = vi; in virtnet_probe()
3143 INIT_WORK(&vi->config_work, virtnet_config_changed_work); in virtnet_probe()
3144 spin_lock_init(&vi->refill_lock); in virtnet_probe()
3151 vi->big_packets = true; in virtnet_probe()
3154 vi->mergeable_rx_bufs = true; in virtnet_probe()
3158 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); in virtnet_probe()
3160 vi->hdr_len = sizeof(struct virtio_net_hdr); in virtnet_probe()
3164 vi->any_header_sg = true; in virtnet_probe()
3167 vi->has_cvq = true; in virtnet_probe()
3189 vi->big_packets = true; in virtnet_probe()
3192 if (vi->any_header_sg) in virtnet_probe()
3193 dev->needed_headroom = vi->hdr_len; in virtnet_probe()
3197 vi->curr_queue_pairs = max_queue_pairs; in virtnet_probe()
3199 vi->curr_queue_pairs = num_online_cpus(); in virtnet_probe()
3200 vi->max_queue_pairs = max_queue_pairs; in virtnet_probe()
3203 err = init_vqs(vi); in virtnet_probe()
3208 if (vi->mergeable_rx_bufs) in virtnet_probe()
3211 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
3212 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); in virtnet_probe()
3217 vi->failover = net_failover_create(vi->dev); in virtnet_probe()
3218 if (IS_ERR(vi->failover)) { in virtnet_probe()
3219 err = PTR_ERR(vi->failover); in virtnet_probe()
3236 _virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_probe()
3240 err = virtnet_cpu_notif_add(vi); in virtnet_probe()
3249 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { in virtnet_probe()
3250 schedule_work(&vi->config_work); in virtnet_probe()
3252 vi->status = VIRTIO_NET_S_LINK_UP; in virtnet_probe()
3253 virtnet_update_settings(vi); in virtnet_probe()
3258 if (virtio_has_feature(vi->vdev, guest_offloads[i])) in virtnet_probe()
3259 set_bit(guest_offloads[i], &vi->guest_offloads); in virtnet_probe()
3260 vi->guest_offloads_capable = vi->guest_offloads; in virtnet_probe()
3268 vi->vdev->config->reset(vdev); in virtnet_probe()
3272 net_failover_destroy(vi->failover); in virtnet_probe()
3274 cancel_delayed_work_sync(&vi->refill); in virtnet_probe()
3275 free_receive_page_frags(vi); in virtnet_probe()
3276 virtnet_del_vqs(vi); in virtnet_probe()
3282 static void remove_vq_common(struct virtnet_info *vi) in remove_vq_common() argument
3284 vi->vdev->config->reset(vi->vdev); in remove_vq_common()
3287 free_unused_bufs(vi); in remove_vq_common()
3289 free_receive_bufs(vi); in remove_vq_common()
3291 free_receive_page_frags(vi); in remove_vq_common()
3293 virtnet_del_vqs(vi); in remove_vq_common()
3298 struct virtnet_info *vi = vdev->priv; in virtnet_remove() local
3300 virtnet_cpu_notif_remove(vi); in virtnet_remove()
3303 flush_work(&vi->config_work); in virtnet_remove()
3305 unregister_netdev(vi->dev); in virtnet_remove()
3307 net_failover_destroy(vi->failover); in virtnet_remove()
3309 remove_vq_common(vi); in virtnet_remove()
3311 free_netdev(vi->dev); in virtnet_remove()
3316 struct virtnet_info *vi = vdev->priv; in virtnet_freeze() local
3318 virtnet_cpu_notif_remove(vi); in virtnet_freeze()
3320 remove_vq_common(vi); in virtnet_freeze()
3327 struct virtnet_info *vi = vdev->priv; in virtnet_restore() local
3333 virtnet_set_queues(vi, vi->curr_queue_pairs); in virtnet_restore()
3335 err = virtnet_cpu_notif_add(vi); in virtnet_restore()
3338 remove_vq_common(vi); in virtnet_restore()