
Lines matching full:vp — UML vector network driver (arch/um/drivers/vector_kern.c in the Linux source tree)

104 static void vector_reset_stats(struct vector_private *vp)  in vector_reset_stats()  argument
106 vp->estats.rx_queue_max = 0; in vector_reset_stats()
107 vp->estats.rx_queue_running_average = 0; in vector_reset_stats()
108 vp->estats.tx_queue_max = 0; in vector_reset_stats()
109 vp->estats.tx_queue_running_average = 0; in vector_reset_stats()
110 vp->estats.rx_encaps_errors = 0; in vector_reset_stats()
111 vp->estats.tx_timeout_count = 0; in vector_reset_stats()
112 vp->estats.tx_restart_queue = 0; in vector_reset_stats()
113 vp->estats.tx_kicks = 0; in vector_reset_stats()
114 vp->estats.tx_flow_control_xon = 0; in vector_reset_stats()
115 vp->estats.tx_flow_control_xoff = 0; in vector_reset_stats()
116 vp->estats.sg_ok = 0; in vector_reset_stats()
117 vp->estats.sg_linearized = 0; in vector_reset_stats()
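
The fields cleared here give a fair picture of the driver's extended statistics block. A minimal sketch of that structure, reconstructed only from the field names visible in this listing (the authoritative definition lives elsewhere in the driver; the types and the final field's placement are assumptions):

    #include <stdint.h>

    /* struct vector_estats as implied by vector_reset_stats() above. */
    struct vector_estats {
        uint64_t rx_queue_max;
        uint64_t rx_queue_running_average;
        uint64_t tx_queue_max;
        uint64_t tx_queue_running_average;
        uint64_t rx_encaps_errors;
        uint64_t tx_timeout_count;
        uint64_t tx_restart_queue;
        uint64_t tx_kicks;
        uint64_t tx_flow_control_xon;
        uint64_t tx_flow_control_xoff;
        uint64_t sg_ok;
        uint64_t sg_linearized;
        uint64_t rx_csum_offload_good;  /* incremented in the RX paths below */
    };
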
293 static int prep_msg(struct vector_private *vp, in prep_msg() argument
306 if (vp->header_size > 0) { in prep_msg()
307 iov[iov_index].iov_len = vp->header_size; in prep_msg()
308 vp->form_header(iov[iov_index].iov_base, skb, vp); in prep_msg()
314 vp->estats.sg_ok++; in prep_msg()
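
prep_msg() reserves iovec slot 0 for an optional encapsulation header (vp->form_header writes into it) before the packet data is mapped into the remaining slots. A userspace sketch of that pattern, with build_tx_iov() as a hypothetical stand-in for the driver's code:

    #include <stddef.h>
    #include <sys/uio.h>

    /* Slot 0 carries the encapsulation header only when the transport
     * uses one, mirroring the header_size > 0 test above. */
    static int build_tx_iov(struct iovec *iov, void *hdr, size_t hdr_len,
                            void *pkt, size_t pkt_len)
    {
        int n = 0;

        if (hdr_len > 0) {
            iov[n].iov_base = hdr;   /* vp->header_txbuffer in the driver */
            iov[n].iov_len = hdr_len;
            n++;
        }
        iov[n].iov_base = pkt;
        iov[n].iov_len = pkt_len;
        return n + 1;                /* slot count handed to writev/sendmsg */
    }
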
336 struct vector_private *vp = netdev_priv(qi->dev); in vector_enqueue() local
355 vp, in vector_enqueue()
362 mmsg_vector->msg_hdr.msg_name = vp->fds->remote_addr; in vector_enqueue()
363 mmsg_vector->msg_hdr.msg_namelen = vp->fds->remote_addr_size; in vector_enqueue()
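
For datagram transports the destination address must travel with every message, which is why vector_enqueue() points msg_name at the remote address. A sketch with an assumed IPv4 peer (the driver's remote_addr is transport-specific):

    #include <netinet/in.h>
    #include <sys/socket.h>

    /* Equivalent of msg_name = remote_addr / msg_namelen = remote_addr_size */
    static void set_destination(struct msghdr *mh, struct sockaddr_in *peer)
    {
        mh->msg_name = peer;
        mh->msg_namelen = sizeof(*peer);
    }
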
410 struct vector_private *vp = netdev_priv(qi->dev); in vector_send() local
430 vp->fds->tx_fd, in vector_send()
435 vp->in_write_poll = in vector_send()
445 netdev_err(vp->dev, "sendmmsg err=%i\n", in vector_send()
447 vp->in_error = true; in vector_send()
457 if (result > vp->estats.tx_queue_max) in vector_send()
458 vp->estats.tx_queue_max = result; in vector_send()
459 vp->estats.tx_queue_running_average = in vector_send()
460 (vp->estats.tx_queue_running_average + result) >> 1; in vector_send()
468 vp->estats.tx_restart_queue++; in vector_send()
475 tasklet_schedule(&vp->tx_poll); in vector_send()
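
vector_send() pushes a whole batch with sendmmsg(2) and tracks queue depth with a one-line exponential running average, new = (old + sample) / 2, which is what the ">> 1" above computes. A hedged userspace sketch of that bookkeeping (send_batch() is illustrative, not the driver's function):

    #define _GNU_SOURCE
    #include <sys/socket.h>

    static unsigned long tx_queue_max, tx_queue_running_average;

    static int send_batch(int fd, struct mmsghdr *vec, unsigned int depth)
    {
        int sent = sendmmsg(fd, vec, depth, 0);

        if (sent > 0) {
            if ((unsigned long)sent > tx_queue_max)
                tx_queue_max = sent;
            tx_queue_running_average =
                (tx_queue_running_average + sent) >> 1;  /* halving average */
        }
        return sent;   /* a short count means the tail stays queued
                        * (the tx_restart_queue++ case in the driver) */
    }
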
488 struct vector_private *vp = netdev_priv(qi->dev); in destroy_queue() local
509 if ((vp->header_size > 0) && in destroy_queue()
525 struct vector_private *vp, in create_queue() argument
539 result->dev = vp->dev; in create_queue()
563 if (vp->header_size > 0) in create_queue()
582 if (vp->header_size > 0) { in create_queue()
620 struct vector_private *vp, in prep_skb() argument
623 int linear = vp->max_packet + vp->headroom + SAFETY_MARGIN; in prep_skb()
630 if (vp->req_size <= linear) in prep_skb()
633 len = vp->req_size; in prep_skb()
636 len - vp->max_packet, in prep_skb()
641 if (vp->header_size > 0) in prep_skb()
648 skb_reserve(result, vp->headroom); in prep_skb()
649 result->dev = vp->dev; in prep_skb()
650 skb_put(result, vp->max_packet); in prep_skb()
651 result->data_len = len - vp->max_packet; in prep_skb()
652 result->len += len - vp->max_packet; in prep_skb()
656 iov[iov_index].iov_len = vp->max_packet; in prep_skb()
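
The arithmetic in prep_skb() is worth isolating: the linear area always holds headroom plus one full packet plus a safety margin, and whatever a larger req_size asks for beyond max_packet is carried as paged data (skb->data_len). A standalone sketch; the SAFETY_MARGIN value is an assumption:

    #include <stddef.h>

    #define SAFETY_MARGIN 32   /* illustrative; the driver defines its own */

    /* Returns the bytes that end up in fragments (skb->data_len above). */
    static size_t rx_frag_bytes(size_t max_packet, size_t headroom,
                                size_t req_size)
    {
        size_t linear = max_packet + headroom + SAFETY_MARGIN;
        size_t len = (req_size <= linear) ? linear : req_size;

        return len - max_packet;
    }
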
679 struct vector_private *vp = netdev_priv(qi->dev); in prep_queue_for_rx() local
692 *skbuff_vector = prep_skb(vp, &mmsg_vector->msg_hdr); in prep_queue_for_rx()
801 struct vector_private *vp; in vector_remove() local
807 vp = netdev_priv(dev); in vector_remove()
808 if (vp->fds != NULL) in vector_remove()
842 static int vector_legacy_rx(struct vector_private *vp) in vector_legacy_rx() argument
858 if (vp->header_size > 0) { in vector_legacy_rx()
859 iov[0].iov_base = vp->header_rxbuffer; in vector_legacy_rx()
860 iov[0].iov_len = vp->header_size; in vector_legacy_rx()
863 skb = prep_skb(vp, &hdr); in vector_legacy_rx()
872 vp->dev->stats.rx_dropped++; in vector_legacy_rx()
875 pkt_len = uml_vector_recvmsg(vp->fds->rx_fd, &hdr, 0); in vector_legacy_rx()
877 vp->in_error = true; in vector_legacy_rx()
882 if (pkt_len > vp->header_size) { in vector_legacy_rx()
883 if (vp->header_size > 0) { in vector_legacy_rx()
884 header_check = vp->verify_header( in vector_legacy_rx()
885 vp->header_rxbuffer, skb, vp); in vector_legacy_rx()
888 vp->dev->stats.rx_dropped++; in vector_legacy_rx()
889 vp->estats.rx_encaps_errors++; in vector_legacy_rx()
893 vp->estats.rx_csum_offload_good++; in vector_legacy_rx()
897 pskb_trim(skb, pkt_len - vp->rx_header_size); in vector_legacy_rx()
899 vp->dev->stats.rx_bytes += skb->len; in vector_legacy_rx()
900 vp->dev->stats.rx_packets++; in vector_legacy_rx()
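
The legacy (single-packet) receive path is one recvmsg(2) followed by an optional header check; packets whose encapsulation header fails verification are dropped and counted in rx_encaps_errors. A userspace sketch, with verify() standing in for vp->verify_header:

    #include <sys/socket.h>
    #include <sys/uio.h>

    static ssize_t legacy_rx(int fd, struct msghdr *mh, size_t header_size,
                             int (*verify)(void *hdr))
    {
        ssize_t pkt_len = recvmsg(fd, mh, 0);

        if (pkt_len < 0)
            return pkt_len;                   /* the vp->in_error path */
        if (pkt_len <= (ssize_t)header_size)
            return 0;                         /* nothing beyond the header */
        if (header_size > 0 && verify(mh->msg_iov[0].iov_base) < 0)
            return -1;                        /* rx_dropped + rx_encaps_errors */
        return pkt_len - header_size;         /* payload length after the trim */
    }
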
916 static int writev_tx(struct vector_private *vp, struct sk_buff *skb) in writev_tx() argument
921 iov[0].iov_base = vp->header_txbuffer; in writev_tx()
922 iov_count = prep_msg(vp, skb, (struct iovec *) &iov); in writev_tx()
928 vp->fds->tx_fd, in writev_tx()
936 netif_trans_update(vp->dev); in writev_tx()
937 netif_wake_queue(vp->dev); in writev_tx()
940 vp->dev->stats.tx_bytes += skb->len; in writev_tx()
941 vp->dev->stats.tx_packets++; in writev_tx()
943 vp->dev->stats.tx_dropped++; in writev_tx()
948 vp->dev->stats.tx_dropped++; in writev_tx()
951 vp->in_error = true; in writev_tx()
960 static int vector_mmsg_rx(struct vector_private *vp) in vector_mmsg_rx() argument
963 struct vector_queue *qi = vp->rx_queue; in vector_mmsg_rx()
978 vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0); in vector_mmsg_rx()
981 vp->in_error = true; in vector_mmsg_rx()
995 if (mmsg_vector->msg_len > vp->header_size) { in vector_mmsg_rx()
996 if (vp->header_size > 0) { in vector_mmsg_rx()
997 header_check = vp->verify_header( in vector_mmsg_rx()
1000 vp in vector_mmsg_rx()
1009 vp->estats.rx_encaps_errors++; in vector_mmsg_rx()
1013 vp->estats.rx_csum_offload_good++; in vector_mmsg_rx()
1018 mmsg_vector->msg_len - vp->rx_header_size); in vector_mmsg_rx()
1024 vp->dev->stats.rx_bytes += skb->len; in vector_mmsg_rx()
1025 vp->dev->stats.rx_packets++; in vector_mmsg_rx()
1041 if (vp->estats.rx_queue_max < packet_count) in vector_mmsg_rx()
1042 vp->estats.rx_queue_max = packet_count; in vector_mmsg_rx()
1043 vp->estats.rx_queue_running_average = in vector_mmsg_rx()
1044 (vp->estats.rx_queue_running_average + packet_count) >> 1; in vector_mmsg_rx()
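
vector_mmsg_rx() is the batched counterpart: a single recvmmsg(2) fills up to max_depth slots, each message gets the same header check, and the batch size feeds the same (old + sample) >> 1 running average. A minimal sketch of the batching core:

    #define _GNU_SOURCE
    #include <sys/socket.h>

    static int mmsg_rx_batch(int fd, struct mmsghdr *vec,
                             unsigned int max_depth,
                             unsigned long *avg, unsigned long *max)
    {
        int packet_count = recvmmsg(fd, vec, max_depth, 0, NULL);

        if (packet_count > 0) {
            if (*max < (unsigned long)packet_count)
                *max = packet_count;            /* rx_queue_max */
            *avg = (*avg + packet_count) >> 1;  /* rx_queue_running_average */
        }
        return packet_count;
    }
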
1049 static void vector_rx(struct vector_private *vp) in vector_rx() argument
1054 if ((vp->options & VECTOR_RX) > 0) in vector_rx()
1055 while (((err = vector_mmsg_rx(vp)) > 0) && (iter < MAX_ITERATIONS)) in vector_rx()
1058 while (((err = vector_legacy_rx(vp)) > 0) && (iter < MAX_ITERATIONS)) in vector_rx()
1061 netdev_err(vp->dev, "vector_rx: error(%d)\n", err); in vector_rx()
1063 netdev_err(vp->dev, "vector_rx: device stuck, remote end may have closed the connection\n"); in vector_rx()
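
vector_rx() drains the fd in repeated passes, choosing the batched or legacy path based on the VECTOR_RX option, and caps the passes with MAX_ITERATIONS so one busy socket cannot starve the system. The loop shape, with an assumed iteration bound:

    #define MAX_ITERATIONS 64   /* illustrative; the driver sets its own bound */

    static void rx_drain(int (*rx_pass)(void))
    {
        int err, iter = 0;

        while ((err = rx_pass()) > 0 && iter < MAX_ITERATIONS)
            iter++;
        /* err < 0 is reported as an RX error; exhausting the bound is
         * flagged as a possibly stuck device / closed remote end. */
    }
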
1068 struct vector_private *vp = netdev_priv(dev); in vector_net_start_xmit() local
1071 if (vp->in_error) { in vector_net_start_xmit()
1072 deactivate_fd(vp->fds->rx_fd, vp->rx_irq); in vector_net_start_xmit()
1073 if ((vp->fds->rx_fd != vp->fds->tx_fd) && (vp->tx_irq != 0)) in vector_net_start_xmit()
1074 deactivate_fd(vp->fds->tx_fd, vp->tx_irq); in vector_net_start_xmit()
1078 if ((vp->options & VECTOR_TX) == 0) { in vector_net_start_xmit()
1079 writev_tx(vp, skb); in vector_net_start_xmit()
1087 netdev_sent_queue(vp->dev, skb->len); in vector_net_start_xmit()
1088 queue_depth = vector_enqueue(vp->tx_queue, skb); in vector_net_start_xmit()
1094 if (queue_depth >= vp->tx_queue->max_depth - 1) { in vector_net_start_xmit()
1095 vp->estats.tx_kicks++; in vector_net_start_xmit()
1097 vector_send(vp->tx_queue); in vector_net_start_xmit()
1101 mod_timer(&vp->tl, vp->coalesce); in vector_net_start_xmit()
1105 vp->estats.tx_kicks++; in vector_net_start_xmit()
1106 vector_send(vp->tx_queue); in vector_net_start_xmit()
1108 tasklet_schedule(&vp->tx_poll); in vector_net_start_xmit()
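
The transmit path batches by design: a nearly full queue is flushed immediately (a "kick"), otherwise the coalescing timer is armed and the flush happens when it fires. A sketch of that decision, with hypothetical callbacks:

    /* Mirrors the queue_depth >= max_depth - 1 test above. */
    static void tx_kick_or_coalesce(unsigned int queue_depth,
                                    unsigned int max_depth,
                                    void (*flush)(void),      /* vector_send */
                                    void (*arm_timer)(void))  /* mod_timer(&vp->tl, vp->coalesce) */
    {
        if (queue_depth >= max_depth - 1)
            flush();       /* estats.tx_kicks++ then send now */
        else
            arm_timer();   /* keep batching; timer expiry sends later */
    }
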
1115 struct vector_private *vp = netdev_priv(dev); in vector_rx_interrupt() local
1119 vector_rx(vp); in vector_rx_interrupt()
1127 struct vector_private *vp = netdev_priv(dev); in vector_tx_interrupt() local
1138 if (vp->in_write_poll) in vector_tx_interrupt()
1139 tasklet_schedule(&vp->tx_poll); in vector_tx_interrupt()
1148 struct vector_private *vp = netdev_priv(dev); in vector_net_close() local
1152 del_timer(&vp->tl); in vector_net_close()
1154 if (vp->fds == NULL) in vector_net_close()
1158 if (vp->rx_irq > 0) { in vector_net_close()
1159 um_free_irq(vp->rx_irq, dev); in vector_net_close()
1160 vp->rx_irq = 0; in vector_net_close()
1162 if (vp->tx_irq > 0) { in vector_net_close()
1163 um_free_irq(vp->tx_irq, dev); in vector_net_close()
1164 vp->tx_irq = 0; in vector_net_close()
1166 tasklet_kill(&vp->tx_poll); in vector_net_close()
1167 if (vp->fds->rx_fd > 0) { in vector_net_close()
1168 if (vp->bpf) in vector_net_close()
1169 uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf); in vector_net_close()
1170 os_close_file(vp->fds->rx_fd); in vector_net_close()
1171 vp->fds->rx_fd = -1; in vector_net_close()
1173 if (vp->fds->tx_fd > 0) { in vector_net_close()
1174 os_close_file(vp->fds->tx_fd); in vector_net_close()
1175 vp->fds->tx_fd = -1; in vector_net_close()
1177 if (vp->bpf != NULL) in vector_net_close()
1178 kfree(vp->bpf->filter); in vector_net_close()
1179 kfree(vp->bpf); in vector_net_close()
1180 vp->bpf = NULL; in vector_net_close()
1181 kfree(vp->fds->remote_addr); in vector_net_close()
1182 kfree(vp->transport_data); in vector_net_close()
1183 kfree(vp->header_rxbuffer); in vector_net_close()
1184 kfree(vp->header_txbuffer); in vector_net_close()
1185 if (vp->rx_queue != NULL) in vector_net_close()
1186 destroy_queue(vp->rx_queue); in vector_net_close()
1187 if (vp->tx_queue != NULL) in vector_net_close()
1188 destroy_queue(vp->tx_queue); in vector_net_close()
1189 kfree(vp->fds); in vector_net_close()
1190 vp->fds = NULL; in vector_net_close()
1191 spin_lock_irqsave(&vp->lock, flags); in vector_net_close()
1192 vp->opened = false; in vector_net_close()
1193 vp->in_error = false; in vector_net_close()
1194 spin_unlock_irqrestore(&vp->lock, flags); in vector_net_close()
1202 struct vector_private *vp = (struct vector_private *)data; in vector_tx_poll() local
1204 vp->estats.tx_kicks++; in vector_tx_poll()
1205 vector_send(vp->tx_queue); in vector_tx_poll()
1209 struct vector_private *vp = in vector_reset_tx() local
1211 netdev_reset_queue(vp->dev); in vector_reset_tx()
1212 netif_start_queue(vp->dev); in vector_reset_tx()
1213 netif_wake_queue(vp->dev); in vector_reset_tx()
1218 struct vector_private *vp = netdev_priv(dev); in vector_net_open() local
1223 spin_lock_irqsave(&vp->lock, flags); in vector_net_open()
1224 if (vp->opened) { in vector_net_open()
1225 spin_unlock_irqrestore(&vp->lock, flags); in vector_net_open()
1228 vp->opened = true; in vector_net_open()
1229 spin_unlock_irqrestore(&vp->lock, flags); in vector_net_open()
1231 vp->bpf = uml_vector_user_bpf(get_bpf_file(vp->parsed)); in vector_net_open()
1233 vp->fds = uml_vector_user_open(vp->unit, vp->parsed); in vector_net_open()
1235 if (vp->fds == NULL) in vector_net_open()
1238 if (build_transport_data(vp) < 0) in vector_net_open()
1241 if ((vp->options & VECTOR_RX) > 0) { in vector_net_open()
1242 vp->rx_queue = create_queue( in vector_net_open()
1243 vp, in vector_net_open()
1244 get_depth(vp->parsed), in vector_net_open()
1245 vp->rx_header_size, in vector_net_open()
1248 vp->rx_queue->queue_depth = get_depth(vp->parsed); in vector_net_open()
1250 vp->header_rxbuffer = kmalloc( in vector_net_open()
1251 vp->rx_header_size, in vector_net_open()
1254 if (vp->header_rxbuffer == NULL) in vector_net_open()
1257 if ((vp->options & VECTOR_TX) > 0) { in vector_net_open()
1258 vp->tx_queue = create_queue( in vector_net_open()
1259 vp, in vector_net_open()
1260 get_depth(vp->parsed), in vector_net_open()
1261 vp->header_size, in vector_net_open()
1265 vp->header_txbuffer = kmalloc(vp->header_size, GFP_KERNEL); in vector_net_open()
1266 if (vp->header_txbuffer == NULL) in vector_net_open()
1272 irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd, in vector_net_open()
1280 vp->rx_irq = irq_rr + VECTOR_BASE_IRQ; in vector_net_open()
1285 if ((vp->options & VECTOR_TX) > 0) { in vector_net_open()
1287 irq_rr + VECTOR_BASE_IRQ, vp->fds->tx_fd, in vector_net_open()
1296 vp->tx_irq = irq_rr + VECTOR_BASE_IRQ; in vector_net_open()
1300 if ((vp->options & VECTOR_QDISC_BYPASS) != 0) { in vector_net_open()
1301 if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd)) in vector_net_open()
1302 vp->options |= VECTOR_BPF; in vector_net_open()
1304 if (((vp->options & VECTOR_BPF) != 0) && (vp->bpf == NULL)) in vector_net_open()
1305 vp->bpf = uml_vector_default_bpf(dev->dev_addr); in vector_net_open()
1307 if (vp->bpf != NULL) in vector_net_open()
1308 uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf); in vector_net_open()
1317 vector_rx(vp); in vector_net_open()
1319 vector_reset_stats(vp); in vector_net_open()
1320 vdevice = find_device(vp->unit); in vector_net_open()
1323 if ((vp->options & VECTOR_TX) != 0) in vector_net_open()
1324 add_timer(&vp->tl); in vector_net_open()
1340 struct vector_private *vp = netdev_priv(dev); in vector_net_tx_timeout() local
1342 vp->estats.tx_timeout_count++; in vector_net_tx_timeout()
1344 schedule_work(&vp->reset_tx); in vector_net_tx_timeout()
1357 struct vector_private *vp = netdev_priv(dev); in vector_set_features() local
1364 vp->req_size = 65536; in vector_set_features()
1367 vp->req_size = vp->max_packet + vp->headroom + SAFETY_MARGIN; in vector_set_features()
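
vector_set_features() resizes receive buffers for the active feature set: with a large-receive feature enabled, req_size jumps to 64 KiB, otherwise one packet plus headroom and margin is enough. The exact feature flag being tested is not visible in this listing, so it is a plain parameter here:

    #include <stddef.h>

    #define SAFETY_MARGIN 32   /* illustrative, as in the prep_skb() sketch */

    static size_t pick_req_size(int large_rx_enabled, size_t max_packet,
                                size_t headroom)
    {
        return large_rx_enabled ? 65536
                                : max_packet + headroom + SAFETY_MARGIN;
    }
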
1389 struct vector_private *vp = netdev_priv(dev); in vector_net_load_bpf_flash() local
1394 if (!(vp->options & VECTOR_BPF_FLASH)) { in vector_net_load_bpf_flash()
1399 spin_lock(&vp->lock); in vector_net_load_bpf_flash()
1401 if (vp->bpf != NULL) { in vector_net_load_bpf_flash()
1402 if (vp->opened) in vector_net_load_bpf_flash()
1403 uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf); in vector_net_load_bpf_flash()
1404 kfree(vp->bpf->filter); in vector_net_load_bpf_flash()
1405 vp->bpf->filter = NULL; in vector_net_load_bpf_flash()
1407 vp->bpf = kmalloc(sizeof(struct sock_fprog), GFP_ATOMIC); in vector_net_load_bpf_flash()
1408 if (vp->bpf == NULL) { in vector_net_load_bpf_flash()
1414 vdevice = find_device(vp->unit); in vector_net_load_bpf_flash()
1419 vp->bpf->filter = kmemdup(fw->data, fw->size, GFP_ATOMIC); in vector_net_load_bpf_flash()
1420 if (!vp->bpf->filter) in vector_net_load_bpf_flash()
1423 vp->bpf->len = fw->size / sizeof(struct sock_filter); in vector_net_load_bpf_flash()
1426 if (vp->opened) in vector_net_load_bpf_flash()
1427 result = uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf); in vector_net_load_bpf_flash()
1429 spin_unlock(&vp->lock); in vector_net_load_bpf_flash()
1437 spin_unlock(&vp->lock); in vector_net_load_bpf_flash()
1438 if (vp->bpf != NULL) in vector_net_load_bpf_flash()
1439 kfree(vp->bpf->filter); in vector_net_load_bpf_flash()
1440 kfree(vp->bpf); in vector_net_load_bpf_flash()
1441 vp->bpf = NULL; in vector_net_load_bpf_flash()
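
Flashing a BPF filter via ethtool turns the firmware blob directly into a classic-BPF program: the bytes are duplicated verbatim and the instruction count is the blob size divided by sizeof(struct sock_filter). A userspace analogue using malloc/memcpy in place of the kmalloc/kmemdup calls above:

    #include <linux/filter.h>
    #include <stdlib.h>
    #include <string.h>

    static struct sock_fprog *fprog_from_blob(const void *data, size_t size)
    {
        struct sock_fprog *bpf = malloc(sizeof(*bpf));

        if (bpf == NULL)
            return NULL;
        bpf->filter = malloc(size);
        if (bpf->filter == NULL) {
            free(bpf);
            return NULL;
        }
        memcpy(bpf->filter, data, size);              /* kmemdup() analogue */
        bpf->len = size / sizeof(struct sock_filter); /* instruction count */
        return bpf;
    }
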
1448 struct vector_private *vp = netdev_priv(netdev); in vector_get_ringparam() local
1450 ring->rx_max_pending = vp->rx_queue->max_depth; in vector_get_ringparam()
1451 ring->tx_max_pending = vp->tx_queue->max_depth; in vector_get_ringparam()
1452 ring->rx_pending = vp->rx_queue->max_depth; in vector_get_ringparam()
1453 ring->tx_pending = vp->tx_queue->max_depth; in vector_get_ringparam()
1487 struct vector_private *vp = netdev_priv(dev); in vector_get_ethtool_stats() local
1489 memcpy(tmp_stats, &vp->estats, sizeof(struct vector_estats)); in vector_get_ethtool_stats()
1495 struct vector_private *vp = netdev_priv(netdev); in vector_get_coalesce() local
1497 ec->tx_coalesce_usecs = (vp->coalesce * 1000000) / HZ; in vector_get_coalesce()
1504 struct vector_private *vp = netdev_priv(netdev); in vector_set_coalesce() local
1506 vp->coalesce = (ec->tx_coalesce_usecs * HZ) / 1000000; in vector_set_coalesce()
1507 if (vp->coalesce == 0) in vector_set_coalesce()
1508 vp->coalesce = 1; in vector_set_coalesce()
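
The coalesce getter and setter convert between microseconds and jiffies with HZ as the scale factor, clamping to a 1-jiffy minimum. Worked example: with HZ = 100 (10 ms ticks), a request of 50 us computes to 0 jiffies and is raised to 1, so the effective floor is one tick. As a small helper:

    static unsigned long coalesce_usecs_to_jiffies(unsigned int usecs,
                                                   unsigned int hz)
    {
        unsigned long j = ((unsigned long)usecs * hz) / 1000000;

        return j ? j : 1;   /* vp->coalesce is never left at zero */
    }
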
1545 struct vector_private *vp = from_timer(vp, t, tl); in vector_timer_expire() local
1547 vp->estats.tx_kicks++; in vector_timer_expire()
1548 vector_send(vp->tx_queue); in vector_timer_expire()
1558 struct vector_private *vp; in vector_eth_configure() local
1585 vp = netdev_priv(dev); in vector_eth_configure()
1602 *vp = ((struct vector_private) in vector_eth_configure()
1604 .list = LIST_HEAD_INIT(vp->list), in vector_eth_configure()
1633 tasklet_init(&vp->tx_poll, vector_tx_poll, (unsigned long)vp); in vector_eth_configure()
1634 INIT_WORK(&vp->reset_tx, vector_reset_tx); in vector_eth_configure()
1636 timer_setup(&vp->tl, vector_timer_expire, 0); in vector_eth_configure()
1637 spin_lock_init(&vp->lock); in vector_eth_configure()