Lines Matching full:net
23 #include <net/ip6_checksum.h>
231 static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence, in tbnet_login_response() argument
235 struct tb_xdomain *xd = net->xd; in tbnet_login_response()
241 memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN); in tbnet_login_response()
248 static int tbnet_login_request(struct tbnet *net, u8 sequence) in tbnet_login_request() argument
252 struct tb_xdomain *xd = net->xd; in tbnet_login_request()
257 atomic_inc_return(&net->command_id)); in tbnet_login_request()
268 static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence, in tbnet_logout_response() argument
272 struct tb_xdomain *xd = net->xd; in tbnet_logout_response()
277 atomic_inc_return(&net->command_id)); in tbnet_logout_response()
282 static int tbnet_logout_request(struct tbnet *net) in tbnet_logout_request() argument
286 struct tb_xdomain *xd = net->xd; in tbnet_logout_request()
291 atomic_inc_return(&net->command_id)); in tbnet_logout_request()
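The four helpers above build the ThunderboltIP control packets (login/logout, request/response); each outgoing packet is stamped with a fresh command_id taken from atomic_inc_return(&net->command_id). A minimal userspace model of that stamping follows; the struct layout and field names are illustrative, not the on-wire ThunderboltIP format:

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tbip_hdr {
	uint64_t route;      /* XDomain route string of the peer */
	uint8_t  sequence;   /* echoed back in the matching response */
	uint32_t command_id; /* unique per outgoing packet */
};

static atomic_uint command_id;

static void tbip_fill_hdr(struct tbip_hdr *hdr, uint64_t route, uint8_t seq)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->route = route;
	hdr->sequence = seq;
	/* fetch-add plus one mirrors atomic_inc_return() semantics */
	hdr->command_id = atomic_fetch_add(&command_id, 1) + 1;
}

int main(void)
{
	struct tbip_hdr login, logout;

	tbip_fill_hdr(&login, 0x1, 0);   /* login request, sequence 0 */
	tbip_fill_hdr(&logout, 0x1, 0);  /* logout gets the next id */
	printf("login id=%" PRIu32 " logout id=%" PRIu32 "\n",
	       login.command_id, logout.command_id);
	return 0;
}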
299 static void start_login(struct tbnet *net) in start_login() argument
301 mutex_lock(&net->connection_lock); in start_login()
302 net->login_sent = false; in start_login()
303 net->login_received = false; in start_login()
304 mutex_unlock(&net->connection_lock); in start_login()
306 queue_delayed_work(system_long_wq, &net->login_work, in start_login()
310 static void stop_login(struct tbnet *net) in stop_login() argument
312 cancel_delayed_work_sync(&net->login_work); in stop_login()
313 cancel_work_sync(&net->connected_work); in stop_login()
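start_login() clears both handshake flags under connection_lock before scheduling login_work on system_long_wq, and stop_login() cancels both workers synchronously so nothing races with a teardown. A pthread sketch of the flag-under-lock pattern; conn_state and its fields are illustrative names:

#include <pthread.h>
#include <stdbool.h>

struct conn_state {
	pthread_mutex_t lock;	/* plays the role of net->connection_lock */
	bool login_sent;	/* our login request was acknowledged */
	bool login_received;	/* the peer's login reached us */
};

/* Reset the handshake flags before (re)starting the login worker. */
static void start_login(struct conn_state *c)
{
	pthread_mutex_lock(&c->lock);
	c->login_sent = false;
	c->login_received = false;
	pthread_mutex_unlock(&c->lock);
	/* kernel: queue_delayed_work(system_long_wq, &login_work, delay) */
}

int main(void)
{
	struct conn_state c = { .lock = PTHREAD_MUTEX_INITIALIZER };

	start_login(&c);
	return 0;
}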
357 static void tbnet_tear_down(struct tbnet *net, bool send_logout) in tbnet_tear_down() argument
359 netif_carrier_off(net->dev); in tbnet_tear_down()
360 netif_stop_queue(net->dev); in tbnet_tear_down()
362 stop_login(net); in tbnet_tear_down()
364 mutex_lock(&net->connection_lock); in tbnet_tear_down()
366 if (net->login_sent && net->login_received) { in tbnet_tear_down()
370 int ret = tbnet_logout_request(net); in tbnet_tear_down()
375 tb_ring_stop(net->rx_ring.ring); in tbnet_tear_down()
376 tb_ring_stop(net->tx_ring.ring); in tbnet_tear_down()
377 tbnet_free_buffers(&net->rx_ring); in tbnet_tear_down()
378 tbnet_free_buffers(&net->tx_ring); in tbnet_tear_down()
380 if (tb_xdomain_disable_paths(net->xd)) in tbnet_tear_down()
381 netdev_warn(net->dev, "failed to disable DMA paths\n"); in tbnet_tear_down()
384 net->login_retries = 0; in tbnet_tear_down()
385 net->login_sent = false; in tbnet_tear_down()
386 net->login_received = false; in tbnet_tear_down()
388 mutex_unlock(&net->connection_lock); in tbnet_tear_down()
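tbnet_tear_down() runs in a fixed order: silence the netdev, cancel the login machinery, and only if the handshake had fully completed send a logout, stop both rings, free the buffers, and disable the DMA paths, before clearing the handshake state under the lock. A condensed model of that ordering; the helper names are placeholders, not kernel APIs:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder steps standing in for the kernel calls of the same intent. */
static void carrier_off(void)     { puts("carrier off, queue stopped"); }
static void stop_login_work(void) { puts("login work cancelled"); }
static void send_logout_req(void) { puts("logout request sent"); }
static void stop_rings(void)      { puts("rings stopped, buffers freed"); }
static void disable_paths(void)   { puts("DMA paths disabled"); }

static void tear_down(bool connected, bool send_logout)
{
	carrier_off();
	stop_login_work();
	/* Only a fully established connection (login_sent &&
	 * login_received) has rings and paths to undo, and only then
	 * does a logout make sense. */
	if (connected) {
		if (send_logout)
			send_logout_req();
		stop_rings();
		disable_paths();
	}
	/* Finally clear login_sent/login_received/login_retries. */
}

int main(void)
{
	tear_down(true, true);
	return 0;
}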
394 struct tbnet *net = data; in tbnet_handle_packet() local
403 if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid)) in tbnet_handle_packet()
405 if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid)) in tbnet_handle_packet()
410 if (route != net->xd->route) in tbnet_handle_packet()
419 if (!netif_running(net->dev)) in tbnet_handle_packet()
422 ret = tbnet_login_response(net, route, sequence, in tbnet_handle_packet()
425 mutex_lock(&net->connection_lock); in tbnet_handle_packet()
426 net->login_received = true; in tbnet_handle_packet()
427 net->transmit_path = pkg->transmit_path; in tbnet_handle_packet()
433 if (net->login_retries >= TBNET_LOGIN_RETRIES || in tbnet_handle_packet()
434 !net->login_sent) { in tbnet_handle_packet()
435 net->login_retries = 0; in tbnet_handle_packet()
437 &net->login_work, 0); in tbnet_handle_packet()
439 mutex_unlock(&net->connection_lock); in tbnet_handle_packet()
441 queue_work(system_long_wq, &net->connected_work); in tbnet_handle_packet()
446 ret = tbnet_logout_response(net, route, sequence, command_id); in tbnet_handle_packet()
448 queue_work(system_long_wq, &net->disconnect_work); in tbnet_handle_packet()
456 netdev_warn(net->dev, "failed to send ThunderboltIP response\n"); in tbnet_handle_packet()
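tbnet_handle_packet() authenticates every control packet before acting on it: the initiator UUID must be the remote XDomain's, the target UUID ours, and the route must match, and only then does it dispatch on the packet type. A compact sketch of the validate-then-dispatch shape, with illustrative types and constants:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

enum pkt_type { PKT_LOGIN, PKT_LOGOUT };

struct uuid { uint8_t b[16]; };

struct ctl_pkt {
	struct uuid initiator_uuid;
	struct uuid target_uuid;
	uint64_t route;
	enum pkt_type type;
};

static bool uuid_equal(const struct uuid *a, const struct uuid *b)
{
	return memcmp(a->b, b->b, sizeof(a->b)) == 0;
}

/* Returns true when the packet was accepted and answered. */
static bool handle_packet(const struct ctl_pkt *pkt,
			  const struct uuid *remote, const struct uuid *local,
			  uint64_t route)
{
	/* Drop anything not from our peer or not addressed to us. */
	if (!uuid_equal(&pkt->initiator_uuid, remote) ||
	    !uuid_equal(&pkt->target_uuid, local) ||
	    pkt->route != route)
		return false;

	switch (pkt->type) {
	case PKT_LOGIN:
		/* send login response; mark login_received; queue
		 * connected_work once our own login was acked too */
		return true;
	case PKT_LOGOUT:
		/* send logout response; queue disconnect_work */
		return true;
	}
	return false;
}

int main(void)
{
	struct uuid u = { { 1 } };
	struct ctl_pkt pkt = { u, u, 0x1, PKT_LOGIN };

	return handle_packet(&pkt, &u, &u, 0x1) ? 0 : 1;
}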
466 static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers) in tbnet_alloc_rx_buffers() argument
468 struct tbnet_ring *ring = &net->rx_ring; in tbnet_alloc_rx_buffers()
498 tf->dev = net->dev; in tbnet_alloc_rx_buffers()
512 static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net) in tbnet_get_tx_buffer() argument
514 struct tbnet_ring *ring = &net->tx_ring; in tbnet_get_tx_buffer()
537 struct tbnet *net = netdev_priv(tf->dev); in tbnet_tx_callback() local
540 net->tx_ring.prod++; in tbnet_tx_callback()
542 if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2) in tbnet_tx_callback()
543 netif_wake_queue(net->dev); in tbnet_tx_callback()
546 static int tbnet_alloc_tx_buffers(struct tbnet *net) in tbnet_alloc_tx_buffers() argument
548 struct tbnet_ring *ring = &net->tx_ring; in tbnet_alloc_tx_buffers()
571 tf->dev = net->dev; in tbnet_alloc_tx_buffers()
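Both rings are managed with free-running producer/consumer counters masked by the power-of-two ring size; their difference gives the buffers in flight, which is what the TX completion callback compares against TBNET_RING_SIZE / 2 before waking the queue. A generic model of that accounting (the standard pattern, not a line-for-line copy of the driver's per-ring counter roles):

#include <assert.h>
#include <stdint.h>

#define RING_SIZE 256u	/* must be a power of two, as TBNET_RING_SIZE is */

struct ring {
	uint32_t submitted;	/* advanced when a buffer goes to hardware */
	uint32_t completed;	/* advanced from the completion callback */
};

/* Buffers currently owned by the hardware. */
static uint32_t in_flight(const struct ring *r)
{
	return r->submitted - r->completed;	/* wrap-safe unsigned math */
}

/* Slot index for the next submission. */
static uint32_t next_slot(const struct ring *r)
{
	return r->submitted & (RING_SIZE - 1);
}

int main(void)
{
	struct ring r = { 0, 0 };

	for (int i = 0; i < 300; i++) {		/* counters exceed the size */
		assert(in_flight(&r) < RING_SIZE);
		(void)next_slot(&r);		/* masking keeps it in range */
		r.submitted++;
		r.completed++;			/* immediate completion */
	}
	assert(in_flight(&r) == 0);
	return 0;
}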
586 struct tbnet *net = container_of(work, typeof(*net), connected_work); in tbnet_connected_work() local
590 if (netif_carrier_ok(net->dev)) in tbnet_connected_work()
593 mutex_lock(&net->connection_lock); in tbnet_connected_work()
594 connected = net->login_sent && net->login_received; in tbnet_connected_work()
595 mutex_unlock(&net->connection_lock); in tbnet_connected_work()
603 ret = tb_xdomain_enable_paths(net->xd, TBNET_LOCAL_PATH, in tbnet_connected_work()
604 net->rx_ring.ring->hop, in tbnet_connected_work()
605 net->transmit_path, in tbnet_connected_work()
606 net->tx_ring.ring->hop); in tbnet_connected_work()
608 netdev_err(net->dev, "failed to enable DMA paths\n"); in tbnet_connected_work()
612 tb_ring_start(net->tx_ring.ring); in tbnet_connected_work()
613 tb_ring_start(net->rx_ring.ring); in tbnet_connected_work()
615 ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE); in tbnet_connected_work()
619 ret = tbnet_alloc_tx_buffers(net); in tbnet_connected_work()
623 netif_carrier_on(net->dev); in tbnet_connected_work()
624 netif_start_queue(net->dev); in tbnet_connected_work()
628 tbnet_free_buffers(&net->rx_ring); in tbnet_connected_work()
630 tb_ring_stop(net->rx_ring.ring); in tbnet_connected_work()
631 tb_ring_stop(net->tx_ring.ring); in tbnet_connected_work()
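tbnet_connected_work() proceeds only when both login_sent and login_received are true (sampled under the lock), then enables the DMA paths with the local and remote hop IDs, starts the rings, and allocates buffers; on failure it unwinds in reverse order. A sketch of the goto-unwind idiom the function uses; the helpers are stubs, with alloc_tx() rigged to fail:

#include <stdio.h>

static int enable_paths(void) { return 0; }
static int alloc_rx(void)     { return 0; }
static int alloc_tx(void)     { return -1; }	/* simulate a failure */

/* Set up in order, unwind in reverse from the point of failure. */
static int connected_work(void)
{
	int ret;

	ret = enable_paths();
	if (ret)
		return ret;

	/* start rx/tx rings ... */

	ret = alloc_rx();
	if (ret)
		goto err_stop_rings;

	ret = alloc_tx();
	if (ret)
		goto err_free_rx;

	puts("carrier on, queue started");
	return 0;

err_free_rx:
	puts("free rx buffers");
err_stop_rings:
	puts("stop rx/tx rings");
	return ret;
}

int main(void)
{
	return connected_work() ? 1 : 0;
}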
636 struct tbnet *net = container_of(work, typeof(*net), login_work.work); in tbnet_login_work() local
640 if (netif_carrier_ok(net->dev)) in tbnet_login_work()
643 ret = tbnet_login_request(net, net->login_retries % 4); in tbnet_login_work()
645 if (net->login_retries++ < TBNET_LOGIN_RETRIES) { in tbnet_login_work()
646 queue_delayed_work(system_long_wq, &net->login_work, in tbnet_login_work()
649 netdev_info(net->dev, "ThunderboltIP login timed out\n"); in tbnet_login_work()
652 net->login_retries = 0; in tbnet_login_work()
654 mutex_lock(&net->connection_lock); in tbnet_login_work()
655 net->login_sent = true; in tbnet_login_work()
656 mutex_unlock(&net->connection_lock); in tbnet_login_work()
658 queue_work(system_long_wq, &net->connected_work); in tbnet_login_work()
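tbnet_login_work() resends the login request with sequence = login_retries % 4 until it succeeds or TBNET_LOGIN_RETRIES is exhausted; on success it sets login_sent under the lock and queues connected_work. A synchronous model of the bounded retry (the kernel requeues the delayed work instead of looping, and the retry bound here is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define LOGIN_RETRIES 60	/* illustrative bound */

static int attempts;

/* Pretend the peer answers on the third try. */
static int send_login(unsigned sequence)
{
	(void)sequence;
	return ++attempts < 3 ? -1 : 0;
}

static bool login_with_retries(void)
{
	for (unsigned retries = 0; retries < LOGIN_RETRIES; retries++) {
		/* the sequence number cycles through 0..3 */
		if (send_login(retries % 4) == 0)
			return true;	/* then: login_sent = true;
					 * queue connected_work */
		/* kernel: requeue login_work after a delay here */
	}
	return false;			/* "login timed out" */
}

int main(void)
{
	printf("login %s\n", login_with_retries() ? "ok" : "timed out");
	return 0;
}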
664 struct tbnet *net = container_of(work, typeof(*net), disconnect_work); in tbnet_disconnect_work() local
666 tbnet_tear_down(net, false); in tbnet_disconnect_work()
669 static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf, in tbnet_check_frame() argument
676 net->stats.rx_crc_errors++; in tbnet_check_frame()
679 net->stats.rx_over_errors++; in tbnet_check_frame()
686 net->stats.rx_length_errors++; in tbnet_check_frame()
696 net->stats.rx_length_errors++; in tbnet_check_frame()
703 if (net->skb && net->rx_hdr.frame_count) { in tbnet_check_frame()
705 if (frame_count != net->rx_hdr.frame_count) { in tbnet_check_frame()
706 net->stats.rx_length_errors++; in tbnet_check_frame()
713 if (frame_index != net->rx_hdr.frame_index + 1 || in tbnet_check_frame()
714 frame_id != net->rx_hdr.frame_id) { in tbnet_check_frame()
715 net->stats.rx_missed_errors++; in tbnet_check_frame()
719 if (net->skb->len + frame_size > TBNET_MAX_MTU) { in tbnet_check_frame()
720 net->stats.rx_length_errors++; in tbnet_check_frame()
729 net->stats.rx_length_errors++; in tbnet_check_frame()
733 net->stats.rx_missed_errors++; in tbnet_check_frame()
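tbnet_check_frame() maps each way a receive can go wrong to a specific counter: size and count violations bump rx_length_errors, sequencing violations bump rx_missed_errors. For a continuation fragment, frame_count must match the first fragment, frame_index must be exactly the previous index plus one, frame_id must be unchanged, and the reassembled length must stay under the maximum MTU. A standalone model of those continuation checks (the MTU cap is illustrative):

#include <stdbool.h>
#include <stdint.h>

#define MAX_MTU (64 * 1024)	/* illustrative cap, like TBNET_MAX_MTU */

struct frag_hdr {
	uint32_t frame_count;	/* total fragments in this packet */
	uint16_t frame_index;	/* position of this fragment */
	uint16_t frame_id;	/* same for all fragments of a packet */
};

struct rx_state {
	struct frag_hdr prev;	/* header of the previous fragment */
	uint32_t skb_len;	/* bytes reassembled so far */
};

enum rx_err { RX_OK, RX_LENGTH_ERR, RX_MISSED_ERR };

/* Validate a continuation fragment against the reassembly state. */
static enum rx_err check_continuation(const struct rx_state *st,
				      const struct frag_hdr *hdr,
				      uint32_t frame_size)
{
	if (hdr->frame_count != st->prev.frame_count)
		return RX_LENGTH_ERR;		/* count changed mid-packet */
	if (hdr->frame_index != (uint16_t)(st->prev.frame_index + 1) ||
	    hdr->frame_id != st->prev.frame_id)
		return RX_MISSED_ERR;		/* lost or foreign fragment */
	if (st->skb_len + frame_size > MAX_MTU)
		return RX_LENGTH_ERR;		/* would overflow the skb */
	return RX_OK;
}

int main(void)
{
	struct rx_state st = { { 3, 0, 42 }, 1500 };
	struct frag_hdr next = { 3, 1, 42 };

	return check_continuation(&st, &next, 1500) == RX_OK ? 0 : 1;
}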
742 struct tbnet *net = container_of(napi, struct tbnet, napi); in tbnet_poll() local
743 unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring); in tbnet_poll()
744 struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring); in tbnet_poll()
762 tbnet_alloc_rx_buffers(net, cleaned_count); in tbnet_poll()
766 frame = tb_ring_poll(net->rx_ring.ring); in tbnet_poll()
777 net->rx_ring.cons++; in tbnet_poll()
781 if (!tbnet_check_frame(net, tf, hdr)) { in tbnet_poll()
783 dev_kfree_skb_any(net->skb); in tbnet_poll()
784 net->skb = NULL; in tbnet_poll()
790 skb = net->skb; in tbnet_poll()
796 net->stats.rx_errors++; in tbnet_poll()
803 net->skb = skb; in tbnet_poll()
810 net->rx_hdr.frame_size = frame_size; in tbnet_poll()
811 net->rx_hdr.frame_count = le32_to_cpu(hdr->frame_count); in tbnet_poll()
812 net->rx_hdr.frame_index = le16_to_cpu(hdr->frame_index); in tbnet_poll()
813 net->rx_hdr.frame_id = le16_to_cpu(hdr->frame_id); in tbnet_poll()
814 last = net->rx_hdr.frame_index == net->rx_hdr.frame_count - 1; in tbnet_poll()
817 net->stats.rx_bytes += frame_size; in tbnet_poll()
820 skb->protocol = eth_type_trans(skb, net->dev); in tbnet_poll()
821 napi_gro_receive(&net->napi, skb); in tbnet_poll()
822 net->skb = NULL; in tbnet_poll()
826 net->stats.rx_packets += rx_packets; in tbnet_poll()
829 tbnet_alloc_rx_buffers(net, cleaned_count); in tbnet_poll()
836 tb_ring_poll_complete(net->rx_ring.ring); in tbnet_poll()
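tbnet_poll() is a budget-bounded NAPI loop: it pulls completed frames off the RX ring, appends each fragment to the in-progress skb, hands the skb to napi_gro_receive() when the last fragment (frame_index == frame_count - 1) arrives, and refills RX buffers in batches because one at a time is too slow; consuming less than the budget completes NAPI and re-arms the ring. A schematic of that loop structure with placeholder helpers:

#include <stdbool.h>
#include <stddef.h>

#define REFILL_BATCH 17		/* the kernel batches at MAX_SKB_FRAGS */

/* Placeholders for the ring/stack interaction. */
static void *ring_poll(void)           { return NULL; } /* nothing left */
static void refill_buffers(unsigned n) { (void)n; }
static bool append_fragment(void *f)   { (void)f; return true; /* last? */ }
static void gro_receive(void)          { }

static int poll(int budget)
{
	unsigned cleaned = 0;
	int rx_packets = 0;

	while (rx_packets < budget) {
		void *frame;

		/* Return buffers to hardware in batches. */
		if (cleaned >= REFILL_BATCH) {
			refill_buffers(cleaned);
			cleaned = 0;
		}

		frame = ring_poll();
		if (!frame)
			break;
		cleaned++;

		if (append_fragment(frame)) {	/* last fragment */
			gro_receive();
			rx_packets++;
		}
	}

	if (cleaned)
		refill_buffers(cleaned);
	/* rx_packets < budget: complete NAPI, re-enable the interrupt */
	return rx_packets;
}

int main(void)
{
	return poll(64);
}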
843 struct tbnet *net = data; in tbnet_start_poll() local
845 napi_schedule(&net->napi); in tbnet_start_poll()
850 struct tbnet *net = netdev_priv(dev); in tbnet_open() local
851 struct tb_xdomain *xd = net->xd; in tbnet_open()
863 net->tx_ring.ring = ring; in tbnet_open()
870 tbnet_start_poll, net); in tbnet_open()
873 tb_ring_free(net->tx_ring.ring); in tbnet_open()
874 net->tx_ring.ring = NULL; in tbnet_open()
877 net->rx_ring.ring = ring; in tbnet_open()
879 napi_enable(&net->napi); in tbnet_open()
880 start_login(net); in tbnet_open()
887 struct tbnet *net = netdev_priv(dev); in tbnet_stop() local
889 napi_disable(&net->napi); in tbnet_stop()
891 cancel_work_sync(&net->disconnect_work); in tbnet_stop()
892 tbnet_tear_down(net, true); in tbnet_stop()
894 tb_ring_free(net->rx_ring.ring); in tbnet_stop()
895 net->rx_ring.ring = NULL; in tbnet_stop()
896 tb_ring_free(net->tx_ring.ring); in tbnet_stop()
897 net->tx_ring.ring = NULL; in tbnet_stop()
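tbnet_open() allocates the TX ring, then the RX ring with a start-poll hook that schedules NAPI, unwinding the TX ring if the second allocation fails; tbnet_stop() disables NAPI, cancels disconnect work, tears down with a logout, and frees both rings, NULLing the pointers so a later teardown cannot touch them again. A sketch of the paired allocate/unwind pattern with stand-in types:

#include <stdlib.h>

struct ring { int dummy; };

static struct ring *ring_alloc(void)  { return malloc(sizeof(struct ring)); }
static void ring_free(struct ring *r) { free(r); }

struct nic {
	struct ring *tx_ring;
	struct ring *rx_ring;
};

static int nic_open(struct nic *n)
{
	n->tx_ring = ring_alloc();
	if (!n->tx_ring)
		return -1;

	n->rx_ring = ring_alloc();
	if (!n->rx_ring) {
		/* Unwind what was set up before the failure. */
		ring_free(n->tx_ring);
		n->tx_ring = NULL;
		return -1;
	}
	/* then: napi_enable(); start_login(); */
	return 0;
}

static void nic_stop(struct nic *n)
{
	/* napi_disable(); cancel disconnect work; tear down with logout */
	ring_free(n->rx_ring);
	n->rx_ring = NULL;	/* NULL so teardown can't reuse it */
	ring_free(n->tx_ring);
	n->tx_ring = NULL;
}

int main(void)
{
	struct nic n = { NULL, NULL };

	if (nic_open(&n) == 0)
		nic_stop(&n);
	return 0;
}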
902 static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb, in tbnet_xmit_csum_and_map() argument
906 struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring); in tbnet_xmit_csum_and_map()
1014 struct tbnet *net = netdev_priv(dev); in tbnet_start_xmit() local
1016 u16 frame_id = atomic_read(&net->frame_id); in tbnet_start_xmit()
1028 if (tbnet_available_buffers(&net->tx_ring) < nframes) { in tbnet_start_xmit()
1029 netif_stop_queue(net->dev); in tbnet_start_xmit()
1033 frames[frame_index] = tbnet_get_tx_buffer(net); in tbnet_start_xmit()
1083 frames[frame_index] = tbnet_get_tx_buffer(net); in tbnet_start_xmit()
1121 if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1)) in tbnet_start_xmit()
1125 tb_ring_tx(net->tx_ring.ring, &frames[i]->frame); in tbnet_start_xmit()
1127 if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID) in tbnet_start_xmit()
1128 atomic_inc(&net->frame_id); in tbnet_start_xmit()
1130 net->stats.tx_packets++; in tbnet_start_xmit()
1131 net->stats.tx_bytes += skb->len; in tbnet_start_xmit()
1139 net->tx_ring.cons -= frame_index; in tbnet_start_xmit()
1142 net->stats.tx_errors++; in tbnet_start_xmit()
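tbnet_start_xmit() splits one skb across however many ring frames it needs, stopping the queue when the ring cannot hold them all; frame_id is bumped once per packet only when the peer advertised TBNET_MATCH_FRAGS_ID, and on failure the consumer index is rolled back so the reserved frames return to the ring. A sketch of the frame-count arithmetic; the sizes are illustrative, the driver derives them from its real frame header:

#include <stdio.h>

/* Illustrative sizes, not the driver's exact header layout. */
#define FRAME_SIZE   4096u
#define HDR_SIZE     16u
#define PAYLOAD_SIZE (FRAME_SIZE - HDR_SIZE)

/* How many ring frames a packet of len bytes needs. */
static unsigned int frames_needed(unsigned int len)
{
	return (len + PAYLOAD_SIZE - 1) / PAYLOAD_SIZE; /* DIV_ROUND_UP */
}

int main(void)
{
	/* A 9000-byte packet needs three frames of 4080-byte payload. */
	printf("%u\n", frames_needed(9000));	/* prints 3 */
	return 0;
}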
1150 struct tbnet *net = netdev_priv(dev); in tbnet_get_stats64() local
1152 stats->tx_packets = net->stats.tx_packets; in tbnet_get_stats64()
1153 stats->rx_packets = net->stats.rx_packets; in tbnet_get_stats64()
1154 stats->tx_bytes = net->stats.tx_bytes; in tbnet_get_stats64()
1155 stats->rx_bytes = net->stats.rx_bytes; in tbnet_get_stats64()
1156 stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors + in tbnet_get_stats64()
1157 net->stats.rx_over_errors + net->stats.rx_crc_errors + in tbnet_get_stats64()
1158 net->stats.rx_missed_errors; in tbnet_get_stats64()
1159 stats->tx_errors = net->stats.tx_errors; in tbnet_get_stats64()
1160 stats->rx_length_errors = net->stats.rx_length_errors; in tbnet_get_stats64()
1161 stats->rx_over_errors = net->stats.rx_over_errors; in tbnet_get_stats64()
1162 stats->rx_crc_errors = net->stats.rx_crc_errors; in tbnet_get_stats64()
1163 stats->rx_missed_errors = net->stats.rx_missed_errors; in tbnet_get_stats64()
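tbnet_get_stats64() reports each RX error class individually and also folds them all into rx_errors, the usual ndo_get_stats64 convention. A tiny model of the fold:

#include <stdint.h>
#include <stdio.h>

struct rx_err_stats {
	uint64_t rx_errors;		/* errors not otherwise classified */
	uint64_t rx_length_errors;
	uint64_t rx_over_errors;
	uint64_t rx_crc_errors;
	uint64_t rx_missed_errors;
};

/* Total RX errors = generic plus every specific class. */
static uint64_t total_rx_errors(const struct rx_err_stats *s)
{
	return s->rx_errors + s->rx_length_errors + s->rx_over_errors +
	       s->rx_crc_errors + s->rx_missed_errors;
}

int main(void)
{
	struct rx_err_stats s = { 1, 2, 3, 4, 5 };

	printf("%llu\n", (unsigned long long)total_rx_errors(&s)); /* 15 */
	return 0;
}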
1175 const struct tbnet *net = netdev_priv(dev); in tbnet_generate_mac() local
1176 const struct tb_xdomain *xd = net->xd; in tbnet_generate_mac()
1194 struct tbnet *net; in tbnet_probe() local
1197 dev = alloc_etherdev(sizeof(*net)); in tbnet_probe()
1203 net = netdev_priv(dev); in tbnet_probe()
1204 INIT_DELAYED_WORK(&net->login_work, tbnet_login_work); in tbnet_probe()
1205 INIT_WORK(&net->connected_work, tbnet_connected_work); in tbnet_probe()
1206 INIT_WORK(&net->disconnect_work, tbnet_disconnect_work); in tbnet_probe()
1207 mutex_init(&net->connection_lock); in tbnet_probe()
1208 atomic_set(&net->command_id, 0); in tbnet_probe()
1209 atomic_set(&net->frame_id, 0); in tbnet_probe()
1210 net->svc = svc; in tbnet_probe()
1211 net->dev = dev; in tbnet_probe()
1212 net->xd = xd; in tbnet_probe()
1237 netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT); in tbnet_probe()
1243 net->handler.uuid = &tbnet_svc_uuid; in tbnet_probe()
1244 net->handler.callback = tbnet_handle_packet; in tbnet_probe()
1245 net->handler.data = net; in tbnet_probe()
1246 tb_register_protocol_handler(&net->handler); in tbnet_probe()
1248 tb_service_set_drvdata(svc, net); in tbnet_probe()
1252 tb_unregister_protocol_handler(&net->handler); in tbnet_probe()
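tbnet_probe() wires the instance together: work items, lock, and atomic counters are initialized, the protocol handler is pointed at the service UUID with tbnet_handle_packet() as callback and the tbnet instance as context, the handler is registered, and the instance becomes the service driver data; the error path unregisters the handler. A sketch of the handler wiring; the types are illustrative stand-ins, not the kernel's tb_protocol_handler definition:

#include <stddef.h>
#include <stdint.h>

struct uuid { uint8_t b[16]; };

struct protocol_handler {
	const struct uuid *uuid;	/* service protocol to match */
	int (*callback)(void *buf, size_t size, void *data);
	void *data;			/* passed back to the callback */
};

static const struct uuid svc_uuid = { { 0x30 } };

static int handle_packet(void *buf, size_t size, void *data)
{
	(void)buf; (void)size; (void)data;
	return 0;
}

struct tbnet_like { struct protocol_handler handler; };

static void probe_wire_handler(struct tbnet_like *net)
{
	net->handler.uuid = &svc_uuid;
	net->handler.callback = handle_packet;
	net->handler.data = net;	/* instance back-pointer */
	/* then: register the handler; unregister it on probe failure */
}

int main(void)
{
	struct tbnet_like net;

	probe_wire_handler(&net);
	return net.handler.callback(NULL, 0, net.handler.data);
}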
1262 struct tbnet *net = tb_service_get_drvdata(svc); in tbnet_remove() local
1264 unregister_netdev(net->dev); in tbnet_remove()
1265 tb_unregister_protocol_handler(&net->handler); in tbnet_remove()
1266 free_netdev(net->dev); in tbnet_remove()
1277 struct tbnet *net = tb_service_get_drvdata(svc); in tbnet_suspend() local
1279 stop_login(net); in tbnet_suspend()
1280 if (netif_running(net->dev)) { in tbnet_suspend()
1281 netif_device_detach(net->dev); in tbnet_suspend()
1282 tbnet_tear_down(net, true); in tbnet_suspend()
1285 tb_unregister_protocol_handler(&net->handler); in tbnet_suspend()
1292 struct tbnet *net = tb_service_get_drvdata(svc); in tbnet_resume() local
1294 tb_register_protocol_handler(&net->handler); in tbnet_resume()
1296 netif_carrier_off(net->dev); in tbnet_resume()
1297 if (netif_running(net->dev)) { in tbnet_resume()
1298 netif_device_attach(net->dev); in tbnet_resume()
1299 start_login(net); in tbnet_resume()
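Suspend and resume are symmetric: suspend stops the login machinery, detaches a running device, tears down with a logout, and unregisters the protocol handler; resume re-registers the handler, forces carrier off so the handshake runs again, re-attaches, and restarts login. A step-sequence sketch of that pairing:

#include <stdio.h>

static void step(const char *s) { puts(s); }

static void tbnet_like_suspend(int running)
{
	step("stop login work");
	if (running) {
		step("detach netdev");
		step("tear down (send logout)");
	}
	step("unregister protocol handler");
}

static void tbnet_like_resume(int running)
{
	step("register protocol handler");
	step("carrier off (handshake must rerun)");
	if (running) {
		step("attach netdev");
		step("start login");
	}
}

int main(void)
{
	tbnet_like_suspend(1);
	tbnet_like_resume(1);
	return 0;
}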
1318 .name = "thunderbolt-net",