Lines matching "net" (full-word match) in the Linux ThunderboltIP networking driver (drivers/net/thunderbolt.c). Each entry shows the source line number, the matched line, and the enclosing function; "argument" and "local" mark matches that are a function parameter or a local variable.

26 #include <net/ip6_checksum.h>
234 static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence, in tbnet_login_response() argument
238 struct tb_xdomain *xd = net->xd; in tbnet_login_response()
244 memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN); in tbnet_login_response()
251 static int tbnet_login_request(struct tbnet *net, u8 sequence) in tbnet_login_request() argument
255 struct tb_xdomain *xd = net->xd; in tbnet_login_request()
260 atomic_inc_return(&net->command_id)); in tbnet_login_request()
271 static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence, in tbnet_logout_response() argument
275 struct tb_xdomain *xd = net->xd; in tbnet_logout_response()
280 atomic_inc_return(&net->command_id)); in tbnet_logout_response()
285 static int tbnet_logout_request(struct tbnet *net) in tbnet_logout_request() argument
289 struct tb_xdomain *xd = net->xd; in tbnet_logout_request()
294 atomic_inc_return(&net->command_id)); in tbnet_logout_request()
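
The four helpers above (login/logout, request/response) share one pattern: address the message by the XDomain route, echo the peer's sequence number, and stamp it with a fresh command_id from atomic_inc_return(&net->command_id). A minimal sketch of that shared stamping step; struct tbnet_ctl_header and its fields are hypothetical stand-ins, since the listing elides the real ThunderboltIP message structs:

    struct tbnet_ctl_header {
        u32 route_hi;   /* hypothetical layout */
        u32 route_lo;
        u8  sequence;
        u32 command_id;
    };

    static void tbnet_stamp_header(struct tbnet *net, u64 route, u8 sequence,
                                   struct tbnet_ctl_header *hdr)
    {
        hdr->route_hi = upper_32_bits(route);
        hdr->route_lo = lower_32_bits(route);
        hdr->sequence = sequence;
        /* Every outgoing request/response carries a unique, increasing id. */
        hdr->command_id = atomic_inc_return(&net->command_id);
    }
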
302 static void start_login(struct tbnet *net) in start_login() argument
304 mutex_lock(&net->connection_lock); in start_login()
305 net->login_sent = false; in start_login()
306 net->login_received = false; in start_login()
307 mutex_unlock(&net->connection_lock); in start_login()
309 queue_delayed_work(system_long_wq, &net->login_work, in start_login()
313 static void stop_login(struct tbnet *net) in stop_login() argument
315 cancel_delayed_work_sync(&net->login_work); in stop_login()
316 cancel_work_sync(&net->connected_work); in stop_login()
360 static void tbnet_tear_down(struct tbnet *net, bool send_logout) in tbnet_tear_down() argument
362 netif_carrier_off(net->dev); in tbnet_tear_down()
363 netif_stop_queue(net->dev); in tbnet_tear_down()
365 stop_login(net); in tbnet_tear_down()
367 mutex_lock(&net->connection_lock); in tbnet_tear_down()
369 if (net->login_sent && net->login_received) { in tbnet_tear_down()
373 int ret = tbnet_logout_request(net); in tbnet_tear_down()
378 tb_ring_stop(net->rx_ring.ring); in tbnet_tear_down()
379 tb_ring_stop(net->tx_ring.ring); in tbnet_tear_down()
380 tbnet_free_buffers(&net->rx_ring); in tbnet_tear_down()
381 tbnet_free_buffers(&net->tx_ring); in tbnet_tear_down()
383 if (tb_xdomain_disable_paths(net->xd)) in tbnet_tear_down()
384 netdev_warn(net->dev, "failed to disable DMA paths\n"); in tbnet_tear_down()
387 net->login_retries = 0; in tbnet_tear_down()
388 net->login_sent = false; in tbnet_tear_down()
389 net->login_received = false; in tbnet_tear_down()
391 mutex_unlock(&net->connection_lock); in tbnet_tear_down()
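
Read together, the tbnet_tear_down() fragments give the full shutdown order: silence the netdev first, cancel the login machinery, send a logout only if the handshake completed in both directions, then stop the DMA rings, free the buffers, disable the XDomain paths, and finally reset the login state, all under the connection lock. A condensed reconstruction (the logout retry handling and other lines elided from the listing are simplified):

    netif_carrier_off(net->dev);
    netif_stop_queue(net->dev);

    stop_login(net);                /* cancels login_work and connected_work */

    mutex_lock(&net->connection_lock);
    if (net->login_sent && net->login_received) {
        if (send_logout)
            tbnet_logout_request(net);

        tb_ring_stop(net->rx_ring.ring);
        tb_ring_stop(net->tx_ring.ring);
        tbnet_free_buffers(&net->rx_ring);
        tbnet_free_buffers(&net->tx_ring);

        if (tb_xdomain_disable_paths(net->xd))
            netdev_warn(net->dev, "failed to disable DMA paths\n");
    }

    net->login_retries = 0;
    net->login_sent = false;
    net->login_received = false;
    mutex_unlock(&net->connection_lock);
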
397 struct tbnet *net = data; in tbnet_handle_packet() local
406 if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid)) in tbnet_handle_packet()
408 if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid)) in tbnet_handle_packet()
413 if (route != net->xd->route) in tbnet_handle_packet()
422 if (!netif_running(net->dev)) in tbnet_handle_packet()
425 ret = tbnet_login_response(net, route, sequence, in tbnet_handle_packet()
428 mutex_lock(&net->connection_lock); in tbnet_handle_packet()
429 net->login_received = true; in tbnet_handle_packet()
430 net->transmit_path = pkg->transmit_path; in tbnet_handle_packet()
436 if (net->login_retries >= TBNET_LOGIN_RETRIES || in tbnet_handle_packet()
437 !net->login_sent) { in tbnet_handle_packet()
438 net->login_retries = 0; in tbnet_handle_packet()
440 &net->login_work, 0); in tbnet_handle_packet()
442 mutex_unlock(&net->connection_lock); in tbnet_handle_packet()
444 queue_work(system_long_wq, &net->connected_work); in tbnet_handle_packet()
449 ret = tbnet_logout_response(net, route, sequence, command_id); in tbnet_handle_packet()
451 queue_work(system_long_wq, &net->disconnect_work); in tbnet_handle_packet()
459 netdev_warn(net->dev, "failed to send ThunderboltIP response\n"); in tbnet_handle_packet()
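
tbnet_handle_packet() accepts a control packet only if the initiator UUID is the remote XDomain, the target UUID is the local one, and the route matches the connection; everything else is ignored. The interesting branch is the login: a received LOGIN both answers the peer and, if our own login attempts stalled or never started, restarts them. Reconstructed from the fragments (the case label TBIP_LOGIN and the enclosing switch are assumptions):

    case TBIP_LOGIN:
        if (!netif_running(net->dev))
            break;

        ret = tbnet_login_response(net, route, sequence, command_id);
        if (!ret) {
            mutex_lock(&net->connection_lock);
            net->login_received = true;
            net->transmit_path = pkg->transmit_path;

            /* If our own login ran out of retries (or was never
             * sent), kick the login work again immediately. */
            if (net->login_retries >= TBNET_LOGIN_RETRIES ||
                !net->login_sent) {
                net->login_retries = 0;
                queue_delayed_work(system_long_wq,
                                   &net->login_work, 0);
            }
            mutex_unlock(&net->connection_lock);

            queue_work(system_long_wq, &net->connected_work);
        }
        break;
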
469 static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers) in tbnet_alloc_rx_buffers() argument
471 struct tbnet_ring *ring = &net->rx_ring; in tbnet_alloc_rx_buffers()
501 tf->dev = net->dev; in tbnet_alloc_rx_buffers()
515 static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net) in tbnet_get_tx_buffer() argument
517 struct tbnet_ring *ring = &net->tx_ring; in tbnet_get_tx_buffer()
540 struct tbnet *net = netdev_priv(tf->dev); in tbnet_tx_callback() local
543 net->tx_ring.prod++; in tbnet_tx_callback()
545 if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2) in tbnet_tx_callback()
546 netif_wake_queue(net->dev); in tbnet_tx_callback()
549 static int tbnet_alloc_tx_buffers(struct tbnet *net) in tbnet_alloc_tx_buffers() argument
551 struct tbnet_ring *ring = &net->tx_ring; in tbnet_alloc_tx_buffers()
574 tf->dev = net->dev; in tbnet_alloc_tx_buffers()
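
The rings use free-running producer/consumer counters: tbnet_tx_callback() advances tx_ring.prod as the hardware completes frames, tbnet_get_tx_buffer() advances cons as buffers are handed out, and the queue is rewoken once at least TBNET_RING_SIZE / 2 buffers are free again. A plausible sketch of the availability helper this implies; its real body is not part of the listing:

    static unsigned int tbnet_available_buffers(const struct tbnet_ring *ring)
    {
        /* Both counters run freely; only their difference matters. */
        return ring->prod - ring->cons;
    }
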
589 struct tbnet *net = container_of(work, typeof(*net), connected_work); in tbnet_connected_work() local
593 if (netif_carrier_ok(net->dev)) in tbnet_connected_work()
596 mutex_lock(&net->connection_lock); in tbnet_connected_work()
597 connected = net->login_sent && net->login_received; in tbnet_connected_work()
598 mutex_unlock(&net->connection_lock); in tbnet_connected_work()
606 ret = tb_xdomain_enable_paths(net->xd, TBNET_LOCAL_PATH, in tbnet_connected_work()
607 net->rx_ring.ring->hop, in tbnet_connected_work()
608 net->transmit_path, in tbnet_connected_work()
609 net->tx_ring.ring->hop); in tbnet_connected_work()
611 netdev_err(net->dev, "failed to enable DMA paths\n"); in tbnet_connected_work()
615 tb_ring_start(net->tx_ring.ring); in tbnet_connected_work()
616 tb_ring_start(net->rx_ring.ring); in tbnet_connected_work()
618 ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE); in tbnet_connected_work()
622 ret = tbnet_alloc_tx_buffers(net); in tbnet_connected_work()
626 netif_carrier_on(net->dev); in tbnet_connected_work()
627 netif_start_queue(net->dev); in tbnet_connected_work()
631 tbnet_free_buffers(&net->rx_ring); in tbnet_connected_work()
633 tb_ring_stop(net->rx_ring.ring); in tbnet_connected_work()
634 tb_ring_stop(net->tx_ring.ring); in tbnet_connected_work()
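
tbnet_connected_work() brings the data path up in a fixed order once both login flags are set: enable the XDomain DMA paths, start both rings, allocate RX then TX buffers, and only then raise the carrier and start the queue; any failure unwinds in reverse. Condensed from the fragments above:

    ret = tb_xdomain_enable_paths(net->xd, TBNET_LOCAL_PATH,
                                  net->rx_ring.ring->hop,
                                  net->transmit_path,
                                  net->tx_ring.ring->hop);
    if (ret) {
        netdev_err(net->dev, "failed to enable DMA paths\n");
        return;
    }

    tb_ring_start(net->tx_ring.ring);
    tb_ring_start(net->rx_ring.ring);

    ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE);
    if (ret)
        goto err_stop_rings;

    ret = tbnet_alloc_tx_buffers(net);
    if (ret)
        goto err_free_rx;

    netif_carrier_on(net->dev);
    netif_start_queue(net->dev);
    return;

    err_free_rx:
        tbnet_free_buffers(&net->rx_ring);
    err_stop_rings:
        tb_ring_stop(net->rx_ring.ring);
        tb_ring_stop(net->tx_ring.ring);
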
639 struct tbnet *net = container_of(work, typeof(*net), login_work.work); in tbnet_login_work() local
643 if (netif_carrier_ok(net->dev)) in tbnet_login_work()
646 ret = tbnet_login_request(net, net->login_retries % 4); in tbnet_login_work()
648 if (net->login_retries++ < TBNET_LOGIN_RETRIES) { in tbnet_login_work()
649 queue_delayed_work(system_long_wq, &net->login_work, in tbnet_login_work()
652 netdev_info(net->dev, "ThunderboltIP login timed out\n"); in tbnet_login_work()
655 net->login_retries = 0; in tbnet_login_work()
657 mutex_lock(&net->connection_lock); in tbnet_login_work()
658 net->login_sent = true; in tbnet_login_work()
659 mutex_unlock(&net->connection_lock); in tbnet_login_work()
661 queue_work(system_long_wq, &net->connected_work); in tbnet_login_work()
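
tbnet_login_work() is the retry side of the handshake: each attempt sends a login request whose sequence number cycles through 0-3 (login_retries % 4); failures requeue the work until TBNET_LOGIN_RETRIES is exhausted, and success records login_sent under the lock and schedules connected_work. Condensed reconstruction; the requeue delay is elided from the listing, so a placeholder stands in:

    if (netif_carrier_ok(net->dev))
        return;                         /* already connected */

    ret = tbnet_login_request(net, net->login_retries % 4);
    if (ret) {
        if (net->login_retries++ < TBNET_LOGIN_RETRIES)
            queue_delayed_work(system_long_wq, &net->login_work,
                               LOGIN_RETRY_DELAY /* placeholder */);
        else
            netdev_info(net->dev, "ThunderboltIP login timed out\n");
        return;
    }

    net->login_retries = 0;

    mutex_lock(&net->connection_lock);
    net->login_sent = true;
    mutex_unlock(&net->connection_lock);

    queue_work(system_long_wq, &net->connected_work);
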
667 struct tbnet *net = container_of(work, typeof(*net), disconnect_work); in tbnet_disconnect_work() local
669 tbnet_tear_down(net, false); in tbnet_disconnect_work()
672 static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf, in tbnet_check_frame() argument
679 net->stats.rx_crc_errors++; in tbnet_check_frame()
682 net->stats.rx_over_errors++; in tbnet_check_frame()
689 net->stats.rx_length_errors++; in tbnet_check_frame()
699 net->stats.rx_length_errors++; in tbnet_check_frame()
706 if (net->skb && net->rx_hdr.frame_count) { in tbnet_check_frame()
708 if (frame_count != net->rx_hdr.frame_count) { in tbnet_check_frame()
709 net->stats.rx_length_errors++; in tbnet_check_frame()
716 if (frame_index != net->rx_hdr.frame_index + 1 || in tbnet_check_frame()
717 frame_id != net->rx_hdr.frame_id) { in tbnet_check_frame()
718 net->stats.rx_missed_errors++; in tbnet_check_frame()
722 if (net->skb->len + frame_size > TBNET_MAX_MTU) { in tbnet_check_frame()
723 net->stats.rx_length_errors++; in tbnet_check_frame()
732 net->stats.rx_length_errors++; in tbnet_check_frame()
736 net->stats.rx_missed_errors++; in tbnet_check_frame()
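
tbnet_check_frame() maps each failure mode to a distinct counter: CRC failures to rx_crc_errors, oversized DMA frames to rx_over_errors, bad sizes or counts to rx_length_errors, and sequencing problems to rx_missed_errors. The mid-reassembly checks, condensed from the fragments (returning false rejects the frame; that return value is implied rather than shown):

    /* While reassembling a multi-frame packet (net->skb is set), each
     * new frame must agree with the header of the first one. */
    if (net->skb && net->rx_hdr.frame_count) {
        if (frame_count != net->rx_hdr.frame_count) {
            net->stats.rx_length_errors++;
            return false;
        }
        if (frame_index != net->rx_hdr.frame_index + 1 ||
            frame_id != net->rx_hdr.frame_id) {
            net->stats.rx_missed_errors++;
            return false;
        }
        if (net->skb->len + frame_size > TBNET_MAX_MTU) {
            net->stats.rx_length_errors++;
            return false;
        }
    }
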
745 struct tbnet *net = container_of(napi, struct tbnet, napi); in tbnet_poll() local
746 unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring); in tbnet_poll()
747 struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring); in tbnet_poll()
765 tbnet_alloc_rx_buffers(net, cleaned_count); in tbnet_poll()
769 frame = tb_ring_poll(net->rx_ring.ring); in tbnet_poll()
780 net->rx_ring.cons++; in tbnet_poll()
784 if (!tbnet_check_frame(net, tf, hdr)) { in tbnet_poll()
786 dev_kfree_skb_any(net->skb); in tbnet_poll()
787 net->skb = NULL; in tbnet_poll()
793 skb = net->skb; in tbnet_poll()
799 net->stats.rx_errors++; in tbnet_poll()
806 net->skb = skb; in tbnet_poll()
813 net->rx_hdr.frame_size = frame_size; in tbnet_poll()
814 net->rx_hdr.frame_count = le32_to_cpu(hdr->frame_count); in tbnet_poll()
815 net->rx_hdr.frame_index = le16_to_cpu(hdr->frame_index); in tbnet_poll()
816 net->rx_hdr.frame_id = le16_to_cpu(hdr->frame_id); in tbnet_poll()
817 last = net->rx_hdr.frame_index == net->rx_hdr.frame_count - 1; in tbnet_poll()
820 net->stats.rx_bytes += frame_size; in tbnet_poll()
823 skb->protocol = eth_type_trans(skb, net->dev); in tbnet_poll()
824 napi_gro_receive(&net->napi, skb); in tbnet_poll()
825 net->skb = NULL; in tbnet_poll()
829 net->stats.rx_packets += rx_packets; in tbnet_poll()
832 tbnet_alloc_rx_buffers(net, cleaned_count); in tbnet_poll()
839 tb_ring_poll_complete(net->rx_ring.ring); in tbnet_poll()
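
tbnet_poll() follows the standard NAPI contract: drain completed frames from the RX ring up to the budget, refill buffers in batches, and once the ring runs dry below budget, complete NAPI and re-enable ring interrupts via tb_ring_poll_complete(). The loop reduced to its NAPI mechanics (validation and delivery elided; napi_complete_done() is the usual completion call and an assumption here):

    static int tbnet_poll_skeleton(struct napi_struct *napi, int budget)
    {
        struct tbnet *net = container_of(napi, struct tbnet, napi);
        int rx_packets = 0;

        while (rx_packets < budget) {
            struct ring_frame *frame = tb_ring_poll(net->rx_ring.ring);

            if (!frame)
                break;                  /* ring drained */
            net->rx_ring.cons++;
            /* ... tbnet_check_frame(), reassemble into net->skb,
             *     deliver with napi_gro_receive() ... */
            rx_packets++;
        }

        if (rx_packets < budget) {
            napi_complete_done(napi, rx_packets);
            /* Re-enable ring interrupts now that polling is done. */
            tb_ring_poll_complete(net->rx_ring.ring);
        }

        return rx_packets;
    }
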
846 struct tbnet *net = data; in tbnet_start_poll() local
848 napi_schedule(&net->napi); in tbnet_start_poll()
853 struct tbnet *net = netdev_priv(dev); in tbnet_open() local
854 struct tb_xdomain *xd = net->xd; in tbnet_open()
866 net->tx_ring.ring = ring; in tbnet_open()
873 eof_mask, tbnet_start_poll, net); in tbnet_open()
876 tb_ring_free(net->tx_ring.ring); in tbnet_open()
877 net->tx_ring.ring = NULL; in tbnet_open()
880 net->rx_ring.ring = ring; in tbnet_open()
882 napi_enable(&net->napi); in tbnet_open()
883 start_login(net); in tbnet_open()
890 struct tbnet *net = netdev_priv(dev); in tbnet_stop() local
892 napi_disable(&net->napi); in tbnet_stop()
894 cancel_work_sync(&net->disconnect_work); in tbnet_stop()
895 tbnet_tear_down(net, true); in tbnet_stop()
897 tb_ring_free(net->rx_ring.ring); in tbnet_stop()
898 net->rx_ring.ring = NULL; in tbnet_stop()
899 tb_ring_free(net->tx_ring.ring); in tbnet_stop()
900 net->tx_ring.ring = NULL; in tbnet_stop()
905 static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb, in tbnet_xmit_csum_and_map() argument
909 struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring); in tbnet_xmit_csum_and_map()
1017 struct tbnet *net = netdev_priv(dev); in tbnet_start_xmit() local
1019 u16 frame_id = atomic_read(&net->frame_id); in tbnet_start_xmit()
1031 if (tbnet_available_buffers(&net->tx_ring) < nframes) { in tbnet_start_xmit()
1032 netif_stop_queue(net->dev); in tbnet_start_xmit()
1036 frames[frame_index] = tbnet_get_tx_buffer(net); in tbnet_start_xmit()
1086 frames[frame_index] = tbnet_get_tx_buffer(net); in tbnet_start_xmit()
1124 if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1)) in tbnet_start_xmit()
1128 tb_ring_tx(net->tx_ring.ring, &frames[i]->frame); in tbnet_start_xmit()
1130 if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID) in tbnet_start_xmit()
1131 atomic_inc(&net->frame_id); in tbnet_start_xmit()
1133 net->stats.tx_packets++; in tbnet_start_xmit()
1134 net->stats.tx_bytes += skb->len; in tbnet_start_xmit()
1142 net->tx_ring.cons -= frame_index; in tbnet_start_xmit()
1145 net->stats.tx_errors++; in tbnet_start_xmit()
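
tbnet_start_xmit() splits one skb across up to nframes ring buffers. Flow control is explicit: if the ring cannot hold the whole packet, the queue is stopped and NETDEV_TX_BUSY returned (tbnet_tx_callback() rewakes the queue once half the ring is free). Otherwise the frames are filled, checksummed and DMA-mapped as a set, then submitted. Condensed from the fragments:

    if (tbnet_available_buffers(&net->tx_ring) < nframes) {
        netif_stop_queue(net->dev);
        return NETDEV_TX_BUSY;
    }

    /* ... fill frames[0..frame_index] from the skb ... */

    if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
        goto err_drop;

    for (i = 0; i < frame_index + 1; i++)
        tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);

    if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID)
        atomic_inc(&net->frame_id);

    net->stats.tx_packets++;
    net->stats.tx_bytes += skb->len;
    return NETDEV_TX_OK;

    err_drop:
        /* Give the reserved buffers back and count the drop. */
        net->tx_ring.cons -= frame_index;
        dev_kfree_skb_any(skb);
        net->stats.tx_errors++;
        return NETDEV_TX_OK;
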
1153 struct tbnet *net = netdev_priv(dev); in tbnet_get_stats64() local
1155 stats->tx_packets = net->stats.tx_packets; in tbnet_get_stats64()
1156 stats->rx_packets = net->stats.rx_packets; in tbnet_get_stats64()
1157 stats->tx_bytes = net->stats.tx_bytes; in tbnet_get_stats64()
1158 stats->rx_bytes = net->stats.rx_bytes; in tbnet_get_stats64()
1159 stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors + in tbnet_get_stats64()
1160 net->stats.rx_over_errors + net->stats.rx_crc_errors + in tbnet_get_stats64()
1161 net->stats.rx_missed_errors; in tbnet_get_stats64()
1162 stats->tx_errors = net->stats.tx_errors; in tbnet_get_stats64()
1163 stats->rx_length_errors = net->stats.rx_length_errors; in tbnet_get_stats64()
1164 stats->rx_over_errors = net->stats.rx_over_errors; in tbnet_get_stats64()
1165 stats->rx_crc_errors = net->stats.rx_crc_errors; in tbnet_get_stats64()
1166 stats->rx_missed_errors = net->stats.rx_missed_errors; in tbnet_get_stats64()
1178 const struct tbnet *net = netdev_priv(dev); in tbnet_generate_mac() local
1179 const struct tb_xdomain *xd = net->xd; in tbnet_generate_mac()
1197 struct tbnet *net; in tbnet_probe() local
1200 dev = alloc_etherdev(sizeof(*net)); in tbnet_probe()
1206 net = netdev_priv(dev); in tbnet_probe()
1207 INIT_DELAYED_WORK(&net->login_work, tbnet_login_work); in tbnet_probe()
1208 INIT_WORK(&net->connected_work, tbnet_connected_work); in tbnet_probe()
1209 INIT_WORK(&net->disconnect_work, tbnet_disconnect_work); in tbnet_probe()
1210 mutex_init(&net->connection_lock); in tbnet_probe()
1211 atomic_set(&net->command_id, 0); in tbnet_probe()
1212 atomic_set(&net->frame_id, 0); in tbnet_probe()
1213 net->svc = svc; in tbnet_probe()
1214 net->dev = dev; in tbnet_probe()
1215 net->xd = xd; in tbnet_probe()
1240 netif_napi_add(dev, &net->napi, tbnet_poll, NAPI_POLL_WEIGHT); in tbnet_probe()
1246 net->handler.uuid = &tbnet_svc_uuid; in tbnet_probe()
1247 net->handler.callback = tbnet_handle_packet, in tbnet_probe()
1248 net->handler.data = net; in tbnet_probe()
1249 tb_register_protocol_handler(&net->handler); in tbnet_probe()
1251 tb_service_set_drvdata(svc, net); in tbnet_probe()
1255 tb_unregister_protocol_handler(&net->handler); in tbnet_probe()
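
tbnet_probe() wires the device together: alloc_etherdev() with the private struct, the three work items and the connection mutex, zeroed atomic counters, the NAPI context, then the ThunderboltIP protocol handler and driver data before the netdev is registered; on failure the handler is unregistered again. One quirk worth noting: the callback assignment above ends in a comma rather than a semicolon; combined with the next line it parses as a single comma-operator statement and compiles as intended. The registration tail, condensed (register_netdev() itself is elided from the listing but implied by the unregister in tbnet_remove()):

    net->handler.uuid = &tbnet_svc_uuid;
    net->handler.callback = tbnet_handle_packet;    /* ';' rather than ',' */
    net->handler.data = net;
    tb_register_protocol_handler(&net->handler);

    tb_service_set_drvdata(svc, net);

    ret = register_netdev(dev);
    if (ret) {
        tb_unregister_protocol_handler(&net->handler);
        free_netdev(dev);
        return ret;
    }

    return 0;
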
1265 struct tbnet *net = tb_service_get_drvdata(svc); in tbnet_remove() local
1267 unregister_netdev(net->dev); in tbnet_remove()
1268 tb_unregister_protocol_handler(&net->handler); in tbnet_remove()
1269 free_netdev(net->dev); in tbnet_remove()
1280 struct tbnet *net = tb_service_get_drvdata(svc); in tbnet_suspend() local
1282 stop_login(net); in tbnet_suspend()
1283 if (netif_running(net->dev)) { in tbnet_suspend()
1284 netif_device_detach(net->dev); in tbnet_suspend()
1285 tbnet_tear_down(net, true); in tbnet_suspend()
1288 tb_unregister_protocol_handler(&net->handler); in tbnet_suspend()
1295 struct tbnet *net = tb_service_get_drvdata(svc); in tbnet_resume() local
1297 tb_register_protocol_handler(&net->handler); in tbnet_resume()
1299 netif_carrier_off(net->dev); in tbnet_resume()
1300 if (netif_running(net->dev)) { in tbnet_resume()
1301 netif_device_attach(net->dev); in tbnet_resume()
1302 start_login(net); in tbnet_resume()
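
The PM hooks mirror each other: suspend stops the login work, detaches and tears down a running interface (sending a logout), and unregisters the protocol handler so no control packets arrive while asleep; resume re-registers the handler, forces carrier off, and reattaches a running interface before restarting the handshake from scratch. The resume side, reconstructed from the fragments (the sketch takes the tb_service directly; the real hook receives a struct device):

    static int tbnet_resume_sketch(struct tb_service *svc)
    {
        struct tbnet *net = tb_service_get_drvdata(svc);

        tb_register_protocol_handler(&net->handler);

        /* The connection must be renegotiated; start from carrier-off. */
        netif_carrier_off(net->dev);
        if (netif_running(net->dev)) {
            netif_device_attach(net->dev);
            start_login(net);
        }

        return 0;
    }
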
1321 .name = "thunderbolt-net",