Lines Matching +full:0 +full:xd
34 #define TBNET_LOCAL_PATH 0xf
48 #define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0))
59 * supported, then @frame_id is filled; otherwise it stays %0.
91 #define TBIP_HDR_LENGTH_MASK GENMASK(5, 0)
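These masks follow the usual GENMASK(h, l) pattern: TBNET_L0_PORT_NUM keeps only bits 5..0 of the route string. A minimal userspace sketch of the same extraction; MY_GENMASK and the sample route value are illustrative stand-ins, not taken from the driver:

    #include <stdint.h>
    #include <stdio.h>

    #define MY_GENMASK(h, l)   (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))
    #define L0_PORT_NUM(route) ((route) & MY_GENMASK(5, 0))

    int main(void)
    {
            uint64_t route = 0x301; /* hypothetical route string */

            /* Only bits 5..0 name the port at the first hop. */
            printf("port = %llu\n", (unsigned long long)L0_PORT_NUM(route));
            return 0;
    }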
150 * @xd: XDomain the service belongs to
181 struct tb_xdomain *xd; member
204 UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
205 0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
209 UUID_INIT(0x798f589e, 0x3616, 0x8a47,
210 0x97, 0xc6, 0x56, 0x64, 0xa9, 0x20, 0xc8, 0xdd);
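Both protocol UUIDs are declared with the kernel's UUID_INIT() from <linux/uuid.h>, which takes the 32/16/16-bit fields followed by the eight trailing bytes. A sketch of the same declaration pattern; the name example_proto_uuid is hypothetical:

    #include <linux/uuid.h>

    /* Hypothetical example; mirrors the declarations above. */
    static const uuid_t example_proto_uuid =
            UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
                      0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);

Values of this type are later compared with uuid_equal(), as in the packet checks further down.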
238 struct tb_xdomain *xd = net->xd; in tbnet_login_response() local
240 memset(&reply, 0, sizeof(reply)); in tbnet_login_response()
241 tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid, in tbnet_login_response()
242 xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply), in tbnet_login_response()
247 return tb_xdomain_response(xd, &reply, sizeof(reply), in tbnet_login_response()
255 struct tb_xdomain *xd = net->xd; in tbnet_login_request() local
257 memset(&request, 0, sizeof(request)); in tbnet_login_request()
258 tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid, in tbnet_login_request()
259 xd->remote_uuid, TBIP_LOGIN, sizeof(request), in tbnet_login_request()
265 return tb_xdomain_request(xd, &request, sizeof(request), in tbnet_login_request()
275 struct tb_xdomain *xd = net->xd; in tbnet_logout_response() local
277 memset(&reply, 0, sizeof(reply)); in tbnet_logout_response()
278 tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid, in tbnet_logout_response()
279 xd->remote_uuid, TBIP_STATUS, sizeof(reply), in tbnet_logout_response()
281 return tb_xdomain_response(xd, &reply, sizeof(reply), in tbnet_logout_response()
289 struct tb_xdomain *xd = net->xd; in tbnet_logout_request() local
291 memset(&request, 0, sizeof(request)); in tbnet_logout_request()
292 tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid, in tbnet_logout_request()
293 xd->remote_uuid, TBIP_LOGOUT, sizeof(request), in tbnet_logout_request()
296 return tb_xdomain_request(xd, &request, sizeof(request), in tbnet_logout_request()
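All four helpers above share one shape: zero the packet, fill the common ThunderboltIP header, then hand the packet to the XDomain transport. A stand-alone sketch of that shape; struct tbip_pkt, fill_hdr() and xdomain_send() are illustrative stand-ins for the driver's types, tbnet_fill_header() and tb_xdomain_request()/tb_xdomain_response():

    #include <stdint.h>
    #include <string.h>

    struct tbip_pkt {                       /* stand-in packet layout */
            uint64_t route;
            uint8_t  sequence;
            uint8_t  type;
            uint8_t  body[48];
    };

    static void fill_hdr(struct tbip_pkt *pkt, uint64_t route,
                         uint8_t sequence, uint8_t type)
    {
            pkt->route = route;
            pkt->sequence = sequence;
            pkt->type = type;
    }

    static int xdomain_send(const struct tbip_pkt *pkt)
    {
            (void)pkt;                      /* transport omitted in this sketch */
            return 0;
    }

    static int send_request(uint64_t route, uint8_t sequence, uint8_t type)
    {
            struct tbip_pkt pkt;

            memset(&pkt, 0, sizeof(pkt));   /* same zero-then-fill pattern */
            fill_hdr(&pkt, route, sequence, type);
            return xdomain_send(&pkt);
    }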
328 for (i = 0; i < TBNET_RING_SIZE; i++) { in tbnet_free_buffers()
340 order = 0; in tbnet_free_buffers()
356 ring->cons = 0; in tbnet_free_buffers()
357 ring->prod = 0; in tbnet_free_buffers()
372 while (send_logout && retries-- > 0) { in tbnet_tear_down()
383 if (tb_xdomain_disable_paths(net->xd)) in tbnet_tear_down()
387 net->login_retries = 0; in tbnet_tear_down()
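Teardown retries the logout a bounded number of times before disabling the paths and resetting the login counter. Sketch of the retry loop; LOGOUT_RETRIES and send_logout_once() are illustrative stand-ins for the driver's constant and tbnet_logout_request():

    #include <stdbool.h>

    #define LOGOUT_RETRIES 5                /* illustrative bound */

    static int send_logout_once(void)
    {
            return 0;                       /* stand-in: pretend it succeeded */
    }

    static void tear_down(bool send_logout)
    {
            int retries = LOGOUT_RETRIES;

            while (send_logout && retries-- > 0) {
                    if (send_logout_once() == 0)
                            break;          /* peer acknowledged the logout */
            }
    }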
399 int ret = 0; in tbnet_handle_packet()
405 return 0; in tbnet_handle_packet()
406 if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid)) in tbnet_handle_packet()
407 return 0; in tbnet_handle_packet()
408 if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid)) in tbnet_handle_packet()
409 return 0; in tbnet_handle_packet()
413 if (route != net->xd->route) in tbnet_handle_packet()
414 return 0; in tbnet_handle_packet()
438 net->login_retries = 0; in tbnet_handle_packet()
440 &net->login_work, 0); in tbnet_handle_packet()
455 return 0; in tbnet_handle_packet()
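tbnet_handle_packet() silently ignores (returns 0 for) anything not addressed to this XDomain link: initiator UUID, target UUID, and route must all match. A kernel-style sketch of those checks, assuming uuid_equal() from <linux/uuid.h>; the struct layout and names are illustrative:

    #include <linux/types.h>
    #include <linux/uuid.h>

    struct pkt_hdr {                        /* illustrative layout */
            uuid_t initiator_uuid;
            uuid_t target_uuid;
            u64 route;
    };

    /* Return 0 ("not handled") unless the packet targets this link. */
    static int packet_for_us(const struct pkt_hdr *hdr,
                             const uuid_t *remote, const uuid_t *local,
                             u64 route)
    {
            if (!uuid_equal(&hdr->initiator_uuid, remote))
                    return 0;
            if (!uuid_equal(&hdr->target_uuid, local))
                    return 0;
            if (hdr->route != route)
                    return 0;
            return 1;
    }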
483 /* Allocate page (order > 0) so that it can hold maximum in tbnet_alloc_rx_buffers()
493 dma_addr = dma_map_page(dma_dev, tf->page, 0, in tbnet_alloc_rx_buffers()
508 return 0; in tbnet_alloc_rx_buffers()
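The RX side allocates a high-order page per frame and DMA-maps it for device-to-CPU transfers, unwinding the allocation if the mapping fails. A minimal sketch of that pattern, assuming the standard dma_map_page()/dma_mapping_error() API; the helper name and order handling are illustrative:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    static int map_one_rx_page(struct device *dma_dev, unsigned int order,
                               struct page **page_out, dma_addr_t *addr_out)
    {
            struct page *page = alloc_pages(GFP_KERNEL, order);
            dma_addr_t addr;

            if (!page)
                    return -ENOMEM;

            addr = dma_map_page(dma_dev, page, 0, PAGE_SIZE << order,
                                DMA_FROM_DEVICE);
            if (dma_mapping_error(dma_dev, addr)) {
                    __free_pages(page, order);      /* back out on failure */
                    return -ENOMEM;
            }

            *page_out = page;
            *addr_out = addr;
            return 0;
    }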
528 tf->frame.size = 0; in tbnet_get_tx_buffer()
555 for (i = 0; i < TBNET_RING_SIZE; i++) { in tbnet_alloc_tx_buffers()
565 dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE, in tbnet_alloc_tx_buffers()
581 ring->cons = 0; in tbnet_alloc_tx_buffers()
584 return 0; in tbnet_alloc_tx_buffers()
606 ret = tb_xdomain_enable_paths(net->xd, TBNET_LOCAL_PATH, in tbnet_connected_work()
655 net->login_retries = 0; in tbnet_login_work()
731 if (frame_count == 0 || frame_count > TBNET_RING_SIZE / 4) { in tbnet_check_frame()
735 if (frame_index != 0) { in tbnet_check_frame()
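tbnet_check_frame() rejects descriptors whose counts cannot be valid: a packet may occupy at most a quarter of the ring, and an unfragmented packet must carry frame index 0. Userspace sketch of the same bounds; RING_SIZE and the single-frame shortcut are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    #define RING_SIZE 256                   /* illustrative */

    static bool frame_counts_ok(uint32_t frame_count, uint16_t frame_index)
    {
            if (frame_count == 0 || frame_count > RING_SIZE / 4)
                    return false;
            /* A single-frame packet cannot claim a non-zero index. */
            if (frame_count == 1 && frame_index != 0)
                    return false;
            return true;
    }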
748 unsigned int rx_packets = 0; in tbnet_poll()
766 cleaned_count = 0; in tbnet_poll()
854 struct tb_xdomain *xd = net->xd; in tbnet_open() local
860 ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE, in tbnet_open()
871 ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, in tbnet_open()
885 return 0; in tbnet_open()
902 return 0; in tbnet_stop()
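tbnet_open() allocates one TX and one RX ring on the NHI, passing -1 to let the core pick any free hop ID, and fails the open if either allocation returns NULL. A sketch of the TX half, assuming the tb_ring_alloc_tx()/tb_ring_start() API from <linux/thunderbolt.h> as called above; RING_SIZE and the flags value are illustrative:

    #include <linux/thunderbolt.h>
    #include <linux/errno.h>

    #define RING_SIZE 256                   /* illustrative */

    static int open_tx_ring(struct tb_nhi *nhi, struct tb_ring **out)
    {
            struct tb_ring *ring;

            /* -1 lets the core pick any free hop ID. */
            ring = tb_ring_alloc_tx(nhi, -1, RING_SIZE, RING_FLAG_FRAME);
            if (!ring)
                    return -ENOMEM;

            tb_ring_start(ring);
            *out = ring;
            return 0;
    }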
908 struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page); in tbnet_xmit_csum_and_map()
921 for (i = 0; i < frame_count; i++) { in tbnet_xmit_csum_and_map()
950 *ipcso = 0; in tbnet_xmit_csum_and_map()
962 ip_hdr(skb)->daddr, 0, in tbnet_xmit_csum_and_map()
963 ip_hdr(skb)->protocol, 0); in tbnet_xmit_csum_and_map()
967 &ipv6_hdr(skb)->daddr, 0, in tbnet_xmit_csum_and_map()
968 IPPROTO_TCP, 0); in tbnet_xmit_csum_and_map()
973 &ipv6_hdr(skb)->daddr, 0, in tbnet_xmit_csum_and_map()
974 ipv6_hdr(skb)->nexthdr, 0); in tbnet_xmit_csum_and_map()
982 for (i = 0; i < frame_count; i++) { in tbnet_xmit_csum_and_map()
989 offset = 0; in tbnet_xmit_csum_and_map()
997 for (i = 0; i < frame_count; i++) { in tbnet_xmit_csum_and_map()
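The TCP/UDP checksums above are seeded from a pseudo-header via csum_tcpudp_magic()/csum_ipv6_magic() and then completed over the payload. The arithmetic underneath is plain one's-complement: sum 16-bit words, fold the carries back in, invert. Userspace sketch of that fold; it illustrates the math only, not the kernel helpers:

    #include <stddef.h>
    #include <stdint.h>

    static uint16_t csum_fold16(const uint8_t *data, size_t len, uint32_t sum)
    {
            size_t i;

            for (i = 0; i + 1 < len; i += 2)
                    sum += (uint32_t)data[i] << 8 | data[i + 1];
            if (len & 1)                    /* pad the odd trailing byte */
                    sum += (uint32_t)data[len - 1] << 8;
            while (sum >> 16)               /* fold carries into the low word */
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }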
1024 unsigned int frag = 0; in tbnet_start_xmit()
1026 u32 frame_index = 0; in tbnet_start_xmit()
1078 } else if (unlikely(size_left > 0)) { in tbnet_start_xmit()
1081 } while (size_left > 0); in tbnet_start_xmit()
1114 } else if (unlikely(data_len > 0)) { in tbnet_start_xmit()
1127 for (i = 0; i < frame_index + 1; i++) in tbnet_start_xmit()
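The transmit path carves each skb into fixed-size frames, tracking size_left/data_len and a running frame_index as it goes. Userspace sketch of the same bookkeeping; FRAME_PAYLOAD and the lengths are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define FRAME_PAYLOAD 4096              /* illustrative per-frame limit */

    int main(void)
    {
            uint32_t data_len = 10000;      /* example packet length */
            uint32_t frame_index = 0;

            while (data_len > 0) {
                    uint32_t chunk = data_len < FRAME_PAYLOAD ?
                                     data_len : FRAME_PAYLOAD;

                    printf("frame %u carries %u bytes\n", frame_index++, chunk);
                    data_len -= chunk;
            }
            return 0;
    }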
1179 const struct tb_xdomain *xd = net->xd; in tbnet_generate_mac() local
1183 phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route)); in tbnet_generate_mac()
1186 dev->dev_addr[0] = phy_port << 4 | 0x02; in tbnet_generate_mac()
1187 hash = jhash2((u32 *)xd->local_uuid, 4, 0); in tbnet_generate_mac()
1189 hash = jhash2((u32 *)xd->local_uuid, 4, hash); in tbnet_generate_mac()
1190 dev->dev_addr[5] = hash & 0xff; in tbnet_generate_mac()
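The MAC address is synthesized rather than burned in: the top nibble of byte 0 encodes the physical port, bit 0x02 marks it locally administered, and the remaining bytes come from hashing the local UUID (the driver runs jhash2() twice, chaining the first result as the second seed). Userspace sketch of that derivation; toy_hash() is a stand-in mixer, not jhash2():

    #include <stdint.h>

    static uint32_t toy_hash(const uint32_t *k, unsigned int words,
                             uint32_t seed)
    {
            uint32_t h = seed ^ 0x9e3779b9u;

            while (words--) {
                    h ^= *k++;
                    h *= 0x85ebca6bu;       /* simple mix, not jhash2 */
            }
            return h;
    }

    static void generate_mac(uint8_t mac[6], const uint32_t uuid[4],
                             unsigned int phy_port)
    {
            uint32_t hash;

            mac[0] = phy_port << 4 | 0x02;  /* locally administered unicast */
            hash = toy_hash(uuid, 4, 0);
            mac[1] = hash >> 24;
            mac[2] = hash >> 16;
            mac[3] = hash >> 8;
            mac[4] = hash;
            hash = toy_hash(uuid, 4, hash); /* chain for the last byte */
            mac[5] = hash & 0xff;
    }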
1195 struct tb_xdomain *xd = tb_service_parent(svc); in tbnet_probe() local
1211 atomic_set(&net->command_id, 0); in tbnet_probe()
1212 atomic_set(&net->frame_id, 0); in tbnet_probe()
1215 net->xd = xd; in tbnet_probe()
1260 return 0; in tbnet_probe()
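The probe path seeds two atomic counters so each outgoing packet can draw a fresh command and frame ID without locking. Kernel-style sketch of that pattern with atomic_set()/atomic_inc_return(); the container struct and helper names are illustrative:

    #include <linux/atomic.h>
    #include <linux/types.h>

    struct net_ids {                        /* illustrative container */
            atomic_t command_id;
            atomic_t frame_id;
    };

    static void init_ids(struct net_ids *ids)
    {
            atomic_set(&ids->command_id, 0);
            atomic_set(&ids->frame_id, 0);
    }

    static u32 next_command_id(struct net_ids *ids)
    {
            /* Each caller gets a unique, monotonically increasing ID. */
            return atomic_inc_return(&ids->command_id);
    }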
1289 return 0; in tbnet_suspend()
1305 return 0; in tbnet_resume()