/net/rxrpc/

D | ar-peer.c |
    36   static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)    in rxrpc_assess_MTU_size() argument
    41   peer->if_mtu = 1500;    in rxrpc_assess_MTU_size()
    44   peer->srx.transport.sin.sin_addr.s_addr, 0,    in rxrpc_assess_MTU_size()
    52   peer->if_mtu = dst_mtu(&rt->dst);    in rxrpc_assess_MTU_size()
    55   _leave(" [if_mtu %u]", peer->if_mtu);    in rxrpc_assess_MTU_size()
    64   struct rxrpc_peer *peer;    in rxrpc_alloc_peer() local
    68   peer = kzalloc(sizeof(struct rxrpc_peer), gfp);    in rxrpc_alloc_peer()
    69   if (peer) {    in rxrpc_alloc_peer()
    70   INIT_WORK(&peer->destroyer, &rxrpc_destroy_peer);    in rxrpc_alloc_peer()
    71   INIT_LIST_HEAD(&peer->link);    in rxrpc_alloc_peer()
    [all …]

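The ar-peer.c hits above show how rxrpc seeds peer->if_mtu: start from the 1500-byte Ethernet default and lower it only if a route lookup reports a smaller path MTU. A minimal userspace sketch of that idea follows; route_mtu() is a hypothetical stand-in for the kernel's route lookup and dst_mtu(), not a real API.

    #include <stdio.h>

    struct peer_info {
        unsigned int if_mtu;
    };

    /* Hypothetical stand-in for a route lookup: returns 0 when no path MTU
     * is known for this peer, otherwise the MTU reported by the route. */
    static unsigned int route_mtu(void)
    {
        return 1400;
    }

    static void assess_mtu_size(struct peer_info *peer)
    {
        unsigned int mtu = route_mtu();

        peer->if_mtu = 1500;            /* conservative Ethernet default */
        if (mtu > 0 && mtu < peer->if_mtu)
            peer->if_mtu = mtu;         /* trust the smaller route MTU */
    }

    int main(void)
    {
        struct peer_info p;

        assess_mtu_size(&p);
        printf("if_mtu = %u\n", p.if_mtu);
        return 0;
    }
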
D | ar-error.c |
    33   struct rxrpc_peer *peer;    in rxrpc_UDP_error_report() local
    55   peer = rxrpc_find_peer(local, addr, port);    in rxrpc_UDP_error_report()
    56   if (IS_ERR(peer)) {    in rxrpc_UDP_error_report()
    62   trans = rxrpc_find_transport(local, peer);    in rxrpc_UDP_error_report()
    64   rxrpc_put_peer(peer);    in rxrpc_UDP_error_report()
    79   if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {    in rxrpc_UDP_error_report()
    80   peer->if_mtu = mtu;    in rxrpc_UDP_error_report()
    92   if (mtu < peer->hdrsize)    in rxrpc_UDP_error_report()
    93   mtu = peer->hdrsize + 4;    in rxrpc_UDP_error_report()
    97   if (mtu < peer->mtu) {    in rxrpc_UDP_error_report()
    [all …]

D | ar-transport.c |
    31   struct rxrpc_peer *peer,    in rxrpc_alloc_transport() argument
    41   trans->peer = peer;    in rxrpc_alloc_transport()
    52   if (peer->srx.transport.family == AF_INET) {    in rxrpc_alloc_transport()
    53   switch (peer->srx.transport_type) {    in rxrpc_alloc_transport()
    75   struct rxrpc_peer *peer,    in rxrpc_get_transport() argument
    85   &peer->srx.transport.sin.sin_addr,    in rxrpc_get_transport()
    86   ntohs(peer->srx.transport.sin.sin_port));    in rxrpc_get_transport()
    91   if (trans->local == local && trans->peer == peer)    in rxrpc_get_transport()
    98   candidate = rxrpc_alloc_transport(local, peer, gfp);    in rxrpc_get_transport()
    107  if (trans->local == local && trans->peer == peer)    in rxrpc_get_transport()
    [all …]

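rxrpc_get_transport() (lines 75-107) follows the common get-or-create shape: look for an existing {local, peer} transport, allocate a candidate if none is found, then search again under the lock before publishing the candidate so that a concurrent creator wins. A sketch of that shape in userspace C, using a pthread mutex where the kernel uses its own locking; the struct, list, and function names here are illustrative, not the kernel's.

    #include <pthread.h>
    #include <stdlib.h>

    struct transport {
        struct transport *next;
        const void *local;
        const void *peer;
    };

    static struct transport *transports;
    static pthread_mutex_t transports_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct transport *find_transport(const void *local, const void *peer)
    {
        struct transport *t;

        for (t = transports; t; t = t->next)
            if (t->local == local && t->peer == peer)
                return t;
        return NULL;
    }

    /* Get-or-create: allocate outside the lock, then recheck under it. */
    static struct transport *get_transport(const void *local, const void *peer)
    {
        struct transport *t, *candidate;

        /* First look for an existing transport. */
        pthread_mutex_lock(&transports_lock);
        t = find_transport(local, peer);
        pthread_mutex_unlock(&transports_lock);
        if (t)
            return t;

        /* None found: build a candidate without holding the lock... */
        candidate = malloc(sizeof(*candidate));
        if (!candidate)
            return NULL;
        candidate->local = local;
        candidate->peer = peer;

        /* ...then recheck under the lock, so a racing creator wins. */
        pthread_mutex_lock(&transports_lock);
        t = find_transport(local, peer);
        if (!t) {
            candidate->next = transports;
            transports = candidate;
            t = candidate;
            candidate = NULL;
        }
        pthread_mutex_unlock(&transports_lock);

        free(candidate);        /* no-op if the candidate was published */
        return t;
    }
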
D | ar-proc.c |
    69   &trans->peer->srx.transport.sin.sin_addr,    in rxrpc_call_seq_show()
    70   ntohs(trans->peer->srx.transport.sin.sin_port));    in rxrpc_call_seq_show()
    152  &trans->peer->srx.transport.sin.sin_addr,    in rxrpc_connection_seq_show()
    153  ntohs(trans->peer->srx.transport.sin.sin_port));    in rxrpc_connection_seq_show()

D | ar-ack.c |
    554  struct rxrpc_peer *peer;    in rxrpc_extract_ackinfo() local
    569  peer = call->conn->trans->peer;    in rxrpc_extract_ackinfo()
    570  if (mtu < peer->maxdata) {    in rxrpc_extract_ackinfo()
    571  spin_lock_bh(&peer->lock);    in rxrpc_extract_ackinfo()
    572  peer->maxdata = mtu;    in rxrpc_extract_ackinfo()
    573  peer->mtu = mtu + peer->hdrsize;    in rxrpc_extract_ackinfo()
    574  spin_unlock_bh(&peer->lock);    in rxrpc_extract_ackinfo()
    575  _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);    in rxrpc_extract_ackinfo()
    857  msg.msg_name = &call->conn->trans->peer->srx.transport.sin;    in rxrpc_process_call()
    858  msg.msg_namelen = sizeof(call->conn->trans->peer->srx.transport.sin);    in rxrpc_process_call()
    [all …]

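The ar-ack.c hits at lines 569-575 fold the MTU reported in an ACK into the shared peer record: only shrink, never grow, and do it under peer->lock because many calls share the same peer. A small sketch of that clamp, using a pthread mutex in place of the kernel spinlock; the field names mirror the snippet but the struct and sample values are illustrative.

    #include <pthread.h>
    #include <stdio.h>

    struct peer_info {
        pthread_mutex_t lock;
        unsigned int hdrsize;   /* per-packet header overhead */
        unsigned int maxdata;   /* payload bytes we may send */
        unsigned int mtu;       /* maxdata + hdrsize */
    };

    /* Fold an MTU advertised by the remote end into the shared peer record;
     * only ever shrink the values, and do so under the peer lock. */
    static void peer_note_reported_mtu(struct peer_info *peer, unsigned int mtu)
    {
        if (mtu >= peer->maxdata)
            return;                     /* never grow from an ACK hint */

        pthread_mutex_lock(&peer->lock);
        peer->maxdata = mtu;
        peer->mtu = mtu + peer->hdrsize;
        pthread_mutex_unlock(&peer->lock);
    }

    int main(void)
    {
        struct peer_info peer = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .hdrsize = 28, .maxdata = 1472, .mtu = 1500,
        };

        peer_note_reported_mtu(&peer, 1200);
        printf("mtu=%u maxdata=%u\n", peer.mtu, peer.maxdata);
        return 0;
    }
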
D | ar-accept.c |
    81   struct rxrpc_peer *peer;    in rxrpc_accept_incoming_call() local
    100  peer = rxrpc_get_peer(srx, GFP_NOIO);    in rxrpc_accept_incoming_call()
    101  if (IS_ERR(peer)) {    in rxrpc_accept_incoming_call()
    107  trans = rxrpc_get_transport(local, peer, GFP_NOIO);    in rxrpc_accept_incoming_call()
    108  rxrpc_put_peer(peer);    in rxrpc_accept_incoming_call()

D | ar-output.c |
    346  msg.msg_name = &trans->peer->srx.transport.sin;    in rxrpc_send_packet()
    347  msg.msg_namelen = sizeof(trans->peer->srx.transport.sin);    in rxrpc_send_packet()
    354  if (skb->len - sizeof(struct rxrpc_header) < trans->peer->maxdata) {    in rxrpc_send_packet()
    369  _leave(" = %d [%u]", ret, trans->peer->maxdata);    in rxrpc_send_packet()
    391  _leave(" = %d [frag %u]", ret, trans->peer->maxdata);    in rxrpc_send_packet()
    582  max = call->conn->trans->peer->maxdata;    in rxrpc_send_data()
    725  ret = call->conn->trans->peer->net_error;    in rxrpc_send_data()

D | ar-call.c |
    131  spin_lock(&call->conn->trans->peer->lock);    in rxrpc_alloc_client_call()
    132  list_add(&call->error_link, &call->conn->trans->peer->error_targets);    in rxrpc_alloc_client_call()
    133  spin_unlock(&call->conn->trans->peer->lock);    in rxrpc_alloc_client_call()
    342  spin_lock(&conn->trans->peer->lock);    in rxrpc_incoming_call()
    343  list_add(&call->error_link, &conn->trans->peer->error_targets);    in rxrpc_incoming_call()
    344  spin_unlock(&conn->trans->peer->lock);    in rxrpc_incoming_call()
    658  spin_lock(&call->conn->trans->peer->lock);    in rxrpc_cleanup_call()
    660  spin_unlock(&call->conn->trans->peer->lock);    in rxrpc_cleanup_call()

/net/sctp/

D | associola.c |
    210  asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;    in sctp_association_init()
    221  asoc->peer.i.init_tag = 0;   /* INIT needs a vtag of 0. */    in sctp_association_init()
    253  INIT_LIST_HEAD(&asoc->peer.transport_addr_list);    in sctp_association_init()
    254  asoc->peer.transport_count = 0;    in sctp_association_init()
    267  asoc->peer.sack_needed = 1;    in sctp_association_init()
    268  asoc->peer.sack_cnt = 0;    in sctp_association_init()
    269  asoc->peer.sack_generation = 1;    in sctp_association_init()
    276  asoc->peer.asconf_capable = 0;    in sctp_association_init()
    278  asoc->peer.asconf_capable = 1;    in sctp_association_init()
    293  memset(&asoc->peer.tsn_map, 0, sizeof(struct sctp_tsnmap));    in sctp_association_init()
    [all …]

D | transport.c |
    63   struct sctp_transport *peer,    in sctp_transport_init() argument
    68   peer->ipaddr = *addr;    in sctp_transport_init()
    69   peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);    in sctp_transport_init()
    70   memset(&peer->saddr, 0, sizeof(union sctp_addr));    in sctp_transport_init()
    72   peer->sack_generation = 0;    in sctp_transport_init()
    80   peer->rto = msecs_to_jiffies(net->sctp.rto_initial);    in sctp_transport_init()
    82   peer->last_time_heard = jiffies;    in sctp_transport_init()
    83   peer->last_time_ecne_reduced = jiffies;    in sctp_transport_init()
    85   peer->param_flags = SPP_HB_DISABLE |    in sctp_transport_init()
    90   peer->pathmaxrxt = net->sctp.max_retrans_path;    in sctp_transport_init()
    [all …]

D | sm_make_chunk.c |
    407  if (asoc->peer.ecn_capable)    in sctp_make_init_ack()
    410  if (asoc->peer.prsctp_capable)    in sctp_make_init_ack()
    413  if (asoc->peer.asconf_capable) {    in sctp_make_init_ack()
    422  if (asoc->peer.auth_capable) {    in sctp_make_init_ack()
    466  if (asoc->peer.ecn_capable)    in sctp_make_init_ack()
    476  if (asoc->peer.prsctp_capable)    in sctp_make_init_ack()
    486  if (asoc->peer.auth_capable) {    in sctp_make_init_ack()
    548  cookie = asoc->peer.cookie;    in sctp_make_cookie_echo()
    549  cookie_len = asoc->peer.cookie_len;    in sctp_make_cookie_echo()
    739  struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;    in sctp_make_sack()
    [all …]

D | sm_sideeffect.c |
    165  struct sctp_transport *trans = asoc->peer.last_data_from;    in sctp_gen_sack()
    171  asoc->peer.sack_needed = 1;    in sctp_gen_sack()
    173  ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);    in sctp_gen_sack()
    174  max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);    in sctp_gen_sack()
    187  asoc->peer.sack_needed = 1;    in sctp_gen_sack()
    197  if (!asoc->peer.sack_needed) {    in sctp_gen_sack()
    198  asoc->peer.sack_cnt++;    in sctp_gen_sack()
    207  if (asoc->peer.sack_cnt >= trans->sackfreq - 1)    in sctp_gen_sack()
    208  asoc->peer.sack_needed = 1;    in sctp_gen_sack()
    214  if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)    in sctp_gen_sack()
    [all …]

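sctp_gen_sack() (lines 165-214) decides between an immediate and a delayed SACK: if the highest TSN seen is ahead of the cumulative TSN ack point there are gaps, so a SACK is needed at once; otherwise packets are counted and a SACK is only marked as needed every sackfreq-th packet (per transport if it has its own sackfreq, else per association). A compact sketch of that rule follows; it deliberately sidesteps the kernel's exact counter bookkeeping and the delayed-ack timer, and the types are simplified.

    #include <stdbool.h>
    #include <stdint.h>

    struct sack_state {
        uint32_t ctsn;              /* cumulative TSN ack point */
        uint32_t max_tsn_seen;      /* highest TSN received so far */
        unsigned int sack_cnt;      /* packets since the last SACK */
        unsigned int sackfreq;      /* send a SACK every N packets */
    };

    /* Return true when a SACK should be bundled with the next outgoing
     * packet: immediately on gaps, otherwise every sackfreq-th packet. */
    static bool sack_needed(struct sack_state *s)
    {
        if (s->max_tsn_seen != s->ctsn)
            return true;            /* gaps in the TSN space: SACK now */

        if (++s->sack_cnt >= s->sackfreq) {
            s->sack_cnt = 0;
            return true;            /* periodic SACK */
        }
        return false;               /* keep delaying */
    }
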
D | probe.c |
    136  sp = asoc->peer.primary_path;    in jsctp_sf_eat_sack()
    139  (!port || asoc->peer.port == port ||    in jsctp_sf_eat_sack()
    150  ep->base.bind_addr.port, asoc->peer.port,    in jsctp_sf_eat_sack()
    151  asoc->pathmtu, asoc->peer.rwnd, asoc->unack_data);    in jsctp_sf_eat_sack()
    153  list_for_each_entry(sp, &asoc->peer.transport_addr_list,    in jsctp_sf_eat_sack()
    155  if (sp == asoc->peer.primary_path)    in jsctp_sf_eat_sack()

D | outqueue.c |
    230  list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,    in __sctp_outq_teardown()
    412  q->asoc->peer.rwnd += sctp_data_size(chunk);    in sctp_retransmit_mark()
    432  q->asoc->peer.rwnd += sctp_data_size(chunk);    in sctp_retransmit_mark()
    489  if (transport == transport->asoc->peer.retran_path)    in sctp_retransmit()
    724  __u16 dport = asoc->peer.port;    in sctp_outq_flush()
    725  __u32 vtag = asoc->peer.i.init_tag;    in sctp_outq_flush()
    790  new_transport = asoc->peer.active_path;    in sctp_outq_flush()
    810  new_transport = asoc->peer.active_path;    in sctp_outq_flush()
    824  asoc->peer.ecn_capable);    in sctp_outq_flush()
    918  if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)    in sctp_outq_flush()
    [all …]

D | proc.c |
    132  struct sctp_transport *peer;    in sctp_seq_dump_local_addrs() local
    138  peer = asoc->peer.primary_path;    in sctp_seq_dump_local_addrs()
    139  primary = &peer->saddr;    in sctp_seq_dump_local_addrs()
    164  primary = &assoc->peer.primary_addr;    in sctp_seq_dump_remote_addrs()
    166  list_for_each_entry_rcu(transport, &assoc->peer.transport_addr_list,    in sctp_seq_dump_remote_addrs()
    348  assoc->peer.port);    in sctp_assocs_seq_show()
    457  list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,    in sctp_remaddr_seq_show()

D | output.c |
    255  pkt->transport->asoc->peer.sack_generation)    in sctp_packet_bundle_sack()
    266  asoc->peer.sack_needed = 0;    in sctp_packet_bundle_sack()
    588  if (asoc->peer.last_sent_to != tp)    in sctp_packet_transmit()
    592  asoc->peer.last_sent_to = tp;    in sctp_packet_transmit()
    673  rwnd = asoc->peer.rwnd;    in sctp_packet_can_append_data()
    740  u32 rwnd = asoc->peer.rwnd;    in sctp_packet_append_data()
    754  asoc->peer.rwnd = rwnd;    in sctp_packet_append_data()
    756  if (!asoc->peer.prsctp_capable)    in sctp_packet_append_data()

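sctp_packet_append_data() (lines 740-756) charges each appended DATA chunk against the peer's advertised receive window, while sctp_retransmit_mark() in outqueue.c above credits the window back for chunks pulled off a transport for retransmission. A sketch of that bookkeeping with an explicit underflow guard; the struct and helper names are illustrative.

    #include <stdint.h>

    struct assoc_peer {
        uint32_t rwnd;      /* peer's advertised receive window, in bytes */
    };

    /* Charge an outgoing DATA chunk against the peer's window.  If the
     * chunk is larger than what is left, the window simply drops to zero. */
    static void rwnd_charge(struct assoc_peer *peer, uint32_t datasize)
    {
        if (datasize < peer->rwnd)
            peer->rwnd -= datasize;
        else
            peer->rwnd = 0;
    }

    /* Credit the window back, e.g. when a chunk is moved to the retransmit
     * queue and is no longer counted as in flight toward the peer. */
    static void rwnd_credit(struct assoc_peer *peer, uint32_t datasize)
    {
        peer->rwnd += datasize;
    }
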
D | auth.c |
    247  return sctp_auth_make_key_vector(asoc->peer.peer_random,    in sctp_auth_make_peer_vector()
    248  asoc->peer.peer_chunks,    in sctp_auth_make_peer_vector()
    249  asoc->peer.peer_hmacs,    in sctp_auth_make_peer_vector()
    403  if (!net->sctp.auth_enable || !asoc->peer.auth_capable)    in sctp_auth_asoc_init_active_key()
    540  hmacs = asoc->peer.peer_hmacs;    in sctp_auth_asoc_get_hmac()
    685  if (!net->sctp.auth_enable || !asoc->peer.auth_capable)    in sctp_auth_send_cid()
    688  return __sctp_auth_cid(chunk, asoc->peer.peer_chunks);    in sctp_auth_send_cid()

/net/core/

D | net_namespace.c |
    151  static int alloc_netid(struct net *net, struct net *peer, int reqid)    in alloc_netid() argument
    162  return idr_alloc(&net->netns_ids, peer, min, max, GFP_KERNEL);    in alloc_netid()
    171  static int net_eq_idr(int id, void *net, void *peer)    in net_eq_idr() argument
    173  if (net_eq(net, peer))    in net_eq_idr()
    178  static int __peernet2id(struct net *net, struct net *peer, bool alloc)    in __peernet2id() argument
    180  int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);    in __peernet2id()
    191  return alloc_netid(net, peer, -1);    in __peernet2id()
    199  int peernet2id(struct net *net, struct net *peer)    in peernet2id() argument
    201  int id = __peernet2id(net, peer, true);    in peernet2id()
    208  struct net *peer;    in get_net_ns_by_id() local
    [all …]

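__peernet2id() and alloc_netid() (lines 151-201) map a peer network namespace to a small integer id inside another namespace: walk the idr to see whether the peer already has an id, and only allocate a new slot if it does not. A userspace sketch of the same find-or-allocate shape, using a tiny fixed-size table instead of an idr; all names are illustrative.

    #include <stddef.h>

    #define MAX_NETNS_IDS 32

    /* table[id] holds the peer registered under that id, or NULL */
    static const void *netns_ids[MAX_NETNS_IDS];

    /* Return the id already assigned to this peer, or -1 if none. */
    static int find_netid(const void *peer)
    {
        for (int id = 0; id < MAX_NETNS_IDS; id++)
            if (netns_ids[id] == peer)
                return id;
        return -1;
    }

    /* Allocate the lowest free id for the peer, or -1 if the table is full. */
    static int alloc_netid_sketch(const void *peer)
    {
        for (int id = 0; id < MAX_NETNS_IDS; id++) {
            if (!netns_ids[id]) {
                netns_ids[id] = peer;
                return id;
            }
        }
        return -1;
    }

    /* Find-or-allocate, mirroring __peernet2id(..., alloc = true). */
    static int peernet2id_sketch(const void *peer)
    {
        int id = find_netid(peer);

        return id >= 0 ? id : alloc_netid_sketch(peer);
    }
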
/net/unix/

D | diag.c |
    40   struct sock *peer;    in sk_diag_dump_peer() local
    43   peer = unix_peer_get(sk);    in sk_diag_dump_peer()
    44   if (peer) {    in sk_diag_dump_peer()
    45   unix_state_lock(peer);    in sk_diag_dump_peer()
    46   ino = sock_i_ino(peer);    in sk_diag_dump_peer()
    47   unix_state_unlock(peer);    in sk_diag_dump_peer()
    48   sock_put(peer);    in sk_diag_dump_peer()
    74   struct sock *req, *peer;    in sk_diag_dump_icons() local
    83   peer = unix_sk(req)->peer;    in sk_diag_dump_icons()
    84   buf[i++] = (peer ? sock_i_ino(peer) : 0);    in sk_diag_dump_icons()

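sk_diag_dump_peer() (lines 40-48) shows the safe way to read one field of a peer object that may disappear underneath you: take a reference (unix_peer_get), lock the peer's state, read the inode number, unlock, drop the reference. A sketch of that take-ref/lock/read/unlock/put sequence with a bare refcount; the socket fields and helpers are illustrative, and freeing on the last put is omitted.

    #include <pthread.h>
    #include <stdatomic.h>

    struct fake_sock {
        atomic_int refcnt;
        pthread_mutex_t state_lock;
        unsigned long inode;
    };

    static struct fake_sock *peer_get(struct fake_sock *sk)
    {
        if (!sk)
            return NULL;
        atomic_fetch_add(&sk->refcnt, 1);       /* pin the peer */
        return sk;
    }

    static void peer_put(struct fake_sock *sk)
    {
        atomic_fetch_sub(&sk->refcnt, 1);       /* freeing omitted here */
    }

    /* Read the peer's inode number, or 0 when there is no peer. */
    static unsigned long peer_inode(struct fake_sock *peer_of_sk)
    {
        struct fake_sock *peer = peer_get(peer_of_sk);
        unsigned long ino = 0;

        if (peer) {
            pthread_mutex_lock(&peer->state_lock);
            ino = peer->inode;
            pthread_mutex_unlock(&peer->state_lock);
            peer_put(peer);
        }
        return ino;
    }
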
/net/ipv6/

D | output_core.c |
    16   struct inet_peer *peer;    in ipv6_select_ident() local
    20   peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);    in ipv6_select_ident()
    21   if (peer) {    in ipv6_select_ident()
    22   fhdr->identification = htonl(inet_getid(peer, 0));    in ipv6_select_ident()
    23   inet_putpeer(peer);    in ipv6_select_ident()

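ipv6_select_ident() (lines 16-23) looks up the long-lived inet_peer entry for the destination and takes the next value of that peer's counter for the fragment header, so identification values advance per destination rather than globally. A sketch of a per-peer ID counter; the atomic increment stands in for inet_getid() and the little table for the inet_peer cache, neither of which is the real API.

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdint.h>

    struct dest_peer {
        uint32_t addr;                  /* destination address (key) */
        atomic_uint ip_id_count;        /* per-destination ID counter */
    };

    /* Tiny stand-in for the inet_peer cache: a fixed set of destinations. */
    static struct dest_peer peers[4];

    static struct dest_peer *lookup_peer(uint32_t addr)
    {
        for (size_t i = 0; i < sizeof(peers) / sizeof(peers[0]); i++) {
            if (peers[i].addr == addr || peers[i].addr == 0) {
                peers[i].addr = addr;   /* claim an empty slot */
                return &peers[i];
            }
        }
        return NULL;
    }

    /* Next fragment ID for this destination; wraps naturally. */
    static uint32_t select_ident(uint32_t daddr)
    {
        struct dest_peer *peer = lookup_peer(daddr);

        if (!peer)
            return 0;   /* fall back to a fixed ID in this sketch */
        return atomic_fetch_add(&peer->ip_id_count, 1);
    }
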
/net/x25/

D | x25_forward.c |
    98   struct net_device *peer = NULL;    in x25_forward_data() local
    109  peer = frwd->dev2;    in x25_forward_data()
    111  peer = frwd->dev1;    in x25_forward_data()
    118  if ( (nb = x25_get_neigh(peer)) == NULL)    in x25_forward_data()

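x25_forward_data() (lines 98-118) treats a forwarding entry as a pair of devices and picks whichever one the frame did not arrive on, then resolves the neighbour for that peer device. The selection itself is a one-liner; a sketch under the assumption that an entry holds exactly two devices, with illustrative types:

    struct x25_forward_entry {
        const void *dev1;
        const void *dev2;
    };

    /* Return the device on the far side of the forwarding pair, i.e. the
     * one the frame did not come in on. */
    static const void *forward_peer_dev(const struct x25_forward_entry *frwd,
                                        const void *from)
    {
        return (from == frwd->dev1) ? frwd->dev2 : frwd->dev1;
    }
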
/net/ipv4/

D | route.c |
    490  struct inet_peer *peer;    in __ip_select_ident() local
    492  peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);    in __ip_select_ident()
    493  if (peer) {    in __ip_select_ident()
    494  iph->id = htons(inet_getid(peer, more));    in __ip_select_ident()
    495  inet_putpeer(peer);    in __ip_select_ident()
    794  struct inet_peer *peer;    in ip_rt_send_redirect() local
    808  peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);    in ip_rt_send_redirect()
    809  if (!peer) {    in ip_rt_send_redirect()
    818  if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))    in ip_rt_send_redirect()
    819  peer->rate_tokens = 0;    in ip_rt_send_redirect()
    [all …]

D | inetpeer.c |
    555  bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)    in inet_peer_xrlim_allow() argument
    560  if (!peer)    in inet_peer_xrlim_allow()
    563  token = peer->rate_tokens;    in inet_peer_xrlim_allow()
    565  token += now - peer->rate_last;    in inet_peer_xrlim_allow()
    566  peer->rate_last = now;    in inet_peer_xrlim_allow()
    573  peer->rate_tokens = token;    in inet_peer_xrlim_allow()

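inet_peer_xrlim_allow() (lines 555-573) is a small token bucket: the peer accrues credit for the time elapsed since rate_last, the bucket is capped, and an event is allowed only when at least timeout's worth of credit is available, in which case that much is spent. ip_rt_send_redirect() in route.c above uses the same per-peer state to throttle ICMP redirects. A standalone sketch of the scheme using seconds instead of jiffies; the burst cap value is illustrative.

    #include <stdbool.h>
    #include <time.h>

    struct rate_peer {
        time_t rate_last;       /* last time credit was topped up */
        long rate_tokens;       /* accumulated credit, in seconds */
    };

    #define BURST_FACTOR 6      /* cap: allow bursts of up to 6 events */

    /* Allow one event if at least `timeout` seconds of credit has built up
     * since the last allowed event; spend that credit when allowing. */
    static bool xrlim_allow(struct rate_peer *peer, long timeout)
    {
        time_t now = time(NULL);
        long token = peer->rate_tokens + (long)(now - peer->rate_last);

        peer->rate_last = now;
        if (token > BURST_FACTOR * timeout)
            token = BURST_FACTOR * timeout;         /* cap the bucket */

        if (token >= timeout) {
            peer->rate_tokens = token - timeout;    /* spend and allow */
            return true;
        }
        peer->rate_tokens = token;                  /* keep saving up */
        return false;
    }
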
/net/wireless/

D | trace.h |
    1322  const u8 *peer, const struct cfg80211_bitrate_mask *mask),
    1323  TP_ARGS(wiphy, netdev, peer, mask),
    1327  MAC_ENTRY(peer)
    1332  MAC_ASSIGN(peer, peer);
    1335  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
    1446  u8 *peer, u8 action_code, u8 dialog_token,
    1448  TP_ARGS(wiphy, netdev, peer, action_code, dialog_token, status_code,
    1453  MAC_ENTRY(peer)
    1462  MAC_ASSIGN(peer, peer);
    1470  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer),
    [all …]

D | rdev-ops.h |
    543  struct net_device *dev, const u8 *peer,    in rdev_set_bitrate_mask() argument
    547  trace_rdev_set_bitrate_mask(&rdev->wiphy, dev, peer, mask);    in rdev_set_bitrate_mask()
    548  ret = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, peer, mask);    in rdev_set_bitrate_mask()
    773  struct net_device *dev, u8 *peer,    in rdev_tdls_mgmt() argument
    778  trace_rdev_tdls_mgmt(&rdev->wiphy, dev, peer, action_code,    in rdev_tdls_mgmt()
    780  ret = rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code,    in rdev_tdls_mgmt()
    787  struct net_device *dev, u8 *peer,    in rdev_tdls_oper() argument
    791  trace_rdev_tdls_oper(&rdev->wiphy, dev, peer, oper);    in rdev_tdls_oper()
    792  ret = rdev->ops->tdls_oper(&rdev->wiphy, dev, peer, oper);    in rdev_tdls_oper()
    798  struct net_device *dev, const u8 *peer,    in rdev_probe_client() argument
    [all …]

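The rdev_*() helpers above all follow one wrapper shape: emit a tracepoint recording the arguments (including the peer MAC, as the trace.h entries show), then invoke the driver's op and return its result. A generic sketch of that shape with plain function pointers and printf-style "tracepoints"; the op table and names are illustrative rather than the cfg80211 API, and the return-value trace is added here for symmetry even though that line is not part of the excerpt.

    #include <stdio.h>

    struct device;                      /* opaque in this sketch */

    struct driver_ops {
        int (*probe_client)(struct device *dev, const unsigned char *peer);
    };

    struct wrapped_dev {
        struct device *dev;
        const struct driver_ops *ops;
    };

    /* Stand-ins for the call/return tracepoints. */
    static void trace_call(const char *op, const unsigned char *peer)
    {
        printf("call %s peer=%02x:%02x:%02x:%02x:%02x:%02x\n", op,
               peer[0], peer[1], peer[2], peer[3], peer[4], peer[5]);
    }

    static void trace_return(const char *op, int ret)
    {
        printf("ret  %s = %d\n", op, ret);
    }

    /* The wrapper: trace the call, invoke the driver op, trace the result. */
    static int wrapped_probe_client(struct wrapped_dev *w,
                                    const unsigned char *peer)
    {
        int ret;

        trace_call("probe_client", peer);
        ret = w->ops->probe_client(w->dev, peer);
        trace_return("probe_client", ret);
        return ret;
    }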