/net/rxrpc/ |
D | peer_object.c |
      83  static long rxrpc_peer_cmp_key(const struct rxrpc_peer *peer,   in rxrpc_peer_cmp_key() argument
      90  diff = ((peer->hash_key - hash_key) ?:   in rxrpc_peer_cmp_key()
      91  ((unsigned long)peer->local - (unsigned long)local) ?:   in rxrpc_peer_cmp_key()
      92  (peer->srx.transport_type - srx->transport_type) ?:   in rxrpc_peer_cmp_key()
      93  (peer->srx.transport_len - srx->transport_len) ?:   in rxrpc_peer_cmp_key()
      94  (peer->srx.transport.family - srx->transport.family));   in rxrpc_peer_cmp_key()
     100  return ((u16 __force)peer->srx.transport.sin.sin_port -   in rxrpc_peer_cmp_key()
     102  memcmp(&peer->srx.transport.sin.sin_addr,   in rxrpc_peer_cmp_key()
     107  return ((u16 __force)peer->srx.transport.sin6.sin6_port -   in rxrpc_peer_cmp_key()
     109  memcmp(&peer->srx.transport.sin6.sin6_addr,   in rxrpc_peer_cmp_key()
          [all …]
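The key comparison above chains fields with GCC's binary "?:" (Elvis) extension: each later term only matters once every earlier field compared equal (produced 0). A minimal standalone sketch of the same idiom, using made-up field names rather than the real rxrpc structures:

/* Sketch of the chained-comparison idiom from rxrpc_peer_cmp_key().
 * Requires GCC/Clang: "a ?: b" evaluates to a when a is non-zero.
 * The struct and its fields are illustrative only. */
struct key {
        unsigned long  hash;
        unsigned short type;
        unsigned short len;
};

static long key_cmp(const struct key *a, const struct key *b)
{
        return ((long)(a->hash - b->hash) ?:
                (long)(a->type - b->type) ?:
                (long)(a->len  - b->len));
}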
|
D | peer_event.c |
     105  static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)   in rxrpc_adjust_mtu() argument
     112  if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {   in rxrpc_adjust_mtu()
     113  peer->if_mtu = mtu;   in rxrpc_adjust_mtu()
     119  mtu = peer->if_mtu;   in rxrpc_adjust_mtu()
     126  if (mtu < peer->hdrsize)   in rxrpc_adjust_mtu()
     127  mtu = peer->hdrsize + 4;   in rxrpc_adjust_mtu()
     131  if (mtu < peer->mtu) {   in rxrpc_adjust_mtu()
     132  spin_lock_bh(&peer->lock);   in rxrpc_adjust_mtu()
     133  peer->mtu = mtu;   in rxrpc_adjust_mtu()
     134  peer->maxdata = peer->mtu - peer->hdrsize;   in rxrpc_adjust_mtu()
          [all …]
|
D | conn_service.c |
      25  struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,   in rxrpc_find_service_conn_rcu() argument
      42  read_seqbegin_or_lock(&peer->service_conn_lock, &seq);   in rxrpc_find_service_conn_rcu()
      44  p = rcu_dereference_raw(peer->service_conns.rb_node);   in rxrpc_find_service_conn_rcu()
      56  } while (need_seqretry(&peer->service_conn_lock, seq));   in rxrpc_find_service_conn_rcu()
      59  done_seqretry(&peer->service_conn_lock, seq);   in rxrpc_find_service_conn_rcu()
      68  static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,   in rxrpc_publish_service_conn() argument
      75  write_seqlock_bh(&peer->service_conn_lock);   in rxrpc_publish_service_conn()
      77  pp = &peer->service_conns.rb_node;   in rxrpc_publish_service_conn()
      93  rb_insert_color(&conn->service_node, &peer->service_conns);   in rxrpc_publish_service_conn()
      96  write_sequnlock_bh(&peer->service_conn_lock);   in rxrpc_publish_service_conn()
          [all …]
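rxrpc_find_service_conn_rcu() above is the read side of a seqlock-protected rbtree: the first pass runs lockless and the loop only takes the lock if a concurrent writer forced a retry. A schematic version of that loop, with placeholder types (struct host, struct conn, find_in_tree()) standing in for the real rxrpc ones:

#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/seqlock.h>
#include <linux/types.h>

struct conn;                                                      /* placeholder */
static struct conn *find_in_tree(struct rb_node *node, u32 key);  /* placeholder tree walk */

struct host {
        seqlock_t       service_conn_lock;
        struct rb_root  service_conns;
};

/* Lockless-first lookup with seqlock fallback, as in conn_service.c. */
static struct conn *lookup_conn(struct host *h, u32 key)
{
        struct conn *conn;
        int seq = 0;

        do {
                /* Pass 1 is a lockless seqcount read; a retry (odd seq)
                 * re-runs the walk with the lock held. */
                read_seqbegin_or_lock(&h->service_conn_lock, &seq);
                conn = find_in_tree(rcu_dereference_raw(h->service_conns.rb_node), key);
        } while (need_seqretry(&h->service_conn_lock, seq));

        done_seqretry(&h->service_conn_lock, seq);
        return conn;
}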
|
D | conn_object.c |
      78  struct rxrpc_peer *peer;   in rxrpc_find_connection_rcu() local
     104  peer = rxrpc_lookup_peer_rcu(local, &srx);   in rxrpc_find_connection_rcu()
     105  if (!peer)   in rxrpc_find_connection_rcu()
     107  conn = rxrpc_find_service_conn_rcu(peer, skb);   in rxrpc_find_connection_rcu()
     127  peer = conn->params.peer;   in rxrpc_find_connection_rcu()
     130  if (peer->srx.transport.sin.sin_port !=   in rxrpc_find_connection_rcu()
     132  peer->srx.transport.sin.sin_addr.s_addr !=   in rxrpc_find_connection_rcu()
     138  if (peer->srx.transport.sin6.sin6_port !=   in rxrpc_find_connection_rcu()
     140  memcmp(&peer->srx.transport.sin6.sin6_addr,   in rxrpc_find_connection_rcu()
     203  spin_lock_bh(&conn->params.peer->lock);   in rxrpc_disconnect_call()
          [all …]
|
D | proc.c |
      53  struct rxrpc_peer *peer;   in rxrpc_call_seq_show() local
      79  peer = call->peer;   in rxrpc_call_seq_show()
      80  if (peer)   in rxrpc_call_seq_show()
      81  sprintf(rbuff, "%pISpc", &peer->srx.transport);   in rxrpc_call_seq_show()
     166  sprintf(rbuff, "%pISpc", &conn->params.peer->srx.transport);   in rxrpc_connection_seq_show()
|
D | output.c |
      80  mtu = call->conn->params.peer->if_mtu;   in rxrpc_fill_out_ack()
      81  mtu -= call->conn->params.peer->hdrsize;   in rxrpc_fill_out_ack()
     122  msg.msg_name = &call->peer->srx.transport;   in rxrpc_send_ack_packet()
     123  msg.msg_namelen = call->peer->srx.transport_len;   in rxrpc_send_ack_packet()
     231  msg.msg_name = &call->peer->srx.transport;   in rxrpc_send_abort_packet()
     232  msg.msg_namelen = call->peer->srx.transport_len;   in rxrpc_send_abort_packet()
     301  msg.msg_name = &call->peer->srx.transport;   in rxrpc_send_data_packet()
     302  msg.msg_namelen = call->peer->srx.transport_len;   in rxrpc_send_data_packet()
     313  (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||   in rxrpc_send_data_packet()
     314  ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),   in rxrpc_send_data_packet()
          [all …]
|
D | call_accept.c |
      69  struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);   in rxrpc_service_prealloc_one() local
      70  if (!peer)   in rxrpc_service_prealloc_one()
      72  b->peer_backlog[head] = peer;   in rxrpc_service_prealloc_one()
     203  struct rxrpc_peer *peer = b->peer_backlog[tail];   in rxrpc_discard_prealloc() local
     204  kfree(peer);   in rxrpc_discard_prealloc()
     248  struct rxrpc_peer *peer, *xpeer;   in rxrpc_alloc_incoming_call() local
     280  peer = rxrpc_lookup_incoming_peer(local, xpeer);   in rxrpc_alloc_incoming_call()
     281  if (peer == xpeer) {   in rxrpc_alloc_incoming_call()
     295  conn->params.peer = peer;   in rxrpc_alloc_incoming_call()
     310  call->peer = rxrpc_get_peer(conn->params.peer);   in rxrpc_alloc_incoming_call()
|
D | ar-internal.h |
     278  struct rxrpc_peer *peer; /* Remote endpoint */   member
     467  struct rxrpc_peer *peer; /* Peer record for remote address */   member
    1104  static inline struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)   in rxrpc_get_peer() argument
    1106  atomic_inc(&peer->usage);   in rxrpc_get_peer()
    1107  return peer;   in rxrpc_get_peer()
    1111  struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)   in rxrpc_get_peer_maybe() argument
    1113  return atomic_inc_not_zero(&peer->usage) ? peer : NULL;   in rxrpc_get_peer_maybe()
    1116  extern void __rxrpc_put_peer(struct rxrpc_peer *peer);
    1117  static inline void rxrpc_put_peer(struct rxrpc_peer *peer)   in rxrpc_put_peer() argument
    1119  if (peer && atomic_dec_and_test(&peer->usage))   in rxrpc_put_peer()
          [all …]
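The helpers above keep a peer alive with plain atomic_t reference counting: an unconditional get, a get-only-if-still-live variant built on atomic_inc_not_zero() for RCU-side lookups, and a put that destroys the object on the final drop. A condensed sketch of the pattern with generic names (the real header defers to __rxrpc_put_peer() rather than freeing inline):

#include <linux/atomic.h>
#include <linux/slab.h>

struct obj {
        atomic_t usage;
        /* ... payload ... */
};

static inline struct obj *obj_get(struct obj *o)
{
        atomic_inc(&o->usage);
        return o;
}

/* Only take a reference if the count has not already reached zero,
 * e.g. when the object was found under RCU and may be dying. */
static inline struct obj *obj_get_maybe(struct obj *o)
{
        return atomic_inc_not_zero(&o->usage) ? o : NULL;
}

static inline void obj_put(struct obj *o)
{
        if (o && atomic_dec_and_test(&o->usage))
                kfree(o);       /* stand-in for the real destructor */
}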
|
D | input.c |
      98  if (call->peer->rtt_usage == 0)   in rxrpc_congestion_management()
     102  call->peer->rtt)))   in rxrpc_congestion_management()
     209  if (call->peer->rtt_usage < 3 ||   in rxrpc_send_ping()
     210  ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))   in rxrpc_send_ping()
     650  struct rxrpc_peer *peer;   in rxrpc_input_ackinfo() local
     673  peer = call->peer;   in rxrpc_input_ackinfo()
     674  if (mtu < peer->maxdata) {   in rxrpc_input_ackinfo()
     675  spin_lock_bh(&peer->lock);   in rxrpc_input_ackinfo()
     676  peer->maxdata = mtu;   in rxrpc_input_ackinfo()
     677  peer->mtu = mtu + peer->hdrsize;   in rxrpc_input_ackinfo()
          [all …]
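The test at lines 209-210 (and the similar one in output.c above) decides whether to solicit a fresh RTT sample: probe if fewer than three samples exist or the last probe is more than a second old. A small, hedged sketch of that check with illustrative names:

#include <linux/ktime.h>
#include <linux/types.h>

/* True if an RTT probe should be requested: not enough samples yet, or
 * the previous request is more than 1000 ms in the past. */
static bool want_rtt_probe(unsigned int rtt_usage, ktime_t rtt_last_req,
                           ktime_t now)
{
        return rtt_usage < 3 ||
               ktime_before(ktime_add_ms(rtt_last_req, 1000), now);
}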
|
/net/tipc/ |
D | monitor.c |
     136  static struct tipc_peer *peer_prev(struct tipc_peer *peer)   in peer_prev() argument
     138  return list_last_entry(&peer->list, struct tipc_peer, list);   in peer_prev()
     141  static struct tipc_peer *peer_nxt(struct tipc_peer *peer)   in peer_nxt() argument
     143  return list_first_entry(&peer->list, struct tipc_peer, list);   in peer_nxt()
     146  static struct tipc_peer *peer_head(struct tipc_peer *peer)   in peer_head() argument
     148  while (!peer->is_head)   in peer_head()
     149  peer = peer_prev(peer);   in peer_head()
     150  return peer;   in peer_head()
     155  struct tipc_peer *peer;   in get_peer() local
     158  hlist_for_each_entry(peer, &mon->peers[thash], hash) {   in get_peer()
          [all …]
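TIPC's monitor keeps its peers on a circular list and locates the domain head by stepping backwards until an entry flagged is_head is reached; prev/next come straight from the entry's own embedded list_head. A minimal sketch of that navigation with an illustrative struct:

#include <linux/list.h>
#include <linux/types.h>

struct node {
        struct list_head list;
        bool is_head;
};

/* On a circular list the entry's own list_head doubles as a cursor:
 * ->prev is the previous element, ->next the following one. */
static struct node *node_prev(struct node *n)
{
        return list_last_entry(&n->list, struct node, list);
}

static struct node *node_nxt(struct node *n)
{
        return list_first_entry(&n->list, struct node, list);
}

static struct node *node_head(struct node *n)
{
        while (!n->is_head)
                n = node_prev(n);
        return n;
}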
|
/net/sctp/ |
D | associola.c |
     180  asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;   in sctp_association_init()
     215  INIT_LIST_HEAD(&asoc->peer.transport_addr_list);   in sctp_association_init()
     228  asoc->peer.sack_needed = 1;   in sctp_association_init()
     229  asoc->peer.sack_generation = 1;   in sctp_association_init()
     237  asoc->peer.asconf_capable = 1;   in sctp_association_init()
     252  asoc->peer.ipv4_address = 1;   in sctp_association_init()
     254  asoc->peer.ipv6_address = 1;   in sctp_association_init()
     359  sctp_tsnmap_free(&asoc->peer.tsn_map);   in sctp_association_free()
     378  kfree(asoc->peer.cookie);   in sctp_association_free()
     379  kfree(asoc->peer.peer_random);   in sctp_association_free()
          [all …]
|
D | transport.c |
      56  struct sctp_transport *peer,   in sctp_transport_init() argument
      61  peer->ipaddr = *addr;   in sctp_transport_init()
      62  peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);   in sctp_transport_init()
      63  memset(&peer->saddr, 0, sizeof(union sctp_addr));   in sctp_transport_init()
      65  peer->sack_generation = 0;   in sctp_transport_init()
      73  peer->rto = msecs_to_jiffies(net->sctp.rto_initial);   in sctp_transport_init()
      75  peer->last_time_heard = ktime_set(0, 0);   in sctp_transport_init()
      76  peer->last_time_ecne_reduced = jiffies;   in sctp_transport_init()
      78  peer->param_flags = SPP_HB_DISABLE |   in sctp_transport_init()
      83  peer->pathmaxrxt = net->sctp.max_retrans_path;   in sctp_transport_init()
          [all …]
|
D | sm_make_chunk.c |
     425  if (asoc->peer.ecn_capable)   in sctp_make_init_ack()
     428  if (asoc->peer.prsctp_capable)   in sctp_make_init_ack()
     431  if (asoc->peer.asconf_capable) {   in sctp_make_init_ack()
     440  if (asoc->peer.auth_capable) {   in sctp_make_init_ack()
     484  if (asoc->peer.ecn_capable)   in sctp_make_init_ack()
     494  if (asoc->peer.prsctp_capable)   in sctp_make_init_ack()
     504  if (asoc->peer.auth_capable) {   in sctp_make_init_ack()
     566  cookie = asoc->peer.cookie;   in sctp_make_cookie_echo()
     567  cookie_len = asoc->peer.cookie_len;   in sctp_make_cookie_echo()
     759  struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;   in sctp_make_sack()
          [all …]
|
D | sm_sideeffect.c |
     156  struct sctp_transport *trans = asoc->peer.last_data_from;   in sctp_gen_sack()
     162  asoc->peer.sack_needed = 1;   in sctp_gen_sack()
     164  ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);   in sctp_gen_sack()
     165  max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);   in sctp_gen_sack()
     178  asoc->peer.sack_needed = 1;   in sctp_gen_sack()
     188  if (!asoc->peer.sack_needed) {   in sctp_gen_sack()
     189  asoc->peer.sack_cnt++;   in sctp_gen_sack()
     198  if (asoc->peer.sack_cnt >= trans->sackfreq - 1)   in sctp_gen_sack()
     199  asoc->peer.sack_needed = 1;   in sctp_gen_sack()
     205  if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)   in sctp_gen_sack()
          [all …]
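sctp_gen_sack() above implements delayed SACK bundling: a SACK only becomes due once enough DATA chunks have arrived since the last one (sackfreq, taken from the transport or the association). A rough standalone sketch of that counter logic, with illustrative field names rather than the real sctp_association layout:

#include <linux/types.h>

struct sack_state {
        unsigned int sack_cnt;
        unsigned int sackfreq;  /* e.g. 2: SACK every other packet */
        bool sack_needed;
};

/* Called per received DATA chunk; forces a SACK every sackfreq packets. */
static void note_data_arrived(struct sack_state *s)
{
        if (!s->sack_needed) {
                s->sack_cnt++;
                if (s->sack_cnt >= s->sackfreq - 1)
                        s->sack_needed = true;
        }
}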
|
D | outqueue.c |
     221  list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,   in __sctp_outq_teardown()
     307  if (chunk->asoc->peer.prsctp_capable &&   in sctp_outq_tail()
     418  if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable)   in sctp_prsctp_prune()
     427  list_for_each_entry(transport, &asoc->peer.transport_addr_list,   in sctp_prsctp_prune()
     467  q->asoc->peer.rwnd += sctp_data_size(chunk);   in sctp_retransmit_mark()
     487  q->asoc->peer.rwnd += sctp_data_size(chunk);   in sctp_retransmit_mark()
     541  if (transport == transport->asoc->peer.retran_path)   in sctp_retransmit()
     772  __u16 dport = asoc->peer.port;   in sctp_outq_flush()
     773  __u32 vtag = asoc->peer.i.init_tag;   in sctp_outq_flush()
     838  new_transport = asoc->peer.active_path;   in sctp_outq_flush()
          [all …]
|
D | probe.c |
     143  sp = asoc->peer.primary_path;   in jsctp_sf_eat_sack()
     146  asoc->peer.port == port ||   in jsctp_sf_eat_sack()
     159  ep->base.bind_addr.port, asoc->peer.port,   in jsctp_sf_eat_sack()
     160  asoc->pathmtu, asoc->peer.rwnd, asoc->unack_data);   in jsctp_sf_eat_sack()
     162  list_for_each_entry(sp, &asoc->peer.transport_addr_list,   in jsctp_sf_eat_sack()
     164  if (sp == asoc->peer.primary_path)   in jsctp_sf_eat_sack()
|
D | proc.c |
     129  struct sctp_transport *peer;   in sctp_seq_dump_local_addrs() local
     136  peer = asoc->peer.primary_path;   in sctp_seq_dump_local_addrs()
     137  if (unlikely(peer == NULL)) {   in sctp_seq_dump_local_addrs()
     142  primary = &peer->saddr;   in sctp_seq_dump_local_addrs()
     167  primary = &assoc->peer.primary_addr;   in sctp_seq_dump_remote_addrs()
     168  list_for_each_entry_rcu(transport, &assoc->peer.transport_addr_list,   in sctp_seq_dump_remote_addrs()
     357  assoc->peer.port);   in sctp_assocs_seq_show()
     435  list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,   in sctp_remaddr_seq_show()
|
D | sctp_diag.c |
      16  struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer;   in inet_diag_msg_sctpasoc_fill()
      20  paddr = asoc->peer.primary_path->ipaddr;   in inet_diag_msg_sctpasoc_fill()
      21  dst = asoc->peer.primary_path->dst;   in inet_diag_msg_sctpasoc_fill()
      25  r->id.idiag_dport = htons(asoc->peer.port);   in inet_diag_msg_sctpasoc_fill()
      90  addrlen * asoc->peer.transport_count);   in inet_diag_msg_sctpaddrs_fill()
      95  list_for_each_entry(from, &asoc->peer.transport_addr_list,   in inet_diag_msg_sctpaddrs_fill()
     228  + nla_total_size(addrlen * asoc->peer.transport_count)   in inet_assoc_attr_size()
     300  if (r->id.idiag_dport != htons(assoc->peer.port) &&   in sctp_sock_dump()
|
/net/bluetooth/ |
D | 6lowpan.c |
      90  struct lowpan_peer *peer)   in peer_add() argument
      92  list_add_rcu(&peer->list, &dev->peers);   in peer_add()
      97  struct lowpan_peer *peer)   in peer_del() argument
      99  list_del_rcu(&peer->list);   in peer_del()
     100  kfree_rcu(peer, rcu);   in peer_del()
     115  struct lowpan_peer *peer;   in peer_lookup_ba() local
     122  list_for_each_entry_rcu(peer, &dev->peers, list) {   in peer_lookup_ba()
     124  &peer->chan->dst, peer->chan->dst_type);   in peer_lookup_ba()
     126  if (bacmp(&peer->chan->dst, ba))   in peer_lookup_ba()
     129  if (type == peer->chan->dst_type) {   in peer_lookup_ba()
          [all …]
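The Bluetooth 6LoWPAN peer list above is a textbook RCU list: writers use the _rcu list helpers and defer the free with kfree_rcu(), while readers walk the list under rcu_read_lock(). A hedged sketch of the three operations with an illustrative entry type (writers are assumed to be serialized by a separate lock, as in the original):

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct entry {
        struct list_head list;
        struct rcu_head rcu;
        u32 key;
};

static void entry_add(struct list_head *head, struct entry *e)
{
        list_add_rcu(&e->list, head);
}

static void entry_del(struct entry *e)
{
        list_del_rcu(&e->list);
        kfree_rcu(e, rcu);      /* free only after existing readers finish */
}

/* Caller must hold rcu_read_lock(). */
static struct entry *entry_lookup(struct list_head *head, u32 key)
{
        struct entry *e;

        list_for_each_entry_rcu(e, head, list) {
                if (e->key == key)
                        return e;
        }
        return NULL;
}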
|
/net/core/ |
D | net_namespace.c |
     153  static int alloc_netid(struct net *net, struct net *peer, int reqid)   in alloc_netid() argument
     162  return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);   in alloc_netid()
     171  static int net_eq_idr(int id, void *net, void *peer)   in net_eq_idr() argument
     173  if (net_eq(net, peer))   in net_eq_idr()
     182  static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)   in __peernet2id_alloc() argument
     184  int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);   in __peernet2id_alloc()
     196  id = alloc_netid(net, peer, -1);   in __peernet2id_alloc()
     205  static int __peernet2id(struct net *net, struct net *peer)   in __peernet2id() argument
     209  return __peernet2id_alloc(net, peer, &no);   in __peernet2id()
     216  int peernet2id_alloc(struct net *net, struct net *peer)   in peernet2id_alloc() argument
          [all …]
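net_namespace.c maps peer namespaces to small integer ids with an IDR: idr_alloc() hands out the id, and the reverse (pointer to id) lookup walks the IDR with idr_for_each() and a match callback, the shape of net_eq_idr()/__peernet2id_alloc() above. A simplified sketch (ids start at 1 here to sidestep the 0-versus-not-found ambiguity that the real code handles separately):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/idr.h>

/* Returning non-zero from the callback stops idr_for_each() and makes
 * it return that value. */
static int match_ptr(int id, void *entry, void *wanted)
{
        return entry == wanted ? id : 0;
}

static int ptr_to_id(struct idr *idr, void *ptr)
{
        int id = idr_for_each(idr, match_ptr, ptr);

        return id ? id : -ENOENT;
}

static int id_alloc(struct idr *idr, void *ptr)
{
        /* Any free id >= 1; GFP_ATOMIC mirrors the original caller. */
        return idr_alloc(idr, ptr, 1, 0, GFP_ATOMIC);
}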
|
/net/mac80211/ |
D | tdls.c |
     198  struct sk_buff *skb, const u8 *peer,   in ieee80211_tdls_add_link_ie() argument
     206  rsp_addr = peer;   in ieee80211_tdls_add_link_ie()
     208  init_addr = peer;   in ieee80211_tdls_add_link_ie()
     359  struct sk_buff *skb, const u8 *peer,   in ieee80211_tdls_add_setup_start_ies() argument
     431  sta = sta_info_get(sdata, peer);   in ieee80211_tdls_add_setup_start_ies()
     473  ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);   in ieee80211_tdls_add_setup_start_ies()
     544  struct sk_buff *skb, const u8 *peer,   in ieee80211_tdls_add_setup_cfm_ies() argument
     561  sta = sta_info_get(sdata, peer);   in ieee80211_tdls_add_setup_cfm_ies()
     620  ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);   in ieee80211_tdls_add_setup_cfm_ies()
     649  struct sk_buff *skb, const u8 *peer,   in ieee80211_tdls_add_chan_switch_req_ies() argument
          [all …]
|
/net/unix/ |
D | diag.c |
      40  struct sock *peer;   in sk_diag_dump_peer() local
      43  peer = unix_peer_get(sk);   in sk_diag_dump_peer()
      44  if (peer) {   in sk_diag_dump_peer()
      45  unix_state_lock(peer);   in sk_diag_dump_peer()
      46  ino = sock_i_ino(peer);   in sk_diag_dump_peer()
      47  unix_state_unlock(peer);   in sk_diag_dump_peer()
      48  sock_put(peer);   in sk_diag_dump_peer()
      74  struct sock *req, *peer;   in sk_diag_dump_icons() local
      83  peer = unix_sk(req)->peer;   in sk_diag_dump_icons()
      84  buf[i++] = (peer ? sock_i_ino(peer) : 0);   in sk_diag_dump_icons()
|
/net/x25/ |
D | x25_forward.c |
     101  struct net_device *peer = NULL;   in x25_forward_data() local
     112  peer = frwd->dev2;   in x25_forward_data()
     114  peer = frwd->dev1;   in x25_forward_data()
     121  if ( (nb = x25_get_neigh(peer)) == NULL)   in x25_forward_data()
|
/net/wireless/ |
D | trace.h |
    1472  const u8 *peer, const struct cfg80211_bitrate_mask *mask),
    1473  TP_ARGS(wiphy, netdev, peer, mask),
    1477  MAC_ENTRY(peer)
    1482  MAC_ASSIGN(peer, peer);
    1485  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
    1591  u8 *peer, u8 action_code, u8 dialog_token,
    1594  TP_ARGS(wiphy, netdev, peer, action_code, dialog_token, status_code,
    1599  MAC_ENTRY(peer)
    1610  MAC_ASSIGN(peer, peer);
    1621  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer),
          [all …]
|
D | rdev-ops.h |
     600  struct net_device *dev, const u8 *peer,   in rdev_set_bitrate_mask() argument
     604  trace_rdev_set_bitrate_mask(&rdev->wiphy, dev, peer, mask);   in rdev_set_bitrate_mask()
     605  ret = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, peer, mask);   in rdev_set_bitrate_mask()
     809  struct net_device *dev, u8 *peer,   in rdev_tdls_mgmt() argument
     815  trace_rdev_tdls_mgmt(&rdev->wiphy, dev, peer, action_code,   in rdev_tdls_mgmt()
     818  ret = rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code,   in rdev_tdls_mgmt()
     826  struct net_device *dev, u8 *peer,   in rdev_tdls_oper() argument
     830  trace_rdev_tdls_oper(&rdev->wiphy, dev, peer, oper);   in rdev_tdls_oper()
     831  ret = rdev->ops->tdls_oper(&rdev->wiphy, dev, peer, oper);   in rdev_tdls_oper()
     837  struct net_device *dev, const u8 *peer,   in rdev_probe_client() argument
          [all …]
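Every rdev_*() helper in rdev-ops.h has the same shape: emit an entry tracepoint, call the driver op through the ops table, trace the return value, and return it. Schematically, with op_foo and trace_rdev_op_foo as placeholders for a concrete op such as set_bitrate_mask and its tracepoint from trace.h:

/* Schematic wrapper; assumes the cfg80211-internal
 * struct cfg80211_registered_device from net/wireless/core.h. */
static inline int rdev_op_foo(struct cfg80211_registered_device *rdev,
                              struct net_device *dev, const u8 *peer)
{
        int ret;

        trace_rdev_op_foo(&rdev->wiphy, dev, peer);        /* entry tracepoint */
        ret = rdev->ops->op_foo(&rdev->wiphy, dev, peer);  /* driver callback */
        trace_rdev_return_int(&rdev->wiphy, ret);          /* result tracepoint */
        return ret;
}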
|