/net/netfilter/ipvs/ |
D | ip_vs_sh.c |
     55  struct ip_vs_dest __rcu *dest;  /* real server (cache) */  member
     74  static inline bool is_unavailable(struct ip_vs_dest *dest)  in is_unavailable() argument
     76  return atomic_read(&dest->weight) <= 0 ||  in is_unavailable()
     77  dest->flags & IP_VS_DEST_F_OVERLOAD;  in is_unavailable()
    108  struct ip_vs_dest *dest = rcu_dereference(s->buckets[hash].dest);  in ip_vs_sh_get() local
    110  return (!dest || is_unavailable(dest)) ? NULL : dest;  in ip_vs_sh_get()
    126  struct ip_vs_dest *dest;  in ip_vs_sh_get_fallback() local
    130  dest = rcu_dereference(s->buckets[ihash].dest);  in ip_vs_sh_get_fallback()
    131  if (!dest)  in ip_vs_sh_get_fallback()
    133  if (!is_unavailable(dest))  in ip_vs_sh_get_fallback()
    [all …]
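ip_vs_sh caches one real server per hash bucket and rejects it when its weight is 0 or it is flagged overloaded (lines 74-77); ip_vs_sh_get_fallback() then probes further buckets. A minimal user-space sketch of that lookup, assuming a simplified table (SH_TAB_SIZE, struct sh_bucket and the linear probe are illustrative; the kernel instead folds a retry offset into the hash function on each attempt):

    #include <stdbool.h>

    #define SH_TAB_SIZE 256              /* the kernel's IP_VS_SH_TAB_SIZE is also 256 */

    struct dest { int weight; bool overloaded; };
    struct sh_bucket { struct dest *dest; };

    static bool is_unavailable(const struct dest *d)
    {
        return d->weight <= 0 || d->overloaded;     /* mirrors lines 76-77 */
    }

    static struct dest *sh_get_fallback(struct sh_bucket *tab, unsigned int hash)
    {
        for (unsigned int i = 0; i < SH_TAB_SIZE; i++) {
            struct dest *d = tab[(hash + i) % SH_TAB_SIZE].dest;

            if (!d)
                break;                   /* empty bucket ends the probe (line 131) */
            if (!is_unavailable(d))
                return d;                /* first healthy server wins (line 133) */
        }
        return NULL;
    }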
|
D | ip_vs_mh.c |
     39  struct ip_vs_dest __rcu *dest;  /* real server (cache) */  member
     81  static inline bool is_unavailable(struct ip_vs_dest *dest)  in is_unavailable() argument
     83  return atomic_read(&dest->weight) <= 0 ||  in is_unavailable()
     84  dest->flags & IP_VS_DEST_F_OVERLOAD;  in is_unavailable()
    109  struct ip_vs_dest *dest;  in ip_vs_mh_reset() local
    113  dest = rcu_dereference_protected(l->dest, 1);  in ip_vs_mh_reset()
    114  if (dest) {  in ip_vs_mh_reset()
    115  ip_vs_dest_put(dest);  in ip_vs_mh_reset()
    116  RCU_INIT_POINTER(l->dest, NULL);  in ip_vs_mh_reset()
    127  struct ip_vs_dest *dest;  in ip_vs_mh_permutate() local
    [all …]
|
D | ip_vs_dh.c |
     50  struct ip_vs_dest __rcu *dest;  /* real server (cache) */  member
     90  return rcu_dereference(s->buckets[ip_vs_dh_hashkey(af, addr)].dest);  in ip_vs_dh_get()
    103  struct ip_vs_dest *dest;  in ip_vs_dh_reassign() local
    110  dest = rcu_dereference_protected(b->dest, 1);  in ip_vs_dh_reassign()
    111  if (dest)  in ip_vs_dh_reassign()
    112  ip_vs_dest_put(dest);  in ip_vs_dh_reassign()
    114  RCU_INIT_POINTER(b->dest, NULL);  in ip_vs_dh_reassign()
    119  dest = list_entry(p, struct ip_vs_dest, n_list);  in ip_vs_dh_reassign()
    120  ip_vs_dest_hold(dest);  in ip_vs_dh_reassign()
    121  RCU_INIT_POINTER(b->dest, dest);  in ip_vs_dh_reassign()
    [all …]
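ip_vs_dh_reassign() shows the reference-counted pointer swap used by the hash-bucket schedulers: put the old cached server, hold the new one, publish it for concurrent readers. A sketch of that pattern with C11 atomics standing in for the RCU macros (dest_hold/dest_put model ip_vs_dest_hold()/ip_vs_dest_put(); real RCU also defers freeing the old object past a grace period, which this model omits):

    #include <stdatomic.h>
    #include <stddef.h>

    struct dest { _Atomic int refcnt; };
    struct bucket { _Atomic(struct dest *) dest; };

    static void dest_hold(struct dest *d) { atomic_fetch_add(&d->refcnt, 1); }
    static void dest_put(struct dest *d)  { atomic_fetch_sub(&d->refcnt, 1); }

    /* Swap the cached server in a bucket, as lines 110-121 do: drop the old
     * reference, take one on the new server, then publish with a release
     * store (standing in for RCU_INIT_POINTER/rcu_assign_pointer). */
    static void bucket_reassign(struct bucket *b, struct dest *newd)
    {
        struct dest *old = atomic_load_explicit(&b->dest, memory_order_relaxed);

        if (old)
            dest_put(old);
        if (newd)
            dest_hold(newd);
        atomic_store_explicit(&b->dest, newd, memory_order_release);
    }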
|
D | ip_vs_lblcr.c |
     88  struct ip_vs_dest *dest;  /* destination server */  member
    100  struct ip_vs_dest *dest, bool check)  in ip_vs_dest_set_insert() argument
    106  if (e->dest == dest)  in ip_vs_dest_set_insert()
    115  ip_vs_dest_hold(dest);  in ip_vs_dest_set_insert()
    116  e->dest = dest;  in ip_vs_dest_set_insert()
    129  ip_vs_dest_put_and_free(e->dest);  in ip_vs_lblcr_elem_rcu_free()
    134  ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)  in ip_vs_dest_set_erase() argument
    139  if (e->dest == dest) {  in ip_vs_dest_set_erase()
    164  struct ip_vs_dest *dest, *least;  in ip_vs_dest_set_min() local
    169  least = e->dest;  in ip_vs_dest_set_min()
    [all …]
|
D | ip_vs_wrr.c |
     71  struct ip_vs_dest *dest;  in ip_vs_wrr_gcd_weight() local
     75  list_for_each_entry(dest, &svc->destinations, n_list) {  in ip_vs_wrr_gcd_weight()
     76  weight = atomic_read(&dest->weight);  in ip_vs_wrr_gcd_weight()
     93  struct ip_vs_dest *dest;  in ip_vs_wrr_max_weight() local
     96  list_for_each_entry(dest, &svc->destinations, n_list) {  in ip_vs_wrr_max_weight()
     97  new_weight = atomic_read(&dest->weight);  in ip_vs_wrr_max_weight()
    139  struct ip_vs_dest *dest)  in ip_vs_wrr_dest_changed() argument
    163  struct ip_vs_dest *dest, *last, *stop = NULL;  in ip_vs_wrr_schedule() local
    170  dest = mark->cl;  in ip_vs_wrr_schedule()
    174  last = dest;  in ip_vs_wrr_schedule()
    [all …]
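ip_vs_wrr_gcd_weight() computes the GCD of all destination weights so the weighted round-robin can step its current-weight counter down by that amount per round instead of by 1. A standalone sketch of the same arithmetic (wrr_gcd_weight and the example weights are illustrative):

    #include <stdio.h>

    /* Euclid's algorithm, as the kernel's gcd() helper provides. */
    static int gcd(int a, int b)
    {
        while (b) { int t = a % b; a = b; b = t; }
        return a;
    }

    /* GCD over all weights, skipping entries with weight 0, mirroring
     * ip_vs_wrr_gcd_weight() (lines 71-76). */
    static int wrr_gcd_weight(const int *weights, int n)
    {
        int g = 0;

        for (int i = 0; i < n; i++)
            if (weights[i] > 0)
                g = g ? gcd(weights[i], g) : weights[i];
        return g ? g : 1;
    }

    int main(void)
    {
        int w[] = { 4, 6, 2 };

        printf("gcd = %d\n", wrr_gcd_weight(w, 3));   /* prints: gcd = 2 */
        return 0;
    }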
|
D | ip_vs_ctl.c |
    453  __ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc)  in __ip_vs_bind_svc() argument
    456  rcu_assign_pointer(dest->svc, svc);  in __ip_vs_bind_svc()
    509  static void ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)  in ip_vs_rs_hash() argument
    514  if (dest->in_rs_table)  in ip_vs_rs_hash()
    517  switch (IP_VS_DFWD_METHOD(dest)) {  in ip_vs_rs_hash()
    519  port = dest->port;  in ip_vs_rs_hash()
    522  switch (dest->tun_type) {  in ip_vs_rs_hash()
    524  port = dest->tun_port;  in ip_vs_rs_hash()
    542  hash = ip_vs_rs_hashkey(dest->af, &dest->addr, port);  in ip_vs_rs_hash()
    544  hlist_add_head_rcu(&dest->d_list, &ipvs->rs_table[hash]);  in ip_vs_rs_hash()
    [all …]
|
D | ip_vs_rr.c |
     33  static int ip_vs_rr_del_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest)  in ip_vs_rr_del_dest() argument
     42  if (p == &dest->n_list)  in ip_vs_rr_del_dest()
     57  struct ip_vs_dest *dest, *last;  in ip_vs_rr_schedule() local
     64  last = dest = list_entry(p, struct ip_vs_dest, n_list);  in ip_vs_rr_schedule()
     67  list_for_each_entry_continue_rcu(dest,  in ip_vs_rr_schedule()
     70  if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&  in ip_vs_rr_schedule()
     71  atomic_read(&dest->weight) > 0)  in ip_vs_rr_schedule()
     74  if (dest == last)  in ip_vs_rr_schedule()
     89  svc->sched_data = &dest->n_list;  in ip_vs_rr_schedule()
     93  IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),  in ip_vs_rr_schedule()
    [all …]
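ip_vs_rr_schedule() resumes from the previously chosen destination, walks the list once, skips overloaded or zero-weight servers, and fails if it comes back to its starting point (line 74). A sketch over an array, where an index stands in for the kernel's saved list position (svc->sched_data, line 89); struct dest and rr_schedule are illustrative:

    #include <stdbool.h>

    struct dest { int weight; bool overloaded; };

    /* Returns the chosen index, or -1 when every server is overloaded or
     * weightless.  Assumes n > 0 and 0 <= *mark < n. */
    static int rr_schedule(const struct dest *tab, int n, int *mark)
    {
        int i = *mark;

        do {
            i = (i + 1) % n;
            if (!tab[i].overloaded && tab[i].weight > 0) {
                *mark = i;          /* remember where to continue next time */
                return i;
            }
        } while (i != *mark);       /* wrapped around: nothing available */
        return -1;
    }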
|
D | ip_vs_lblc.c |
     92  struct ip_vs_dest *dest;  /* real server (cache) */  member
    136  ip_vs_dest_put_and_free(en->dest);  in ip_vs_lblc_rcu_free()
    199  u16 af, struct ip_vs_dest *dest)  in ip_vs_lblc_new() argument
    205  if (en->dest == dest)  in ip_vs_lblc_new()
    217  ip_vs_dest_hold(dest);  in ip_vs_lblc_new()
    218  en->dest = dest;  in ip_vs_lblc_new()
    402  struct ip_vs_dest *dest, *least;  in __ip_vs_lblc_schedule() local
    417  list_for_each_entry_rcu(dest, &svc->destinations, n_list) {  in __ip_vs_lblc_schedule()
    418  if (dest->flags & IP_VS_DEST_F_OVERLOAD)  in __ip_vs_lblc_schedule()
    420  if (atomic_read(&dest->weight) > 0) {  in __ip_vs_lblc_schedule()
    [all …]
|
D | ip_vs_sed.c |
     43  ip_vs_sed_dest_overhead(struct ip_vs_dest *dest)  in ip_vs_sed_dest_overhead() argument
     49  return atomic_read(&dest->activeconns) + 1;  in ip_vs_sed_dest_overhead()
     60  struct ip_vs_dest *dest, *least;  in ip_vs_sed_schedule() local
     78  list_for_each_entry_rcu(dest, &svc->destinations, n_list) {  in ip_vs_sed_schedule()
     79  if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&  in ip_vs_sed_schedule()
     80  atomic_read(&dest->weight) > 0) {  in ip_vs_sed_schedule()
     81  least = dest;  in ip_vs_sed_schedule()
     93  list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {  in ip_vs_sed_schedule()
     94  if (dest->flags & IP_VS_DEST_F_OVERLOAD)  in ip_vs_sed_schedule()
     96  doh = ip_vs_sed_dest_overhead(dest);  in ip_vs_sed_schedule()
    [all …]
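SED ranks each server by (activeconns + 1) / weight (lines 43-49) and compares the ratios by cross-multiplication, so no division is needed and integer precision is preserved. A condensed single-pass sketch (the kernel splits the work into a first-available pass and a continue pass, lines 78-96; struct dest and sed_schedule are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    struct dest { int active, weight; bool overloaded; };

    static int64_t sed_overhead(const struct dest *d)
    {
        return d->active + 1;               /* lines 43-49 */
    }

    /* Pick the index minimizing (active + 1) / weight; "loh * wd > doh * wl"
     * is the kernel's division-free form of "loh / wl > doh / wd". */
    static int sed_schedule(const struct dest *tab, int n)
    {
        int least = -1;
        int64_t loh = 0;

        for (int i = 0; i < n; i++) {
            const struct dest *d = &tab[i];

            if (d->overloaded || d->weight <= 0)
                continue;
            int64_t doh = sed_overhead(d);
            if (least < 0 || loh * d->weight > doh * tab[least].weight) {
                least = i;
                loh = doh;
            }
        }
        return least;
    }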
|
D | ip_vs_nq.c |
     39  ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)  in ip_vs_nq_dest_overhead() argument
     45  return atomic_read(&dest->activeconns) + 1;  in ip_vs_nq_dest_overhead()
     56  struct ip_vs_dest *dest, *least = NULL;  in ip_vs_nq_schedule() local
     74  list_for_each_entry_rcu(dest, &svc->destinations, n_list) {  in ip_vs_nq_schedule()
     76  if (dest->flags & IP_VS_DEST_F_OVERLOAD ||  in ip_vs_nq_schedule()
     77  !atomic_read(&dest->weight))  in ip_vs_nq_schedule()
     80  doh = ip_vs_nq_dest_overhead(dest);  in ip_vs_nq_schedule()
     83  if (atomic_read(&dest->activeconns) == 0) {  in ip_vs_nq_schedule()
     84  least = dest;  in ip_vs_nq_schedule()
     90  ((__s64)loh * atomic_read(&dest->weight) >  in ip_vs_nq_schedule()
    [all …]
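NQ ("never queue") is SED plus one shortcut: a server with no active connections is taken immediately (lines 83-84), so a new request never waits while some server sits idle. The sketch below adds that shortcut to the same selection loop as the SED sketch above (names again illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    struct dest { int active, weight; bool overloaded; };

    static int nq_schedule(const struct dest *tab, int n)
    {
        int least = -1;
        int64_t loh = 0;

        for (int i = 0; i < n; i++) {
            const struct dest *d = &tab[i];

            if (d->overloaded || d->weight == 0)    /* lines 76-77 */
                continue;
            if (d->active == 0)
                return i;           /* idle server: take it on the spot */
            int64_t doh = d->active + 1;
            if (least < 0 || loh * d->weight > doh * tab[least].weight) {
                least = i;
                loh = doh;
            }
        }
        return least;
    }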
|
D | ip_vs_wlc.c |
     32  struct ip_vs_dest *dest, *least;  in ip_vs_wlc_schedule() local
     50  list_for_each_entry_rcu(dest, &svc->destinations, n_list) {  in ip_vs_wlc_schedule()
     51  if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&  in ip_vs_wlc_schedule()
     52  atomic_read(&dest->weight) > 0) {  in ip_vs_wlc_schedule()
     53  least = dest;  in ip_vs_wlc_schedule()
     65  list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {  in ip_vs_wlc_schedule()
     66  if (dest->flags & IP_VS_DEST_F_OVERLOAD)  in ip_vs_wlc_schedule()
     68  doh = ip_vs_dest_conn_overhead(dest);  in ip_vs_wlc_schedule()
     69  if ((__s64)loh * atomic_read(&dest->weight) >  in ip_vs_wlc_schedule()
     71  least = dest;  in ip_vs_wlc_schedule()
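WLC differs from SED only in the overhead metric: line 68 calls ip_vs_dest_conn_overhead() instead of using (activeconns + 1). A sketch of that helper under the assumption, stated here from the kernel helper and to be treated as unverified, that it weighs an active connection 256 times an inactive one; the winner is then chosen with the same cross-multiplied comparison as in the SED sketch above (lines 69-71):

    #include <stdint.h>

    struct dest { int active, inact; };

    /* Assumed form of ip_vs_dest_conn_overhead():
     * (activeconns << 8) + inactconns. */
    static int64_t conn_overhead(const struct dest *d)
    {
        return ((int64_t)d->active << 8) + d->inact;
    }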
|
D | ip_vs_conn.c |
    561  static inline int ip_vs_dest_totalconns(struct ip_vs_dest *dest)  in ip_vs_dest_totalconns() argument
    563  return atomic_read(&dest->activeconns)  in ip_vs_dest_totalconns()
    564  + atomic_read(&dest->inactconns);  in ip_vs_dest_totalconns()
    572  ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)  in ip_vs_bind_dest() argument
    578  if (!dest)  in ip_vs_bind_dest()
    582  ip_vs_dest_hold(dest);  in ip_vs_bind_dest()
    584  conn_flags = atomic_read(&dest->conn_flags);  in ip_vs_bind_dest()
    600  cp->dest = dest;  in ip_vs_bind_dest()
    611  refcount_read(&dest->refcnt));  in ip_vs_bind_dest()
    620  atomic_inc(&dest->activeconns);  in ip_vs_bind_dest()
    [all …]
|
D | ip_vs_xmit.c |
     73  __ip_vs_dst_set(struct ip_vs_dest *dest, struct ip_vs_dest_dst *dest_dst,  in __ip_vs_dst_set() argument
     78  old = rcu_dereference_protected(dest->dest_dst,  in __ip_vs_dst_set()
     79  lockdep_is_held(&dest->dst_lock));  in __ip_vs_dst_set()
     85  rcu_assign_pointer(dest->dest_dst, dest_dst);  in __ip_vs_dst_set()
     92  __ip_vs_dst_check(struct ip_vs_dest *dest)  in __ip_vs_dst_check() argument
     94  struct ip_vs_dest_dst *dest_dst = rcu_dereference(dest->dest_dst);  in __ip_vs_dst_check()
    308  struct ip_vs_dest *dest,  in __ip_vs_get_out_rt() argument
    318  if (dest) {  in __ip_vs_get_out_rt()
    319  dest_dst = __ip_vs_dst_check(dest);  in __ip_vs_get_out_rt()
    324  spin_lock_bh(&dest->dst_lock);  in __ip_vs_get_out_rt()
    [all …]
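__ip_vs_get_out_rt() keeps a per-destination route cache: readers check it locklessly via __ip_vs_dst_check() (line 319) and only take dest->dst_lock on a miss (line 324) before re-checking and installing a fresh route. A user-space sketch of that double-checked pattern, with C11 acquire/release standing in for rcu_dereference()/rcu_assign_pointer() and a mutex for the spinlock (struct route and lookup_route() are stand-ins; RCU's deferred free of a replaced entry is omitted):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct route { int ifindex; };

    struct dest_cache {
        _Atomic(struct route *) cached;
        pthread_mutex_t lock;
    };

    static struct route *lookup_route(void)    /* stand-in for a routing lookup */
    {
        struct route *rt = malloc(sizeof(*rt));

        if (rt)
            rt->ifindex = 1;
        return rt;
    }

    static struct route *get_out_rt(struct dest_cache *dc)
    {
        struct route *rt = atomic_load_explicit(&dc->cached, memory_order_acquire);

        if (rt)
            return rt;                         /* fast path: no lock taken */
        pthread_mutex_lock(&dc->lock);
        rt = atomic_load_explicit(&dc->cached, memory_order_acquire);
        if (!rt) {                             /* re-check under the lock */
            rt = lookup_route();
            atomic_store_explicit(&dc->cached, rt, memory_order_release);
        }
        pthread_mutex_unlock(&dc->lock);
        return rt;
    }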
|
D | ip_vs_core.c |
    136  struct ip_vs_dest *dest = cp->dest;  in ip_vs_in_stats() local
    139  if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {  in ip_vs_in_stats()
    145  s = this_cpu_ptr(dest->stats.cpustats);  in ip_vs_in_stats()
    151  svc = rcu_dereference(dest->svc);  in ip_vs_in_stats()
    172  struct ip_vs_dest *dest = cp->dest;  in ip_vs_out_stats() local
    175  if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {  in ip_vs_out_stats()
    181  s = this_cpu_ptr(dest->stats.cpustats);  in ip_vs_out_stats()
    187  svc = rcu_dereference(dest->svc);  in ip_vs_out_stats()
    213  s = this_cpu_ptr(cp->dest->stats.cpustats);  in ip_vs_conn_stats()
    270  struct ip_vs_dest *dest;  in ip_vs_sched_persist() local
    [all …]
|
D | ip_vs_fo.c |
     24  struct ip_vs_dest *dest, *hweight = NULL;  in ip_vs_fo_schedule() local
     32  list_for_each_entry_rcu(dest, &svc->destinations, n_list) {  in ip_vs_fo_schedule()
     33  if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&  in ip_vs_fo_schedule()
     34  atomic_read(&dest->weight) > hw) {  in ip_vs_fo_schedule()
     35  hweight = dest;  in ip_vs_fo_schedule()
     36  hw = atomic_read(&dest->weight);  in ip_vs_fo_schedule()
|
D | ip_vs_ovf.c |
     28  struct ip_vs_dest *dest, *h = NULL;  in ip_vs_ovf_schedule() local
     35  list_for_each_entry_rcu(dest, &svc->destinations, n_list) {  in ip_vs_ovf_schedule()
     36  w = atomic_read(&dest->weight);  in ip_vs_ovf_schedule()
     37  if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||  in ip_vs_ovf_schedule()
     38  atomic_read(&dest->activeconns) > w ||  in ip_vs_ovf_schedule()
     42  h = dest;  in ip_vs_ovf_schedule()
|
D | ip_vs_lc.c |
     27  struct ip_vs_dest *dest, *least = NULL;  in ip_vs_lc_schedule() local
     41  list_for_each_entry_rcu(dest, &svc->destinations, n_list) {  in ip_vs_lc_schedule()
     42  if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||  in ip_vs_lc_schedule()
     43  atomic_read(&dest->weight) == 0)  in ip_vs_lc_schedule()
     45  doh = ip_vs_dest_conn_overhead(dest);  in ip_vs_lc_schedule()
     47  least = dest;  in ip_vs_lc_schedule()
|
/net/netfilter/ |
D | nft_meta.c |
     67  u32 *dest = &regs->data[priv->dreg];  in nft_meta_get_eval() local
     71  *dest = skb->len;  in nft_meta_get_eval()
     74  nft_reg_store16(dest, (__force u16)skb->protocol);  in nft_meta_get_eval()
     77  nft_reg_store8(dest, nft_pf(pkt));  in nft_meta_get_eval()
     82  nft_reg_store8(dest, pkt->tprot);  in nft_meta_get_eval()
     85  *dest = skb->priority;  in nft_meta_get_eval()
     88  *dest = skb->mark;  in nft_meta_get_eval()
     91  *dest = in ? in->ifindex : 0;  in nft_meta_get_eval()
     94  *dest = out ? out->ifindex : 0;  in nft_meta_get_eval()
     97  strncpy((char *)dest, in ? in->name : "", IFNAMSIZ);  in nft_meta_get_eval()
    [all …]
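nftables registers are 32-bit words, so meta values narrower than a word go through sized stores (nft_reg_store8()/nft_reg_store16(), lines 74-82) that zero the whole register before writing, while word-sized values are assigned directly. A sketch of what such sized stores do (reg_store8/reg_store16 are illustrative stand-ins for the kernel helpers):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Zero the full 32-bit register, then write the narrow value at its
     * start, so the unused bytes are deterministic for later comparisons. */
    static void reg_store8(uint32_t *reg, uint8_t val)
    {
        *reg = 0;
        memcpy(reg, &val, sizeof(val));
    }

    static void reg_store16(uint32_t *reg, uint16_t val)
    {
        *reg = 0;
        memcpy(reg, &val, sizeof(val));
    }

    int main(void)
    {
        uint32_t regs[4] = { 0 };

        reg_store16(&regs[1], 0x0800);   /* e.g. skb->protocol (line 74) */
        reg_store8(&regs[2], 2);         /* e.g. nft_pf(pkt) (line 77) */
        printf("%08x %08x\n", (unsigned)regs[1], (unsigned)regs[2]);
        return 0;
    }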
|
D | xt_addrtype.c |
     97  if (ret && info->dest)  in addrtype_mt6()
     98  ret &= match_type6(net, dev, &iph->daddr, info->dest) ^  in addrtype_mt6()
    121  if (info->dest)  in addrtype_mt_v0()
    122  ret &= match_type(net, NULL, iph->daddr, info->dest) ^  in addrtype_mt_v0()
    150  if (ret && info->dest)  in addrtype_mt_v1()
    151  ret &= match_type(net, dev, iph->daddr, info->dest) ^  in addrtype_mt_v1()
    181  if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) {  in addrtype_mt_checkentry_v1()
    185  if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) {  in addrtype_mt_checkentry_v1()
    189  if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) {  in addrtype_mt_checkentry_v1()
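Each match function folds an optional per-side negation into one expression: ret &= match ^ invert (lines 98, 122, 151), where the invert flag says the rule was written with "!". XOR-ing the boolean result with the flag handles both the plain and the negated case without branching. A sketch of the idiom (apply_match and the variable names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static bool apply_match(bool matched, bool inverted)
    {
        return matched ^ inverted;   /* flips the result iff the rule is negated */
    }

    int main(void)
    {
        bool ret = true;             /* running AND over all sub-matches */
        bool dst_matches = false;    /* hypothetical address-type lookup result */
        bool invert_dest = true;     /* rule says e.g. "! --dst-type LOCAL" */

        ret &= apply_match(dst_matches, invert_dest);
        printf("rule %s\n", ret ? "matches" : "does not match");
        return 0;
    }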
|
/net/6lowpan/ |
D | nhc_udp.c |
     50  fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));  in udp_uncompress()
     55  uh.dest = htons(val + LOWPAN_NHC_UDP_8BIT_PORT);  in udp_uncompress()
     60  fail |= lowpan_fetch_skb(skb, &uh.dest, sizeof(uh.dest));  in udp_uncompress()
     65  uh.dest = htons(LOWPAN_NHC_UDP_4BIT_PORT + (val & 0x0f));  in udp_uncompress()
     72  ntohs(uh.source), ntohs(uh.dest));  in udp_uncompress()
    123  ((ntohs(uh->dest) & LOWPAN_NHC_UDP_4BIT_MASK) ==  in udp_compress()
    130  tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_4BIT_PORT +  in udp_compress()
    133  } else if ((ntohs(uh->dest) & LOWPAN_NHC_UDP_8BIT_MASK) ==  in udp_compress()
    142  tmp = ntohs(uh->dest) - LOWPAN_NHC_UDP_8BIT_PORT;  in udp_compress()
    154  lowpan_push_hc_data(hc_ptr, &uh->dest, sizeof(uh->dest));  in udp_compress()
    [all …]
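udp_compress()/udp_uncompress() implement RFC 6282 UDP port compression: ports in 0xF0B0-0xF0BF compress to a 4-bit nibble, ports in 0xF000-0xF0FF to one byte, and anything else travels in full. A sketch of the classification arithmetic; the constants mirror the kernel's LOWPAN_NHC_UDP_{4BIT,8BIT}_{PORT,MASK} values as stated by RFC 6282:

    #include <stdint.h>
    #include <stdio.h>

    #define UDP_4BIT_PORT 0xF0B0     /* 0xF0B0-0xF0BF: 4-bit compressed form */
    #define UDP_4BIT_MASK 0xFFF0
    #define UDP_8BIT_PORT 0xF000     /* 0xF000-0xF0FF: 8-bit compressed form */
    #define UDP_8BIT_MASK 0xFF00

    int main(void)
    {
        uint16_t dest = 0xF0B7;      /* destination port, host byte order */

        if ((dest & UDP_4BIT_MASK) == UDP_4BIT_PORT)
            printf("4-bit form: nibble 0x%x\n", dest - UDP_4BIT_PORT);
        else if ((dest & UDP_8BIT_MASK) == UDP_8BIT_PORT)
            printf("8-bit form: byte 0x%02x\n", dest - UDP_8BIT_PORT);
        else
            printf("uncompressed: 0x%04x\n", dest);
        return 0;
    }

Decompression runs the same arithmetic backwards, as lines 55 and 65 show: add the base constant back to the fetched nibble or byte and convert to network byte order.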
|
/net/mac802154/ |
D | rx.c |
     45  switch (mac_cb(skb)->dest.mode) {  in ieee802154_subif_frame()
     55  if (mac_cb(skb)->dest.pan_id != span &&  in ieee802154_subif_frame()
     56  mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST))  in ieee802154_subif_frame()
     58  else if (mac_cb(skb)->dest.extended_addr == wpan_dev->extended_addr)  in ieee802154_subif_frame()
     64  if (mac_cb(skb)->dest.pan_id != span &&  in ieee802154_subif_frame()
     65  mac_cb(skb)->dest.pan_id != cpu_to_le16(IEEE802154_PANID_BROADCAST))  in ieee802154_subif_frame()
     67  else if (mac_cb(skb)->dest.short_addr == sshort)  in ieee802154_subif_frame()
     69  else if (mac_cb(skb)->dest.short_addr ==  in ieee802154_subif_frame()
    152  ieee802154_print_addr("destination", &hdr->dest);  in ieee802154_parse_frame_start()
    156  cb->dest = hdr->dest;  in ieee802154_parse_frame_start()
|
/net/ax25/ |
D | ax25_in.c |
    186  ax25_address src, dest, *next_digi = NULL;  in ax25_rcv() local
    206  if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL)  in ax25_rcv()
    221  if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)  in ax25_rcv()
    225  if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)  in ax25_rcv()
    232  ax25_send_to_raw(&dest, skb, skb->data[1]);  in ax25_rcv()
    234  if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0)  in ax25_rcv()
    260  sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);  in ax25_rcv()
    303  if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {  in ax25_rcv()
    328  ax25_return_dm(dev, &src, &dest, &dp);  in ax25_rcv()
    336  sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);  in ax25_rcv()
    [all …]
|
/net/sctp/ |
D | bind_addr.c |
     33  static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest,
     43  int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,  in sctp_bind_addr_copy() argument
     52  dest->port = src->port;  in sctp_bind_addr_copy()
     56  error = sctp_copy_one_addr(net, dest, &addr->a, scope,  in sctp_bind_addr_copy()
     66  if (list_empty(&dest->address_list) && (SCTP_SCOPE_GLOBAL == scope)) {  in sctp_bind_addr_copy()
     68  error = sctp_copy_one_addr(net, dest, &addr->a,  in sctp_bind_addr_copy()
     79  if (list_empty(&dest->address_list))  in sctp_bind_addr_copy()
     84  sctp_bind_addr_clean(dest);  in sctp_bind_addr_copy()
     94  int sctp_bind_addr_dup(struct sctp_bind_addr *dest,  in sctp_bind_addr_dup() argument
    102  dest->port = src->port;  in sctp_bind_addr_dup()
    [all …]
|
/net/sunrpc/auth_gss/ |
D | auth_gss_internal.h |
     26  simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)  in simple_get_netobj() argument
     38  dest->data = kmemdup(p, len, GFP_NOFS);  in simple_get_netobj()
     39  if (unlikely(dest->data == NULL))  in simple_get_netobj()
     42  dest->data = NULL;  in simple_get_netobj()
     43  dest->len = len;  in simple_get_netobj()
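simple_get_netobj() parses an XDR netobj: a 32-bit length followed by that many bytes, with the length validated against the end of the buffer before anything is copied, and dest->len set only once dest->data is valid so no half-initialized object is left behind. A user-space sketch of the same shape (get_netobj and struct netobj are illustrative; malloc/memcpy stand in for kmemdup(p, len, GFP_NOFS)):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct netobj { uint32_t len; uint8_t *data; };

    static int get_netobj(const uint8_t *p, const uint8_t *end, struct netobj *dest)
    {
        uint32_t len;

        if (end - p < 4)
            return -1;                      /* too short to hold the length */
        len = (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
              (uint32_t)p[2] << 8 | p[3];   /* XDR lengths are big-endian */
        p += 4;
        if ((size_t)(end - p) < len)
            return -1;                      /* truncated payload: reject */
        if (len) {
            dest->data = malloc(len);       /* kernel: kmemdup() (line 38) */
            if (!dest->data)
                return -1;
            memcpy(dest->data, p, len);
        } else {
            dest->data = NULL;              /* empty object (line 42) */
        }
        dest->len = len;                    /* set last, after data is valid */
        return 0;
    }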
|
/net/dsa/ |
D | tag_mtk.c |
     27  unsigned char *dest = eth_hdr(skb)->h_dest;  in mtk_tag_xmit() local
     28  bool is_multicast_skb = is_multicast_ether_addr(dest) &&  in mtk_tag_xmit()
     29  !is_broadcast_ether_addr(dest);  in mtk_tag_xmit()
     79  unsigned char *dest = eth_hdr(skb)->h_dest;  in mtk_tag_rcv() local
     80  bool is_multicast_skb = is_multicast_ether_addr(dest) &&  in mtk_tag_rcv()
     81  !is_broadcast_ether_addr(dest);  in mtk_tag_rcv()
|