/net/netfilter/

D | nf_conntrack_standalone.c |
    107  struct ct_iter_state *st = seq->private;  in ct_get_first() local
    110  for (st->bucket = 0;  in ct_get_first()
    111  st->bucket < st->htable_size;  in ct_get_first()
    112  st->bucket++) {  in ct_get_first()
    114  hlist_nulls_first_rcu(&st->hash[st->bucket]));  in ct_get_first()
    124  struct ct_iter_state *st = seq->private;  in ct_get_next() local
    128  if (likely(get_nulls_value(head) == st->bucket)) {  in ct_get_next()
    129  if (++st->bucket >= st->htable_size)  in ct_get_next()
    133  hlist_nulls_first_rcu(&st->hash[st->bucket]));  in ct_get_next()
    151  struct ct_iter_state *st = seq->private;  in ct_seq_start() local
    [all …]
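The nf_conntrack_standalone.c hits above are the classic /proc seq_file bucket-iterator idiom: the iterator state caches the hash bucket the previous read stopped in, and get_first() scans forward to the first non-empty bucket. A minimal sketch of that half of the pattern; the `iter_state`, `hash`, and `htable_size` names here are illustrative stand-ins, not the conntrack originals:

    #include <linux/rculist_nulls.h>
    #include <linux/seq_file.h>

    /* Hypothetical stand-ins for the real conntrack table: */
    extern struct hlist_nulls_head hash[];  /* assumed global table  */
    extern unsigned int htable_size;        /* assumed bucket count  */

    struct iter_state {
            unsigned int bucket;    /* where the previous read stopped */
    };

    static struct hlist_nulls_node *iter_get_first(struct seq_file *seq)
    {
            struct iter_state *st = seq->private;
            struct hlist_nulls_node *n;

            /* Scan forward to the first non-empty bucket; the caller is
             * expected to hold rcu_read_lock() for the whole traversal. */
            for (st->bucket = 0; st->bucket < htable_size; st->bucket++) {
                    n = rcu_dereference(
                            hlist_nulls_first_rcu(&hash[st->bucket]));
                    if (!is_a_nulls(n))
                            return n;       /* first live entry */
            }
            return NULL;                    /* table is empty */
    }

The nulls-list markers matter here: get_nulls_value(head) == st->bucket (hit at line 128) lets get_next() detect that an RCU-concurrent rehash moved it to a different chain.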
D | xt_recent.c |
    482  struct recent_iter_state *st = seq->private;  in recent_seq_start() local
    483  const struct recent_table *t = st->table;  in recent_seq_start()
    489  for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++)  in recent_seq_start()
    490  list_for_each_entry(e, &t->iphash[st->bucket], list)  in recent_seq_start()
    498  struct recent_iter_state *st = seq->private;  in recent_seq_next() local
    499  const struct recent_table *t = st->table;  in recent_seq_next()
    504  while (head == &t->iphash[st->bucket]) {  in recent_seq_next()
    505  if (++st->bucket >= ip_list_hash_size)  in recent_seq_next()
    507  head = t->iphash[st->bucket].next;  in recent_seq_next()
    521  struct recent_iter_state *st = seq->private;  in recent_seq_show() local
    [all …]
D | nfnetlink_log.c |
    1011  static struct hlist_node *get_first(struct net *net, struct iter_state *st)  in get_first() argument
    1014  if (!st)  in get_first()
    1019  for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {  in get_first()
    1020  struct hlist_head *head = &log->instance_table[st->bucket];  in get_first()
    1028  static struct hlist_node *get_next(struct net *net, struct iter_state *st,  in get_next() argument
    1036  if (++st->bucket >= INSTANCE_BUCKETS)  in get_next()
    1040  head = &log->instance_table[st->bucket];  in get_next()
    1046  static struct hlist_node *get_idx(struct net *net, struct iter_state *st,  in get_idx() argument
    1050  head = get_first(net, st);  in get_idx()
    1053  while (pos && (head = get_next(net, st, head)))  in get_idx()
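get_idx() in the nfnetlink_log.c hits is the piece that maps a flat seq_file position onto the two-level bucket walk: start at the first entry, then step `pos` times. Reassembled from the fragments at lines 1046/1050/1053, with the elided stepping logic filled in as it plausibly reads (a sketch, assuming the get_first()/get_next() helpers shown above):

    static struct hlist_node *get_idx(struct net *net, struct iter_state *st,
                                      loff_t pos)
    {
            struct hlist_node *head = get_first(net, st);

            if (head)
                    while (pos && (head = get_next(net, st, head)))
                            pos--;

            /* If pos is still non-zero we ran off the end. */
            return pos ? NULL : head;
    }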
D | nf_conntrack_expect.c |
    552  struct ct_expect_iter_state *st = seq->private;  in ct_expect_get_first() local
    555  for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {  in ct_expect_get_first()
    556  n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));  in ct_expect_get_first()
    566  struct ct_expect_iter_state *st = seq->private;  in ct_expect_get_next() local
    570  if (++st->bucket >= nf_ct_expect_hsize)  in ct_expect_get_next()
    572  head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));  in ct_expect_get_next()
D | nfnetlink_queue.c |
    1418  struct iter_state *st = seq->private;  in get_first() local
    1422  if (!st)  in get_first()
    1427  for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {  in get_first()
    1428  if (!hlist_empty(&q->instance_table[st->bucket]))  in get_first()
    1429  return q->instance_table[st->bucket].first;  in get_first()
    1436  struct iter_state *st = seq->private;  in get_next() local
    1443  if (++st->bucket >= INSTANCE_BUCKETS)  in get_next()
    1447  h = q->instance_table[st->bucket].first;  in get_next()
D | nf_conntrack_netlink.c |
    2190  __u16 cpu, const struct ip_conntrack_stat *st)  in ctnetlink_ct_stat_cpu_fill_info() argument
    2202  if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||  in ctnetlink_ct_stat_cpu_fill_info()
    2203  nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||  in ctnetlink_ct_stat_cpu_fill_info()
    2204  nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||  in ctnetlink_ct_stat_cpu_fill_info()
    2205  nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||  in ctnetlink_ct_stat_cpu_fill_info()
    2207  htonl(st->insert_failed)) ||  in ctnetlink_ct_stat_cpu_fill_info()
    2208  nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||  in ctnetlink_ct_stat_cpu_fill_info()
    2209  nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||  in ctnetlink_ct_stat_cpu_fill_info()
    2210  nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||  in ctnetlink_ct_stat_cpu_fill_info()
    2212  htonl(st->search_restart)))  in ctnetlink_ct_stat_cpu_fill_info()
    [all …]
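The ctnetlink hits show the standard netlink attribute-fill pattern: each per-CPU counter becomes a big-endian 32-bit attribute, and the `||` chain short-circuits on the first nla_put_be32() that no longer fits in the skb. A trimmed sketch keeping three of the CTA_STATS_* attributes, with error handling simplified (the real function jumps to an nla_put_failure label):

    #include <net/netlink.h>
    #include <linux/netfilter/nfnetlink_conntrack.h>

    static int stat_fill_sketch(struct sk_buff *skb,
                                const struct ip_conntrack_stat *st)
    {
            /* nla_put_be32() returns non-zero when the skb is full, so
             * the chain stops at the first attribute that won't fit. */
            if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
                nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
                nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)))
                    return -EMSGSIZE;   /* real code: goto nla_put_failure */

            return 0;
    }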
D | x_tables.c |
    822  const struct compat_xt_standard_target *st = (const void *)t;  in xt_compat_check_entry_offsets() local
    824  if (COMPAT_XT_ALIGN(target_offset + sizeof(*st)) != next_offset)  in xt_compat_check_entry_offsets()
    827  if (!verdict_ok(st->verdict))  in xt_compat_check_entry_offsets()
    916  const struct xt_standard_target *st = (const void *)t;  in xt_check_entry_offsets() local
    918  if (XT_ALIGN(target_offset + sizeof(*st)) != next_offset)  in xt_check_entry_offsets()
    921  if (!verdict_ok(st->verdict))  in xt_check_entry_offsets()
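Both x_tables.c hits implement the same sanity check, once for the compat layout (COMPAT_XT_ALIGN) and once for the native one (XT_ALIGN): a standard, verdict-carrying target must end exactly at `next_offset`, and its verdict must be a legal value. The native variant, reassembled as a sketch (verdict_ok() is the file's local helper, visible being called in the hit at line 921):

    #include <linux/netfilter/x_tables.h>

    static bool verdict_ok(int verdict);    /* local helper in x_tables.c */

    static bool standard_target_check(const struct xt_entry_target *t,
                                      unsigned int target_offset,
                                      unsigned int next_offset)
    {
            const struct xt_standard_target *st = (const void *)t;

            /* Anything between the end of the target and next_offset
             * would be unvalidated garbage, so require an exact,
             * aligned fit. */
            if (XT_ALIGN(target_offset + sizeof(*st)) != next_offset)
                    return false;

            return verdict_ok(st->verdict);
    }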
/net/ipv4/

D | tcp_ipv4.c |
    2175  struct tcp_iter_state *st = seq->private;  in listening_get_next() local
    2183  ilb = &tcp_hashinfo.listening_hash[st->bucket];  in listening_get_next()
    2186  st->offset = 0;  in listening_get_next()
    2189  ilb = &tcp_hashinfo.listening_hash[st->bucket];  in listening_get_next()
    2190  ++st->num;  in listening_get_next()
    2191  ++st->offset;  in listening_get_next()
    2202  st->offset = 0;  in listening_get_next()
    2203  if (++st->bucket < INET_LHTABLE_SIZE)  in listening_get_next()
    2210  struct tcp_iter_state *st = seq->private;  in listening_get_idx() local
    2213  st->bucket = 0;  in listening_get_idx()
    [all …]
D | ip_input.c |
    353  struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);  in ip_rcv_finish_core() local
    355  st[idx&0xFF].o_packets++;  in ip_rcv_finish_core()
    356  st[idx&0xFF].o_bytes += skb->len;  in ip_rcv_finish_core()
    357  st[(idx>>16)&0xFF].i_packets++;  in ip_rcv_finish_core()
    358  st[(idx>>16)&0xFF].i_bytes += skb->len;  in ip_rcv_finish_core()
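The ip_input.c hits update CONFIG_IP_ROUTE_CLASSID accounting. Judging by the masks above, `idx` is the route's tclassid, with the low byte indexing the output class and bits 16-23 indexing the input class in a 256-entry per-CPU array. Factored into one hypothetical helper:

    #include <net/route.h>

    static void rt_acct_update(struct ip_rt_acct *st, u32 idx,
                               unsigned int len)
    {
            st[idx & 0xFF].o_packets++;             /* output class: low byte   */
            st[idx & 0xFF].o_bytes += len;
            st[(idx >> 16) & 0xFF].i_packets++;     /* input class: bits 16-23  */
            st[(idx >> 16) & 0xFF].i_bytes += len;
    }

Because `st` came from this_cpu_ptr(), the increments need no locking; each CPU owns its own table.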
/net/ipv6/

D | addrconf_core.c |
    40  __be32 st;  in __ipv6_addr_type() local
    42  st = addr->s6_addr32[0];  in __ipv6_addr_type()
    47  if ((st & htonl(0xE0000000)) != htonl(0x00000000) &&  in __ipv6_addr_type()
    48  (st & htonl(0xE0000000)) != htonl(0xE0000000))  in __ipv6_addr_type()
    52  if ((st & htonl(0xFF000000)) == htonl(0xFF000000)) {  in __ipv6_addr_type()
    59  if ((st & htonl(0xFFC00000)) == htonl(0xFE800000))  in __ipv6_addr_type()
    62  if ((st & htonl(0xFFC00000)) == htonl(0xFEC00000))  in __ipv6_addr_type()
    65  if ((st & htonl(0xFE000000)) == htonl(0xFC000000))  in __ipv6_addr_type()
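__ipv6_addr_type() classifies an address from its first 32 bits alone, and every comparison is done in network byte order, which is why both mask and constant go through htonl(). A cut-down sketch covering three of the prefixes tested above; the real function also folds in scope bits and several more cases:

    #include <net/ipv6.h>

    static int addr_type_sketch(const struct in6_addr *addr)
    {
            __be32 st = addr->s6_addr32[0];

            if ((st & htonl(0xFF000000)) == htonl(0xFF000000))
                    return IPV6_ADDR_MULTICAST;             /* ff00::/8  */

            if ((st & htonl(0xFFC00000)) == htonl(0xFE800000))
                    return IPV6_ADDR_LINKLOCAL |
                           IPV6_ADDR_UNICAST;               /* fe80::/10 */

            if ((st & htonl(0xFE000000)) == htonl(0xFC000000))
                    return IPV6_ADDR_UNICAST;               /* fc00::/7  */

            return IPV6_ADDR_UNICAST;       /* simplified default */
    }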
/net/sched/

D | sch_fq.c |
    940  struct tc_fq_qd_stats st;  in fq_dump_stats() local
    944  st.gc_flows = q->stat_gc_flows;  in fq_dump_stats()
    945  st.highprio_packets = q->stat_internal_packets;  in fq_dump_stats()
    946  st.tcp_retrans = 0;  in fq_dump_stats()
    947  st.throttled = q->stat_throttled;  in fq_dump_stats()
    948  st.flows_plimit = q->stat_flows_plimit;  in fq_dump_stats()
    949  st.pkts_too_long = q->stat_pkts_too_long;  in fq_dump_stats()
    950  st.allocation_errors = q->stat_allocation_errors;  in fq_dump_stats()
    951  st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();  in fq_dump_stats()
    952  st.flows = q->flows;  in fq_dump_stats()
    [all …]
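Every sch_*.c hit below follows the same two-step xstats recipe seen here in sch_fq.c: copy the qdisc's private counters into the uapi stats struct, then hand that struct to gnet_stats_copy_app() (net/core/gen_stats.c, further down), which attaches it as TCA_STATS_APP. A compressed sketch of the fq case with a few representative fields, assuming it lives inside sch_fq.c where the private struct fq_sched_data is visible:

    static int fq_dump_stats_sketch(struct Qdisc *sch, struct gnet_dump *d)
    {
            struct fq_sched_data *q = qdisc_priv(sch);
            struct tc_fq_qd_stats st;

            memset(&st, 0, sizeof(st));     /* never leak stack to userspace */

            st.gc_flows  = q->stat_gc_flows;
            st.throttled = q->stat_throttled;
            st.flows     = q->flows;
            /* Exported as a delta so userspace needn't share the clock: */
            st.time_next_delayed_flow =
                    q->time_next_delayed_flow - ktime_get_ns();

            return gnet_stats_copy_app(d, &st, sizeof(st));
    }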
D | sch_fq_codel.c |
    562  struct tc_fq_codel_xstats st = {  in fq_codel_dump_stats() local
    567  st.qdisc_stats.maxpacket = q->cstats.maxpacket;  in fq_codel_dump_stats()
    568  st.qdisc_stats.drop_overlimit = q->drop_overlimit;  in fq_codel_dump_stats()
    569  st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;  in fq_codel_dump_stats()
    570  st.qdisc_stats.new_flow_count = q->new_flow_count;  in fq_codel_dump_stats()
    571  st.qdisc_stats.ce_mark = q->cstats.ce_mark;  in fq_codel_dump_stats()
    572  st.qdisc_stats.memory_usage = q->memory_usage;  in fq_codel_dump_stats()
    573  st.qdisc_stats.drop_overmemory = q->drop_overmemory;  in fq_codel_dump_stats()
    577  st.qdisc_stats.new_flows_len++;  in fq_codel_dump_stats()
    580  st.qdisc_stats.old_flows_len++;  in fq_codel_dump_stats()
    [all …]
D | sch_codel.c |
    248  struct tc_codel_xstats st = {  in codel_dump_stats() local
    263  st.drop_next = codel_time_to_us(delta);  in codel_dump_stats()
    265  st.drop_next = -codel_time_to_us(-delta);  in codel_dump_stats()
    268  return gnet_stats_copy_app(d, &st, sizeof(st));  in codel_dump_stats()
D | sch_red.c |
    340  struct tc_red_xstats st = {0};  in red_dump_stats() local
    354  st.early = q->stats.prob_drop + q->stats.forced_drop;  in red_dump_stats()
    355  st.pdrop = q->stats.pdrop;  in red_dump_stats()
    356  st.other = q->stats.other;  in red_dump_stats()
    357  st.marked = q->stats.prob_mark + q->stats.forced_mark;  in red_dump_stats()
    359  return gnet_stats_copy_app(d, &st, sizeof(st));  in red_dump_stats()
D | sch_sfb.c |
    600  struct tc_sfb_xstats st = {  in sfb_dump_stats() local
    609  st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);  in sfb_dump_stats()
    611  return gnet_stats_copy_app(d, &st, sizeof(st));  in sfb_dump_stats()
D | sch_choke.c |
    471  struct tc_choke_xstats st = {  in choke_dump_stats() local
    479  return gnet_stats_copy_app(d, &st, sizeof(st));  in choke_dump_stats()
D | sch_pie.c |
    513  struct tc_pie_xstats st = {  in pie_dump_stats() local
    527  return gnet_stats_copy_app(d, &st, sizeof(st));  in pie_dump_stats()
D | sch_hhf.c |
    683  struct tc_hhf_xstats st = {  in hhf_dump_stats() local
    690  return gnet_stats_copy_app(d, &st, sizeof(st));  in hhf_dump_stats()
/net/bridge/netfilter/

D | ebt_stp.c |
    142  const struct stp_config_pdu *st;  in ebt_stp_mt() local
    145  st = skb_header_pointer(skb, sizeof(_stph),  in ebt_stp_mt()
    147  if (st == NULL)  in ebt_stp_mt()
    149  return ebt_filter_config(info, st);  in ebt_stp_mt()
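ebt_stp.c shows the canonical skb_header_pointer() idiom: request a fixed number of bytes at an offset, passing a stack buffer. You either get a direct pointer into the skb's linear data or the bytes copied into your buffer; NULL means the packet is too short to contain them. As a sketch (struct stp_config_pdu is ebt_stp.c's local PDU layout, assumed visible here, and the offset argument is illustrative):

    #include <linux/skbuff.h>

    static bool match_stp_config(const struct sk_buff *skb,
                                 unsigned int offset)
    {
            struct stp_config_pdu _pdu;
            const struct stp_config_pdu *st;

            /* Returns &_pdu (bytes copied) or a pointer into the skb;
             * NULL if fewer than sizeof(_pdu) bytes are available. */
            st = skb_header_pointer(skb, offset, sizeof(_pdu), &_pdu);
            if (!st)
                    return false;   /* truncated packet */

            /* ... compare st-> fields against the match's config ... */
            return true;
    }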
/net/core/

D | skbuff.c |
    3473  unsigned int to, struct skb_seq_state *st)  in skb_prepare_seq_read() argument
    3475  st->lower_offset = from;  in skb_prepare_seq_read()
    3476  st->upper_offset = to;  in skb_prepare_seq_read()
    3477  st->root_skb = st->cur_skb = skb;  in skb_prepare_seq_read()
    3478  st->frag_idx = st->stepped_offset = 0;  in skb_prepare_seq_read()
    3479  st->frag_data = NULL;  in skb_prepare_seq_read()
    3509  struct skb_seq_state *st)  in skb_seq_read() argument
    3511  unsigned int block_limit, abs_offset = consumed + st->lower_offset;  in skb_seq_read()
    3514  if (unlikely(abs_offset >= st->upper_offset)) {  in skb_seq_read()
    3515  if (st->frag_data) {  in skb_seq_read()
    [all …]
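skbuff.c defines the sequential-read state machine consumed by batadv_skb_crc32() in /net/batman-adv/ below: skb_prepare_seq_read() records the [from, to) window in the state, and each skb_seq_read() call yields the next contiguous chunk, whether it lives in the linear area, a page frag, or the frag_list, until 0 signals the end. A sketch of the consuming loop, mirroring the batman-adv usage:

    #include <linux/crc32.h>
    #include <linux/skbuff.h>

    static u32 skb_window_crc32(struct sk_buff *skb, unsigned int from,
                                unsigned int to)
    {
            struct skb_seq_state st;
            unsigned int len, consumed = 0;
            const u8 *data;
            u32 crc = 0;

            skb_prepare_seq_read(skb, from, to, &st);
            while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                    crc = crc32(crc, data, len);    /* fold in this chunk */
                    consumed += len;
            }
            /* skb_abort_seq_read(&st) is only needed when stopping early;
             * reading to the end tears the state down itself. */
            return crc;
    }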
D | neighbour.c |
    2097  struct neigh_statistics *st;  in neightbl_fill_info() local
    2099  st = per_cpu_ptr(tbl->stats, cpu);  in neightbl_fill_info()
    2100  ndst.ndts_allocs += READ_ONCE(st->allocs);  in neightbl_fill_info()
    2101  ndst.ndts_destroys += READ_ONCE(st->destroys);  in neightbl_fill_info()
    2102  ndst.ndts_hash_grows += READ_ONCE(st->hash_grows);  in neightbl_fill_info()
    2103  ndst.ndts_res_failed += READ_ONCE(st->res_failed);  in neightbl_fill_info()
    2104  ndst.ndts_lookups += READ_ONCE(st->lookups);  in neightbl_fill_info()
    2105  ndst.ndts_hits += READ_ONCE(st->hits);  in neightbl_fill_info()
    2106  ndst.ndts_rcv_probes_mcast += READ_ONCE(st->rcv_probes_mcast);  in neightbl_fill_info()
    2107  ndst.ndts_rcv_probes_ucast += READ_ONCE(st->rcv_probes_ucast);  in neightbl_fill_info()
    [all …]
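neightbl_fill_info() sums lock-free per-CPU counters for the netlink dump; READ_ONCE() pairs with the writers' plain increments so the compiler cannot tear or refetch the loads. The shape of the loop, reduced to a single counter as a sketch:

    #include <net/neighbour.h>

    static unsigned long neigh_total_lookups(struct neigh_table *tbl)
    {
            unsigned long total = 0;
            int cpu;

            for_each_possible_cpu(cpu) {
                    const struct neigh_statistics *st =
                            per_cpu_ptr(tbl->stats, cpu);

                    /* Writers bump st->lookups locklessly on their own
                     * CPU; READ_ONCE() makes this cross-CPU read safe. */
                    total += READ_ONCE(st->lookups);
            }
            return total;
    }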
D | gen_stats.c |
    370  gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)  in gnet_stats_copy_app() argument
    373  d->xstats = kmemdup(st, len, GFP_ATOMIC);  in gnet_stats_copy_app()
    380  return gnet_stats_copy(d, TCA_STATS_APP, st, len,  in gnet_stats_copy_app()
/net/mac80211/

D | rate.c |
    64  struct ieee80211_tx_status *st)  in rate_control_tx_status() argument
    67  struct sta_info *sta = container_of(st->sta, struct sta_info, sta);  in rate_control_tx_status()
    75  ref->ops->tx_status_ext(ref->priv, sband, priv_sta, st);  in rate_control_tx_status()
    76  else if (st->skb)  in rate_control_tx_status()
    77  ref->ops->tx_status(ref->priv, sband, st->sta, priv_sta, st->skb);  in rate_control_tx_status()
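rate_control_tx_status() bridges two generations of the rate-control driver API: algorithms that implement tx_status_ext() receive the full ieee80211_tx_status, while legacy ones get the skb-based callback. The dispatch in outline, as a sketch assuming the mac80211-internal struct rate_control_ref declared in rate.h (next entry):

    static void tx_status_dispatch(struct rate_control_ref *ref,
                                   struct ieee80211_supported_band *sband,
                                   void *priv_sta,
                                   struct ieee80211_tx_status *st)
    {
            if (ref->ops->tx_status_ext)
                    /* New-style callback: gets the whole status struct. */
                    ref->ops->tx_status_ext(ref->priv, sband, priv_sta, st);
            else if (st->skb)
                    /* Legacy callback: only the skb and station. */
                    ref->ops->tx_status(ref->priv, sband, st->sta,
                                        priv_sta, st->skb);
    }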
D | rate.h |
    30  struct ieee80211_tx_status *st);  in rate.h
/net/batman-adv/

D | main.c |
    644  struct skb_seq_state st;  in batadv_skb_crc32() local
    651  skb_prepare_seq_read(skb, from, to, &st);  in batadv_skb_crc32()
    652  while ((len = skb_seq_read(consumed, &data, &st)) != 0) {  in batadv_skb_crc32()