/net/netfilter/ |
D | nf_conntrack_standalone.c |
    110  struct ct_iter_state *st = seq->private;  in ct_get_first() local
    113  for (st->bucket = 0;  in ct_get_first()
    114  st->bucket < st->htable_size;  in ct_get_first()
    115  st->bucket++) {  in ct_get_first()
    117  hlist_nulls_first_rcu(&st->hash[st->bucket]));  in ct_get_first()
    127  struct ct_iter_state *st = seq->private;  in ct_get_next() local
    131  if (likely(get_nulls_value(head) == st->bucket)) {  in ct_get_next()
    132  if (++st->bucket >= st->htable_size)  in ct_get_next()
    136  hlist_nulls_first_rcu(&st->hash[st->bucket]));  in ct_get_next()
    154  struct ct_iter_state *st = seq->private;  in ct_seq_start() local
    [all …]
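The lines above are the standard /proc seq_file walk over an RCU-protected nulls hash: the iterator keeps only a bucket cursor in seq->private, and ct_get_next() re-scans the current bucket whenever the nulls marker it reaches does not belong to that bucket (the entry was moved by a concurrent rehash). A minimal sketch of that restart logic, with hypothetical names (my_iter_state, my_get_first, my_get_next) and assuming kernel context:

static struct hlist_nulls_node *my_get_first(struct my_iter_state *st)
{
        struct hlist_nulls_node *n;

        for (st->bucket = 0; st->bucket < st->htable_size; st->bucket++) {
                n = rcu_dereference(hlist_nulls_first_rcu(&st->hash[st->bucket]));
                if (!is_a_nulls(n))
                        return n;       /* first live entry in this bucket */
        }
        return NULL;
}

static struct hlist_nulls_node *my_get_next(struct my_iter_state *st,
                                            struct hlist_nulls_node *head)
{
        head = rcu_dereference(hlist_nulls_next_rcu(head));
        while (is_a_nulls(head)) {
                /* The nulls marker encodes the bucket it terminates. If it
                 * is ours, the bucket is done and we advance; if not, the
                 * entry moved under us and the bucket must be rescanned. */
                if (likely(get_nulls_value(head) == st->bucket)) {
                        if (++st->bucket >= st->htable_size)
                                return NULL;
                }
                head = rcu_dereference(hlist_nulls_first_rcu(&st->hash[st->bucket]));
        }
        return head;
}

The same bucket-cursor shape recurs in the xt_recent, nfnetlink, expectation-table, and TCP-listener walkers listed below.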
|
D | xt_recent.c |
    482  struct recent_iter_state *st = seq->private;  in recent_seq_start() local
    483  const struct recent_table *t = st->table;  in recent_seq_start()
    489  for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++)  in recent_seq_start()
    490  list_for_each_entry(e, &t->iphash[st->bucket], list)  in recent_seq_start()
    498  struct recent_iter_state *st = seq->private;  in recent_seq_next() local
    499  const struct recent_table *t = st->table;  in recent_seq_next()
    504  while (head == &t->iphash[st->bucket]) {  in recent_seq_next()
    505  if (++st->bucket >= ip_list_hash_size)  in recent_seq_next()
    507  head = t->iphash[st->bucket].next;  in recent_seq_next()
    521  struct recent_iter_state *st = seq->private;  in recent_seq_show() local
    [all …]
|
D | nfnetlink_log.c |
    1018  static struct hlist_node *get_first(struct net *net, struct iter_state *st)  in get_first() argument
    1021  if (!st)  in get_first()
    1026  for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {  in get_first()
    1027  struct hlist_head *head = &log->instance_table[st->bucket];  in get_first()
    1035  static struct hlist_node *get_next(struct net *net, struct iter_state *st,  in get_next() argument
    1043  if (++st->bucket >= INSTANCE_BUCKETS)  in get_next()
    1047  head = &log->instance_table[st->bucket];  in get_next()
    1053  static struct hlist_node *get_idx(struct net *net, struct iter_state *st,  in get_idx() argument
    1057  head = get_first(net, st);  in get_idx()
    1060  while (pos && (head = get_next(net, st, head)))  in get_idx()
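get_idx() shows how the seq_file ->start() position is resolved here: walk pos steps forward from the first non-empty bucket. A compressed sketch of that convention (helper names hypothetical, building on the get_first()/get_next() shape above):

static struct hlist_node *my_get_idx(struct net *net, struct iter_state *st,
                                     loff_t pos)
{
        struct hlist_node *head = my_get_first(net, st);

        /* Burn off 'pos' entries; O(pos) per ->start() call, which is
         * acceptable for a small, rarely-read instance table. */
        if (head)
                while (pos && (head = my_get_next(net, st, head)))
                        pos--;
        return pos ? NULL : head;
}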
|
D | nf_conntrack_expect.c |
    572  struct ct_expect_iter_state *st = seq->private;  in ct_expect_get_first() local
    575  for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {  in ct_expect_get_first()
    576  n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));  in ct_expect_get_first()
    586  struct ct_expect_iter_state *st = seq->private;  in ct_expect_get_next() local
    590  if (++st->bucket >= nf_ct_expect_hsize)  in ct_expect_get_next()
    592  head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));  in ct_expect_get_next()
|
D | nfnetlink_queue.c |
    1427  struct iter_state *st = seq->private;  in get_first() local
    1431  if (!st)  in get_first()
    1436  for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {  in get_first()
    1437  if (!hlist_empty(&q->instance_table[st->bucket]))  in get_first()
    1438  return q->instance_table[st->bucket].first;  in get_first()
    1445  struct iter_state *st = seq->private;  in get_next() local
    1452  if (++st->bucket >= INSTANCE_BUCKETS)  in get_next()
    1456  h = q->instance_table[st->bucket].first;  in get_next()
|
D | nf_conntrack_netlink.c |
    2507  __u16 cpu, const struct ip_conntrack_stat *st)  in ctnetlink_ct_stat_cpu_fill_info() argument
    2519  if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||  in ctnetlink_ct_stat_cpu_fill_info()
    2520  nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||  in ctnetlink_ct_stat_cpu_fill_info()
    2521  nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||  in ctnetlink_ct_stat_cpu_fill_info()
    2523  htonl(st->insert_failed)) ||  in ctnetlink_ct_stat_cpu_fill_info()
    2524  nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||  in ctnetlink_ct_stat_cpu_fill_info()
    2525  nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||  in ctnetlink_ct_stat_cpu_fill_info()
    2526  nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||  in ctnetlink_ct_stat_cpu_fill_info()
    2528  htonl(st->search_restart)) ||  in ctnetlink_ct_stat_cpu_fill_info()
    2530  htonl(st->clash_resolve)) ||  in ctnetlink_ct_stat_cpu_fill_info()
    [all …]
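This fill function packs one CPU's conntrack counters into netlink attributes: every counter goes out as a big-endian u32, and the ||-chain means the first failed nla_put_be32() jumps straight to the cancel path. An abridged sketch of that shape (a subset of attributes only; the real function also builds the nfgenmsg header and cancels the half-built message on error):

static int my_stat_fill(struct sk_buff *skb, const struct ip_conntrack_stat *st)
{
        if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
            nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
            nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;       /* caller unwinds the partial message */
}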
|
/net/ipv4/ |
D | tcp_ipv4.c |
    2336  struct tcp_iter_state *st = seq->private;  in listening_get_first() local
    2338  st->offset = 0;  in listening_get_first()
    2339  for (; st->bucket <= tcp_hashinfo.lhash2_mask; st->bucket++) {  in listening_get_first()
    2344  ilb2 = &tcp_hashinfo.lhash2[st->bucket];  in listening_get_first()
    2367  struct tcp_iter_state *st = seq->private;  in listening_get_next() local
    2372  ++st->num;  in listening_get_next()
    2373  ++st->offset;  in listening_get_next()
    2382  ilb2 = &tcp_hashinfo.lhash2[st->bucket];  in listening_get_next()
    2384  ++st->bucket;  in listening_get_next()
    2390  struct tcp_iter_state *st = seq->private;  in listening_get_idx() local
    [all …]
|
D | ip_input.c |
    376  struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);  in ip_rcv_finish_core() local
    378  st[idx&0xFF].o_packets++;  in ip_rcv_finish_core()
    379  st[idx&0xFF].o_bytes += skb->len;  in ip_rcv_finish_core()
    380  st[(idx>>16)&0xFF].i_packets++;  in ip_rcv_finish_core()
    381  st[(idx>>16)&0xFF].i_bytes += skb->len;  in ip_rcv_finish_core()
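ip_rcv_finish_core() charges each packet to two slots of a 256-entry per-CPU accounting table: the low byte of the route's traffic-class id selects the output slot, bits 16-23 the input slot, and this_cpu_ptr() makes the increments lock-free. A standalone model of the index packing (userspace C; the per-CPU aspect is omitted):

#include <stdint.h>

struct acct { uint64_t o_packets, o_bytes, i_packets, i_bytes; };
static struct acct table[256];          /* one slot per class byte */

static void account(uint32_t idx, uint32_t len)
{
        table[idx & 0xFF].o_packets++;           /* output class: bits 0-7 */
        table[idx & 0xFF].o_bytes += len;
        table[(idx >> 16) & 0xFF].i_packets++;   /* input class: bits 16-23 */
        table[(idx >> 16) & 0xFF].i_bytes += len;
}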
|
/net/ipv6/ |
D | addrconf_core.c |
    40  __be32 st;  in __ipv6_addr_type() local
    42  st = addr->s6_addr32[0];  in __ipv6_addr_type()
    47  if ((st & htonl(0xE0000000)) != htonl(0x00000000) &&  in __ipv6_addr_type()
    48  (st & htonl(0xE0000000)) != htonl(0xE0000000))  in __ipv6_addr_type()
    52  if ((st & htonl(0xFF000000)) == htonl(0xFF000000)) {  in __ipv6_addr_type()
    59  if ((st & htonl(0xFFC00000)) == htonl(0xFE800000))  in __ipv6_addr_type()
    62  if ((st & htonl(0xFFC00000)) == htonl(0xFEC00000))  in __ipv6_addr_type()
    65  if ((st & htonl(0xFE000000)) == htonl(0xFC000000))  in __ipv6_addr_type()
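__ipv6_addr_type() classifies an address from its first 32 bits alone, comparing in network byte order so the address itself never needs a byte swap. A self-contained illustration of the same mask tests (function names hypothetical, prefix comments added):

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>

/* 'st' is the first 32-bit word of the address, in network byte order. */
static bool is_multicast(uint32_t st)
{
        return (st & htonl(0xFF000000)) == htonl(0xFF000000);   /* ff00::/8 */
}

static bool is_link_local(uint32_t st)
{
        return (st & htonl(0xFFC00000)) == htonl(0xFE800000);   /* fe80::/10 */
}

static bool is_unique_local(uint32_t st)
{
        return (st & htonl(0xFE000000)) == htonl(0xFC000000);   /* fc00::/7, RFC 4193 */
}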
|
/net/sched/ |
D | sch_fq.c |
    1014  struct tc_fq_qd_stats st;  in fq_dump_stats() local
    1018  st.gc_flows = q->stat_gc_flows;  in fq_dump_stats()
    1019  st.highprio_packets = q->stat_internal_packets;  in fq_dump_stats()
    1020  st.tcp_retrans = 0;  in fq_dump_stats()
    1021  st.throttled = q->stat_throttled;  in fq_dump_stats()
    1022  st.flows_plimit = q->stat_flows_plimit;  in fq_dump_stats()
    1023  st.pkts_too_long = q->stat_pkts_too_long;  in fq_dump_stats()
    1024  st.allocation_errors = q->stat_allocation_errors;  in fq_dump_stats()
    1025  st.time_next_delayed_flow = q->time_next_delayed_flow + q->timer_slack -  in fq_dump_stats()
    1027  st.flows = q->flows;  in fq_dump_stats()
    [all …]
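Every qdisc in this group follows the same two-step dump: snapshot its private counters into the uapi xstats struct, then hand the struct to gnet_stats_copy_app(), which (see the gen_stats.c entry further down) duplicates it and emits it as a TCA_STATS_APP attribute. A minimal sketch of the shape, with a hypothetical my_sched_data:

static int my_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct my_sched_data *q = qdisc_priv(sch);
        struct tc_fq_qd_stats st = { 0 };

        /* Snapshot; the struct layout is uapi, so order and width are fixed. */
        st.gc_flows  = q->stat_gc_flows;
        st.throttled = q->stat_throttled;
        st.flows     = q->flows;

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

The codel, red, pie, sfb, fq_pie, choke, and hhf entries below are all instances of this same fill-then-copy pattern.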
|
D | sch_fq_codel.c |
    559  struct tc_fq_codel_xstats st = {  in fq_codel_dump_stats() local
    564  st.qdisc_stats.maxpacket = q->cstats.maxpacket;  in fq_codel_dump_stats()
    565  st.qdisc_stats.drop_overlimit = q->drop_overlimit;  in fq_codel_dump_stats()
    566  st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;  in fq_codel_dump_stats()
    567  st.qdisc_stats.new_flow_count = q->new_flow_count;  in fq_codel_dump_stats()
    568  st.qdisc_stats.ce_mark = q->cstats.ce_mark;  in fq_codel_dump_stats()
    569  st.qdisc_stats.memory_usage = q->memory_usage;  in fq_codel_dump_stats()
    570  st.qdisc_stats.drop_overmemory = q->drop_overmemory;  in fq_codel_dump_stats()
    574  st.qdisc_stats.new_flows_len++;  in fq_codel_dump_stats()
    577  st.qdisc_stats.old_flows_len++;  in fq_codel_dump_stats()
    [all …]
|
D | sch_codel.c |
    248  struct tc_codel_xstats st = {  in codel_dump_stats() local
    263  st.drop_next = codel_time_to_us(delta);  in codel_dump_stats()
    265  st.drop_next = -codel_time_to_us(-delta);  in codel_dump_stats()
    268  return gnet_stats_copy_app(d, &st, sizeof(st));  in codel_dump_stats()
|
D | sch_red.c |
    450  struct tc_red_xstats st = {0};  in red_dump_stats() local
    464  st.early = q->stats.prob_drop + q->stats.forced_drop;  in red_dump_stats()
    465  st.pdrop = q->stats.pdrop;  in red_dump_stats()
    466  st.other = q->stats.other;  in red_dump_stats()
    467  st.marked = q->stats.prob_mark + q->stats.forced_mark;  in red_dump_stats()
    469  return gnet_stats_copy_app(d, &st, sizeof(st));  in red_dump_stats()
|
D | sch_pie.c |
    495  struct tc_pie_xstats st = {  in pie_dump_stats() local
    507  st.dq_rate_estimating = q->params.dq_rate_estimator;  in pie_dump_stats()
    511  st.avg_dq_rate = q->vars.avg_dq_rate *  in pie_dump_stats()
    514  return gnet_stats_copy_app(d, &st, sizeof(st));  in pie_dump_stats()
|
D | sch_sfb.c |
    599  struct tc_sfb_xstats st = {  in sfb_dump_stats() local
    608  st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);  in sfb_dump_stats()
    610  return gnet_stats_copy_app(d, &st, sizeof(st));  in sfb_dump_stats()
|
D | sch_fq_pie.c |
    502  struct tc_fq_pie_xstats st = {  in fq_pie_dump_stats() local
    515  st.new_flows_len++;  in fq_pie_dump_stats()
    518  st.old_flows_len++;  in fq_pie_dump_stats()
    521  return gnet_stats_copy_app(d, &st, sizeof(st));  in fq_pie_dump_stats()
|
D | sch_choke.c |
    463  struct tc_choke_xstats st = {  in choke_dump_stats() local
    471  return gnet_stats_copy_app(d, &st, sizeof(st));  in choke_dump_stats()
|
D | sch_hhf.c |
    683  struct tc_hhf_xstats st = {  in hhf_dump_stats() local
    690  return gnet_stats_copy_app(d, &st, sizeof(st));  in hhf_dump_stats()
|
/net/core/ |
D | skbuff.c |
    3664  unsigned int to, struct skb_seq_state *st)  in skb_prepare_seq_read() argument
    3666  st->lower_offset = from;  in skb_prepare_seq_read()
    3667  st->upper_offset = to;  in skb_prepare_seq_read()
    3668  st->root_skb = st->cur_skb = skb;  in skb_prepare_seq_read()
    3669  st->frag_idx = st->stepped_offset = 0;  in skb_prepare_seq_read()
    3670  st->frag_data = NULL;  in skb_prepare_seq_read()
    3671  st->frag_off = 0;  in skb_prepare_seq_read()
    3701  struct skb_seq_state *st)  in skb_seq_read() argument
    3703  unsigned int block_limit, abs_offset = consumed + st->lower_offset;  in skb_seq_read()
    3706  if (unlikely(abs_offset >= st->upper_offset)) {  in skb_seq_read()
    [all …]
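skb_prepare_seq_read() only seeds a cursor over bytes [from, to) of a possibly fragmented skb; skb_seq_read() then returns successive linear chunks until it returns 0, so callers can scan paged data without linearizing the skb. A sketch of the intended consumer loop (function name hypothetical):

static void walk_payload(struct sk_buff *skb, unsigned int from, unsigned int to)
{
        struct skb_seq_state st;
        const u8 *data;
        unsigned int len, consumed = 0;

        skb_prepare_seq_read(skb, from, to, &st);
        while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                /* data[0..len) is a contiguous view of the next chunk */
                consumed += len;
        }
        /* A loop abandoned before skb_seq_read() returns 0 must call
         * skb_abort_seq_read(&st) to release the mapped fragment. */
}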
|
D | neighbour.c |
    2096  struct neigh_statistics *st;  in neightbl_fill_info() local
    2098  st = per_cpu_ptr(tbl->stats, cpu);  in neightbl_fill_info()
    2099  ndst.ndts_allocs += READ_ONCE(st->allocs);  in neightbl_fill_info()
    2100  ndst.ndts_destroys += READ_ONCE(st->destroys);  in neightbl_fill_info()
    2101  ndst.ndts_hash_grows += READ_ONCE(st->hash_grows);  in neightbl_fill_info()
    2102  ndst.ndts_res_failed += READ_ONCE(st->res_failed);  in neightbl_fill_info()
    2103  ndst.ndts_lookups += READ_ONCE(st->lookups);  in neightbl_fill_info()
    2104  ndst.ndts_hits += READ_ONCE(st->hits);  in neightbl_fill_info()
    2105  ndst.ndts_rcv_probes_mcast += READ_ONCE(st->rcv_probes_mcast);  in neightbl_fill_info()
    2106  ndst.ndts_rcv_probes_ucast += READ_ONCE(st->rcv_probes_ucast);  in neightbl_fill_info()
    [all …]
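neightbl_fill_info() folds the per-CPU neighbour counters into a single ndt_stats snapshot for netlink; READ_ONCE() keeps each read tear-free, since remote CPUs update the counters locklessly. An abridged sketch of the aggregation loop (helper name hypothetical, subset of fields):

static void my_sum_neigh_stats(struct neigh_table *tbl, struct ndt_stats *ndst)
{
        int cpu;

        memset(ndst, 0, sizeof(*ndst));
        for_each_possible_cpu(cpu) {
                struct neigh_statistics *st = per_cpu_ptr(tbl->stats, cpu);

                ndst->ndts_allocs  += READ_ONCE(st->allocs);
                ndst->ndts_lookups += READ_ONCE(st->lookups);
                ndst->ndts_hits    += READ_ONCE(st->hits);
        }
}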
|
D | gen_stats.c |
    374  gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)  in gnet_stats_copy_app() argument
    377  d->xstats = kmemdup(st, len, GFP_ATOMIC);  in gnet_stats_copy_app()
    384  return gnet_stats_copy(d, TCA_STATS_APP, st, len,  in gnet_stats_copy_app()
|
/net/bridge/netfilter/ |
D | ebt_stp.c |
    141  const struct stp_config_pdu *st;  in ebt_stp_mt() local
    144  st = skb_header_pointer(skb, sizeof(_stph),  in ebt_stp_mt()
    146  if (st == NULL)  in ebt_stp_mt()
    148  return ebt_filter_config(info, st);  in ebt_stp_mt()
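ebt_stp_mt() uses the skb_header_pointer() idiom: the call returns a pointer straight into the skb when the requested bytes are linear, copies them into the caller's stack buffer when they span fragments, and returns NULL when the packet is too short. The general form, as a sketch (function name hypothetical, field inspection elided):

static bool match_config_pdu(const struct sk_buff *skb, unsigned int offset)
{
        struct stp_config_pdu _buf;             /* stack backing storage */
        const struct stp_config_pdu *st;

        st = skb_header_pointer(skb, offset, sizeof(_buf), &_buf);
        if (!st)
                return false;   /* packet truncated before the PDU ends */
        /* ... match on st-> fields; st may point into skb or into _buf ... */
        return true;
}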
|
/net/mac80211/ |
D | rate.c |
    71  struct ieee80211_tx_status *st)  in rate_control_tx_status() argument
    74  struct sta_info *sta = container_of(st->sta, struct sta_info, sta);  in rate_control_tx_status()
    82  ref->ops->tx_status_ext(ref->priv, sband, priv_sta, st);  in rate_control_tx_status()
    83  else if (st->skb)  in rate_control_tx_status()
    84  ref->ops->tx_status(ref->priv, sband, st->sta, priv_sta, st->skb);  in rate_control_tx_status()
|
D | rate.h |
    30  struct ieee80211_tx_status *st);
|
/net/batman-adv/ |
D | main.c |
    599  struct skb_seq_state st;  in batadv_skb_crc32() local
    606  skb_prepare_seq_read(skb, from, to, &st);  in batadv_skb_crc32()
    607  while ((len = skb_seq_read(consumed, &data, &st)) != 0) {  in batadv_skb_crc32()
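batadv_skb_crc32() is a direct consumer of the skbuff.c API above: the elided loop body feeds each linear chunk into a running checksum, so fragmented and linear skbs hash identically. A sketch of how such a loop typically accumulates (the real function uses the kernel's crc32c() per chunk):

        u32 crc = 0;

        skb_prepare_seq_read(skb, from, to, &st);
        while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                crc = crc32c(crc, data, len);   /* fold in this chunk */
                consumed += len;
        }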
|