Home
last modified time | relevance | path

Searched refs: st (Results 1 – 25 of 32) sorted by relevance

12

/net/ipv4/netfilter/
Dnf_conntrack_l3proto_ipv4_compat.c35 struct ct_iter_state *st = seq->private; in ct_get_first() local
38 for (st->bucket = 0; in ct_get_first()
39 st->bucket < net->ct.htable_size; in ct_get_first()
40 st->bucket++) { in ct_get_first()
42 hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); in ct_get_first()
53 struct ct_iter_state *st = seq->private; in ct_get_next() local
57 if (likely(get_nulls_value(head) == st->bucket)) { in ct_get_next()
58 if (++st->bucket >= net->ct.htable_size) in ct_get_next()
62 hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); in ct_get_next()
221 struct ct_expect_iter_state *st = seq->private; in ct_expect_get_first() local
[all …]
/net/netfilter/
Dnf_conntrack_standalone.c57 struct ct_iter_state *st = seq->private; in ct_get_first() local
60 for (st->bucket = 0; in ct_get_first()
61 st->bucket < net->ct.htable_size; in ct_get_first()
62 st->bucket++) { in ct_get_first()
63 n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket])); in ct_get_first()
74 struct ct_iter_state *st = seq->private; in ct_get_next() local
78 if (likely(get_nulls_value(head) == st->bucket)) { in ct_get_next()
79 if (++st->bucket >= net->ct.htable_size) in ct_get_next()
84 &net->ct.hash[st->bucket])); in ct_get_next()
102 struct ct_iter_state *st = seq->private; in ct_seq_start() local
[all …]
Dxt_recent.c464 struct recent_iter_state *st = seq->private; in recent_seq_start() local
465 const struct recent_table *t = st->table; in recent_seq_start()
471 for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++) in recent_seq_start()
472 list_for_each_entry(e, &t->iphash[st->bucket], list) in recent_seq_start()
480 struct recent_iter_state *st = seq->private; in recent_seq_next() local
481 const struct recent_table *t = st->table; in recent_seq_next()
485 while (head == &t->iphash[st->bucket]) { in recent_seq_next()
486 if (++st->bucket >= ip_list_hash_size) in recent_seq_next()
488 head = t->iphash[st->bucket].next; in recent_seq_next()
527 struct recent_iter_state *st; in recent_seq_open() local
[all …]
Dxt_qtaguid_print.c227 char *pp_sock_tag(struct sock_tag *st) in pp_sock_tag() argument
232 if (!st) { in pp_sock_tag()
237 tag_str = pp_tag_t(&st->tag); in pp_sock_tag()
242 st, st->sk, atomic_read( in pp_sock_tag()
243 &st->sk->sk_refcnt), in pp_sock_tag()
244 st->pid, tag_str); in pp_sock_tag()
Dnfnetlink_log.c929 static struct hlist_node *get_first(struct net *net, struct iter_state *st) in get_first() argument
932 if (!st) in get_first()
937 for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { in get_first()
938 struct hlist_head *head = &log->instance_table[st->bucket]; in get_first()
946 static struct hlist_node *get_next(struct net *net, struct iter_state *st, in get_next() argument
954 if (++st->bucket >= INSTANCE_BUCKETS) in get_next()
958 head = &log->instance_table[st->bucket]; in get_next()
964 static struct hlist_node *get_idx(struct net *net, struct iter_state *st, in get_idx() argument
968 head = get_first(net, st); in get_idx()
971 while (pos && (head = get_next(net, st, head))) in get_idx()
Dnf_conntrack_expect.c446 struct ct_expect_iter_state *st = seq->private; in ct_expect_get_first() local
449 for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) { in ct_expect_get_first()
450 n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket])); in ct_expect_get_first()
461 struct ct_expect_iter_state *st = seq->private; in ct_expect_get_next() local
465 if (++st->bucket >= nf_ct_expect_hsize) in ct_expect_get_next()
467 head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket])); in ct_expect_get_next()
Dxt_qtaguid_print.h23 char *pp_sock_tag(struct sock_tag *st);
69 static inline char *pp_sock_tag(struct sock_tag *st) in pp_sock_tag() argument
Dnf_conntrack_netlink.c1843 __u16 cpu, const struct ip_conntrack_stat *st) in ctnetlink_ct_stat_cpu_fill_info() argument
1859 if (nla_put_be32(skb, CTA_STATS_SEARCHED, htonl(st->searched)) || in ctnetlink_ct_stat_cpu_fill_info()
1860 nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) || in ctnetlink_ct_stat_cpu_fill_info()
1861 nla_put_be32(skb, CTA_STATS_NEW, htonl(st->new)) || in ctnetlink_ct_stat_cpu_fill_info()
1862 nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) || in ctnetlink_ct_stat_cpu_fill_info()
1863 nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) || in ctnetlink_ct_stat_cpu_fill_info()
1864 nla_put_be32(skb, CTA_STATS_DELETE, htonl(st->delete)) || in ctnetlink_ct_stat_cpu_fill_info()
1865 nla_put_be32(skb, CTA_STATS_DELETE_LIST, htonl(st->delete_list)) || in ctnetlink_ct_stat_cpu_fill_info()
1866 nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) || in ctnetlink_ct_stat_cpu_fill_info()
1868 htonl(st->insert_failed)) || in ctnetlink_ct_stat_cpu_fill_info()
[all …]
Dnfnetlink_queue_core.c1169 struct iter_state *st = seq->private; in get_first() local
1173 if (!st) in get_first()
1178 for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { in get_first()
1179 if (!hlist_empty(&q->instance_table[st->bucket])) in get_first()
1180 return q->instance_table[st->bucket].first; in get_first()
1187 struct iter_state *st = seq->private; in get_next() local
1194 if (++st->bucket >= INSTANCE_BUCKETS) in get_next()
1198 h = q->instance_table[st->bucket].first; in get_next()
Dxt_cluster.c64 __be32 st = addr->s6_addr32[0]; in xt_cluster_ipv6_is_multicast() local
65 return ((st & htonl(0xFF000000)) == htonl(0xFF000000)); in xt_cluster_ipv6_is_multicast()
/net/ipv6/
Daddrconf_core.c29 __be32 st; in __ipv6_addr_type() local
31 st = addr->s6_addr32[0]; in __ipv6_addr_type()
36 if ((st & htonl(0xE0000000)) != htonl(0x00000000) && in __ipv6_addr_type()
37 (st & htonl(0xE0000000)) != htonl(0xE0000000)) in __ipv6_addr_type()
41 if ((st & htonl(0xFF000000)) == htonl(0xFF000000)) { in __ipv6_addr_type()
48 if ((st & htonl(0xFFC00000)) == htonl(0xFE800000)) in __ipv6_addr_type()
51 if ((st & htonl(0xFFC00000)) == htonl(0xFEC00000)) in __ipv6_addr_type()
54 if ((st & htonl(0xFE000000)) == htonl(0xFC000000)) in __ipv6_addr_type()
/net/ipv4/
Dtcp_ipv4.c2251 struct tcp_iter_state *st = seq->private; in listening_get_next() local
2255 ilb = &tcp_hashinfo.listening_hash[st->bucket]; in listening_get_next()
2258 st->offset = 0; in listening_get_next()
2261 ilb = &tcp_hashinfo.listening_hash[st->bucket]; in listening_get_next()
2262 ++st->num; in listening_get_next()
2263 ++st->offset; in listening_get_next()
2265 if (st->state == TCP_SEQ_STATE_OPENREQ) { in listening_get_next()
2268 icsk = inet_csk(st->syn_wait_sk); in listening_get_next()
2272 if (req->rsk_ops->family == st->family) { in listening_get_next()
2278 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries) in listening_get_next()
[all …]
Dip_input.c348 struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct); in ip_rcv_finish() local
350 st[idx&0xFF].o_packets++; in ip_rcv_finish()
351 st[idx&0xFF].o_bytes += skb->len; in ip_rcv_finish()
352 st[(idx>>16)&0xFF].i_packets++; in ip_rcv_finish()
353 st[(idx>>16)&0xFF].i_bytes += skb->len; in ip_rcv_finish()
Droute.c287 struct rt_cache_stat *st = v; in rt_cpu_seq_show() local
297 st->in_hit, in rt_cpu_seq_show()
298 st->in_slow_tot, in rt_cpu_seq_show()
299 st->in_slow_mc, in rt_cpu_seq_show()
300 st->in_no_route, in rt_cpu_seq_show()
301 st->in_brd, in rt_cpu_seq_show()
302 st->in_martian_dst, in rt_cpu_seq_show()
303 st->in_martian_src, in rt_cpu_seq_show()
305 st->out_hit, in rt_cpu_seq_show()
306 st->out_slow_tot, in rt_cpu_seq_show()
[all …]
/net/core/
Dskbuff.c2520 unsigned int to, struct skb_seq_state *st) in skb_prepare_seq_read() argument
2522 st->lower_offset = from; in skb_prepare_seq_read()
2523 st->upper_offset = to; in skb_prepare_seq_read()
2524 st->root_skb = st->cur_skb = skb; in skb_prepare_seq_read()
2525 st->frag_idx = st->stepped_offset = 0; in skb_prepare_seq_read()
2526 st->frag_data = NULL; in skb_prepare_seq_read()
2556 struct skb_seq_state *st) in skb_seq_read() argument
2558 unsigned int block_limit, abs_offset = consumed + st->lower_offset; in skb_seq_read()
2561 if (unlikely(abs_offset >= st->upper_offset)) in skb_seq_read()
2565 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset; in skb_seq_read()
[all …]
Dgen_stats.c205 gnet_stats_copy_app(struct gnet_dump *d, void *st, int len) in gnet_stats_copy_app() argument
208 d->xstats = st; in gnet_stats_copy_app()
213 return gnet_stats_copy(d, TCA_STATS_APP, st, len); in gnet_stats_copy_app()
Dneighbour.c1863 struct neigh_statistics *st; in neightbl_fill_info() local
1865 st = per_cpu_ptr(tbl->stats, cpu); in neightbl_fill_info()
1866 ndst.ndts_allocs += st->allocs; in neightbl_fill_info()
1867 ndst.ndts_destroys += st->destroys; in neightbl_fill_info()
1868 ndst.ndts_hash_grows += st->hash_grows; in neightbl_fill_info()
1869 ndst.ndts_res_failed += st->res_failed; in neightbl_fill_info()
1870 ndst.ndts_lookups += st->lookups; in neightbl_fill_info()
1871 ndst.ndts_hits += st->hits; in neightbl_fill_info()
1872 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast; in neightbl_fill_info()
1873 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast; in neightbl_fill_info()
[all …]
/net/sched/
Dsch_codel.c215 struct tc_codel_xstats st = { in codel_dump_stats() local
229 st.drop_next = codel_time_to_us(delta); in codel_dump_stats()
231 st.drop_next = -codel_time_to_us(-delta); in codel_dump_stats()
234 return gnet_stats_copy_app(d, &st, sizeof(st)); in codel_dump_stats()
Dsch_fq_codel.c463 struct tc_fq_codel_xstats st = { in fq_codel_dump_stats() local
468 st.qdisc_stats.maxpacket = q->cstats.maxpacket; in fq_codel_dump_stats()
469 st.qdisc_stats.drop_overlimit = q->drop_overlimit; in fq_codel_dump_stats()
470 st.qdisc_stats.ecn_mark = q->cstats.ecn_mark; in fq_codel_dump_stats()
471 st.qdisc_stats.new_flow_count = q->new_flow_count; in fq_codel_dump_stats()
474 st.qdisc_stats.new_flows_len++; in fq_codel_dump_stats()
477 st.qdisc_stats.old_flows_len++; in fq_codel_dump_stats()
479 return gnet_stats_copy_app(d, &st, sizeof(st)); in fq_codel_dump_stats()
Dsch_sfb.c587 struct tc_sfb_xstats st = { in sfb_dump_stats() local
596 st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q); in sfb_dump_stats()
598 return gnet_stats_copy_app(d, &st, sizeof(st)); in sfb_dump_stats()
Dsch_red.c288 struct tc_red_xstats st = { in red_dump_stats() local
295 return gnet_stats_copy_app(d, &st, sizeof(st)); in red_dump_stats()
Dsch_choke.c529 struct tc_choke_xstats st = { in choke_dump_stats() local
537 return gnet_stats_copy_app(d, &st, sizeof(st)); in choke_dump_stats()
/net/bridge/netfilter/
Debt_stp.c144 const struct stp_config_pdu *st; in ebt_stp_mt() local
147 st = skb_header_pointer(skb, sizeof(_stph), in ebt_stp_mt()
149 if (st == NULL) in ebt_stp_mt()
151 return ebt_filter_config(info, st); in ebt_stp_mt()
/net/ipx/
Dipx_route.c264 struct sockaddr_ipx *sg, *st; in ipxrtr_ioctl() local
271 st = (struct sockaddr_ipx *)&rt.rt_dst; in ipxrtr_ioctl()
276 st->sipx_family != AF_IPX) in ipxrtr_ioctl()
281 rc = ipxrtr_delete(st->sipx_network); in ipxrtr_ioctl()
285 f.ipx_network = st->sipx_network; in ipxrtr_ioctl()
/net/batman-adv/
Dmain.c464 struct skb_seq_state st; in batadv_skb_crc32() local
471 skb_prepare_seq_read(skb, from, to, &st); in batadv_skb_crc32()
472 while ((len = skb_seq_read(consumed, &data, &st)) != 0) { in batadv_skb_crc32()
476 skb_abort_seq_read(&st); in batadv_skb_crc32()

12