/net/netfilter/

nft_counter.c
    26  struct nft_counter counter;  (member)
    31  struct nft_counter_percpu __percpu *counter;  (member)
    42  this_cpu = this_cpu_ptr(priv->counter);  (in nft_counter_eval())
    44  this_cpu->counter.bytes += pkt->skb->len;  (in nft_counter_eval())
    45  this_cpu->counter.packets++;  (in nft_counter_eval())
    50  static void nft_counter_fetch(const struct nft_counter_percpu __percpu *counter,  (argument of nft_counter_fetch())
    60  cpu_stats = per_cpu_ptr(counter, cpu);  (in nft_counter_fetch())
    63  bytes = cpu_stats->counter.bytes;  (in nft_counter_fetch())
    64  packets = cpu_stats->counter.packets;  (in nft_counter_fetch())
    77  nft_counter_fetch(priv->counter, &total);  (in nft_counter_dump())
    [remaining matches omitted]

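The nft_counter hits above show the usual per-CPU counter split: the packet path bumps only the local CPU's copy (nft_counter_eval), while the dump path walks every CPU and sums the copies (nft_counter_fetch). A minimal userspace sketch of that pattern follows, with one array slot per CPU standing in for alloc_percpu()/this_cpu_ptr(); counter_eval() and counter_fetch() are illustrative names, not the kernel functions, and the seqcount protection the kernel adds for 32-bit readers is left out.

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS 4        /* stand-in for the number of possible CPUs */

    struct counter {
        uint64_t bytes;
        uint64_t packets;
    };

    /* One private copy per CPU; the kernel gets this via alloc_percpu(). */
    static struct counter percpu_counter[NR_CPUS];

    /* Hot path: touch only the local CPU's copy (no shared cacheline, no lock). */
    static void counter_eval(int cpu, uint64_t pkt_len)
    {
        percpu_counter[cpu].bytes += pkt_len;
        percpu_counter[cpu].packets++;
    }

    /* Dump path: fold all per-CPU copies into one total, as nft_counter_fetch() does. */
    static void counter_fetch(struct counter *total)
    {
        total->bytes = total->packets = 0;
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            total->bytes += percpu_counter[cpu].bytes;
            total->packets += percpu_counter[cpu].packets;
        }
    }

    int main(void)
    {
        struct counter total;

        counter_eval(0, 1500);
        counter_eval(2, 64);
        counter_fetch(&total);
        printf("packets=%llu bytes=%llu\n",
               (unsigned long long)total.packets,
               (unsigned long long)total.bytes);
        return 0;
    }
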
nf_conntrack_acct.c
    43  struct nf_conn_counter *counter;  (local, in seq_print_acct())
    49  counter = acct->counter;  (in seq_print_acct())
    51  (unsigned long long)atomic64_read(&counter[dir].packets),  (in seq_print_acct())
    52  (unsigned long long)atomic64_read(&counter[dir].bytes));  (in seq_print_acct())

nft_numgen.c
    25  atomic_t counter;  (member)
    37  oval = atomic_read(&priv->counter);  (in nft_ng_inc_eval())
    39  } while (atomic_cmpxchg(&priv->counter, oval, nval) != oval);  (in nft_ng_inc_eval())
    68  atomic_set(&priv->counter, 0);  (in nft_ng_inc_init())

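nft_numgen's incremental generator advances a shared counter without locking: read the current value, compute its successor modulo the configured range, and retry the compare-and-swap until no other CPU raced in. Below is a hedged C11 version of that loop; ng_inc_next() and the modulus handling are assumptions based only on the matched lines, not a copy of the kernel code.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint counter;          /* shared generator state */

    /* Return the next value in [0, modulus), wrapping around, without locking.
     * Mirrors the atomic_read()/atomic_cmpxchg() retry loop in nft_ng_inc_eval(). */
    static unsigned int ng_inc_next(unsigned int modulus)
    {
        unsigned int oval, nval;

        do {
            oval = atomic_load(&counter);
            nval = (oval + 1 == modulus) ? 0 : oval + 1;
            /* the compare-exchange fails if another thread won the race; retry */
        } while (!atomic_compare_exchange_weak(&counter, &oval, nval));

        return nval;
    }

    int main(void)
    {
        atomic_store(&counter, 0);       /* like atomic_set() in nft_ng_inc_init() */
        for (int i = 0; i < 5; i++)
            printf("%u\n", ng_inc_next(3));   /* prints 1 2 0 1 2 */
        return 0;
    }
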
xt_set.c
    164  match_counter0(u64 counter, const struct ip_set_counter_match0 *info)  (argument of match_counter0())
    170  return counter == info->value;  (in match_counter0())
    172  return counter != info->value;  (in match_counter0())
    174  return counter < info->value;  (in match_counter0())
    176  return counter > info->value;  (in match_counter0())
    211  match_counter(u64 counter, const struct ip_set_counter_match *info)  (argument of match_counter())
    217  return counter == info->value;  (in match_counter())
    219  return counter != info->value;  (in match_counter())
    221  return counter < info->value;  (in match_counter())
    223  return counter > info->value;  (in match_counter())

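Both match_counter() variants in xt_set.c boil down to a switch over a comparison operator carried in the match info, applied to a set element's 64-bit packet or byte counter. A small sketch of that dispatch is below; the enum values and struct name are placeholders, not the real ipset UAPI definitions.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder operator codes; the real ones live in the ipset UAPI headers. */
    enum counter_op { OP_NONE, OP_EQ, OP_NE, OP_LT, OP_GT };

    struct counter_match {
        enum counter_op op;
        uint64_t value;
    };

    /* Compare a 64-bit counter against the configured threshold, like match_counter(). */
    static bool match_counter(uint64_t counter, const struct counter_match *info)
    {
        switch (info->op) {
        case OP_EQ: return counter == info->value;
        case OP_NE: return counter != info->value;
        case OP_LT: return counter <  info->value;
        case OP_GT: return counter >  info->value;
        default:    return true;          /* no counter condition configured */
        }
    }

    int main(void)
    {
        struct counter_match gt = { .op = OP_GT, .value = 100 };

        printf("%d %d\n", match_counter(250, &gt), match_counter(50, &gt));  /* 1 0 */
        return 0;
    }
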
nf_conntrack_core.c
    658  struct nf_conn_counter *counter = acct->counter;  (local, in nf_ct_acct_update())
    660  atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);  (in nf_ct_acct_update())
    661  atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);  (in nf_ct_acct_update())
    672  struct nf_conn_counter *counter = acct->counter;  (local, in nf_ct_acct_merge())
    676  bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);  (in nf_ct_acct_merge())

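Conntrack accounting keeps one packets/bytes pair per traffic direction and updates it with plain 64-bit atomic adds, since packets belonging to the same connection can be processed on several CPUs at once. A minimal C11 model of the nf_ct_acct_update() idea (the direction enum and acct_update() name are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    enum ct_dir { DIR_ORIGINAL, DIR_REPLY, DIR_MAX };

    struct conn_counter {
        atomic_uint_fast64_t packets;
        atomic_uint_fast64_t bytes;
    };

    /* One counter pair per direction, like the nf_conn_counter array per conntrack. */
    static struct conn_counter acct[DIR_MAX];

    /* Per-packet update: lock-free atomic increments, as in nf_ct_acct_update(). */
    static void acct_update(enum ct_dir dir, uint64_t len)
    {
        atomic_fetch_add(&acct[dir].packets, 1);
        atomic_fetch_add(&acct[dir].bytes, len);
    }

    int main(void)
    {
        acct_update(DIR_ORIGINAL, 1500);
        acct_update(DIR_REPLY, 60);
        printf("orig: %llu pkts / %llu bytes\n",
               (unsigned long long)atomic_load(&acct[DIR_ORIGINAL].packets),
               (unsigned long long)atomic_load(&acct[DIR_ORIGINAL].bytes));
        return 0;
    }
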
xt_connbytes.c
    40  counters = acct->counter;  (in connbytes_mt())

x_tables.c
    1677  struct xt_counters *counter)  (argument of xt_percpu_counter_alloc())
    1679  BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));  (in xt_percpu_counter_alloc())
    1690  counter->pcnt = (__force unsigned long)(state->mem + state->off);  (in xt_percpu_counter_alloc())
    1691  state->off += sizeof(*counter);  (in xt_percpu_counter_alloc())
    1692  if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {  (in xt_percpu_counter_alloc())

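xt_percpu_counter_alloc() hands out per-rule counter slots by carving them out of a larger block: keep an offset into the current block, bump it by sizeof(*counter) for each rule, and start a fresh block once another slot would no longer fit. A simplified single-CPU model of that bump allocator is sketched below; the block size and helper names are assumptions, not the kernel's XT_PCPU_BLOCK_SIZE machinery.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BLOCK_SIZE 8192          /* stand-in for XT_PCPU_BLOCK_SIZE */

    struct counters { uint64_t pcnt, bcnt; };

    struct alloc_state {
        char  *mem;                  /* current block, NULL until first use */
        size_t off;                  /* bump pointer inside the block */
    };

    /* Return the next counter slot, switching to a new block when the current
     * one cannot fit another struct counters (mirrors xt_percpu_counter_alloc()). */
    static struct counters *counter_alloc(struct alloc_state *state)
    {
        struct counters *slot;

        if (!state->mem) {
            state->mem = calloc(1, BLOCK_SIZE);
            if (!state->mem)
                return NULL;
            state->off = 0;
        }

        slot = (struct counters *)(state->mem + state->off);
        state->off += sizeof(*slot);

        /* No room for another slot: the filled block now belongs to the slots
         * already handed out (freed with them, not here); allocate anew next call. */
        if (state->off > BLOCK_SIZE - sizeof(*slot))
            state->mem = NULL;

        return slot;
    }

    int main(void)
    {
        struct alloc_state state = { 0 };

        for (int i = 0; i < 3; i++)
            printf("slot %d at %p\n", i, (void *)counter_alloc(&state));
        return 0;
    }
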
nf_conntrack_netlink.c
    224  struct nf_conn_counter *counter = acct->counter;  (local, in dump_counters())
    229  pkts = atomic64_xchg(&counter[dir].packets, 0);  (in dump_counters())
    230  bytes = atomic64_xchg(&counter[dir].bytes, 0);  (in dump_counters())
    232  pkts = atomic64_read(&counter[dir].packets);  (in dump_counters())
    233  bytes = atomic64_read(&counter[dir].bytes);  (in dump_counters())

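dump_counters() has two modes: a plain read that leaves the counters alone, and a zero-on-dump mode that swaps the value out atomically so increments racing with the dump are never lost, only deferred to the next interval. The difference is exactly atomic load versus atomic exchange, as in this small C11 sketch:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint_fast64_t packets;

    /* Return the packet count; if reset is requested, atomically swap in 0 so a
     * concurrent increment lands either in this reading or in the next one,
     * never nowhere (the atomic64_xchg() pattern from dump_counters()). */
    static uint64_t dump_packets(bool reset)
    {
        return reset ? atomic_exchange(&packets, 0)
                     : atomic_load(&packets);
    }

    int main(void)
    {
        atomic_fetch_add(&packets, 7);
        printf("read: %llu\n", (unsigned long long)dump_packets(false));  /* 7, kept */
        printf("zero: %llu\n", (unsigned long long)dump_packets(true));   /* 7, now reset */
        printf("read: %llu\n", (unsigned long long)dump_packets(false));  /* 0 */
        return 0;
    }
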
nft_ct.c
    126  count = nft_ct_get_eval_counter(acct->counter,  (in nft_ct_get_eval())

Kconfig
    503   tristate "Netfilter nf_tables counter module"
    505   This option adds the "counter" expression that you can use to
    1052  tristate '"connbytes" per-connection counter match support'
    1375  byte counter.
    1385  byte counter correctly and not per CPU.

/net/dccp/ccids/lib/

loss_interval.c
    28  return lh->counter ? lh->ring[LIH_INDEX(lh->counter - 1)] : NULL;  (in tfrc_lh_peek())
    34  BUG_ON(i >= lh->counter);  (in tfrc_lh_get_interval())
    35  return lh->ring[LIH_INDEX(lh->counter - i - 1)]->li_length;  (in tfrc_lh_get_interval())
    43  if (lh->ring[LIH_INDEX(lh->counter)] == NULL)  (in tfrc_lh_demand_next())
    44  lh->ring[LIH_INDEX(lh->counter)] = kmem_cache_alloc(tfrc_lh_slab,  (in tfrc_lh_demand_next())
    46  return lh->ring[LIH_INDEX(lh->counter)];  (in tfrc_lh_demand_next())
    54  for (lh->counter = 0; lh->counter < LIH_SIZE; lh->counter++)  (in tfrc_lh_cleanup())
    55  if (lh->ring[LIH_INDEX(lh->counter)] != NULL) {  (in tfrc_lh_cleanup())
    57  lh->ring[LIH_INDEX(lh->counter)]);  (in tfrc_lh_cleanup())
    58  lh->ring[LIH_INDEX(lh->counter)] = NULL;  (in tfrc_lh_cleanup())
    [remaining matches omitted]

loss_interval.h
    47  u8 counter;  (member)
    58  return lh->counter > 0;  (in tfrc_lh_is_initialised())
    63  return min(lh->counter, (u8)LIH_SIZE);  (in tfrc_lh_length())

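In the TFRC loss-interval history the counter only grows: it records how many intervals have ever been stored, and taken modulo the ring size (LIH_INDEX) it is the write cursor, so the populated length is min(counter, LIH_SIZE) and entry i is found counting back from counter - 1. A self-contained model of that indexing, with the interval payload reduced to a plain int and the helper names made up for the sketch:

    #include <stdio.h>

    #define LIH_SIZE 8
    #define LIH_INDEX(c) ((c) % LIH_SIZE)   /* ring slot for a counter that never wraps back */

    struct loss_hist {
        int ring[LIH_SIZE];
        unsigned int counter;               /* total intervals ever added */
    };

    /* Append the newest loss interval, overwriting the oldest once the ring is full. */
    static void lh_add(struct loss_hist *lh, int li_length)
    {
        lh->ring[LIH_INDEX(lh->counter)] = li_length;
        lh->counter++;
    }

    /* i = 0 is the most recent interval, i = 1 the one before it, and so on,
     * matching tfrc_lh_get_interval()'s "counter - i - 1" arithmetic. */
    static int lh_get(const struct loss_hist *lh, unsigned int i)
    {
        return lh->ring[LIH_INDEX(lh->counter - i - 1)];
    }

    static unsigned int lh_length(const struct loss_hist *lh)
    {
        return lh->counter < LIH_SIZE ? lh->counter : LIH_SIZE;
    }

    int main(void)
    {
        struct loss_hist lh = { .counter = 0 };

        for (int n = 1; n <= 10; n++)
            lh_add(&lh, n);
        printf("stored=%u newest=%d oldest=%d\n",
               lh_length(&lh), lh_get(&lh, 0), lh_get(&lh, lh_length(&lh) - 1));
        return 0;
    }
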
/net/netfilter/ipvs/

ip_vs_lblc.c
    112  int counter;   /* counter for no expire */  (member)
    307  if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {  (in ip_vs_lblc_check_expire())
    310  tbl->counter = 1;  (in ip_vs_lblc_check_expire())
    315  tbl->counter++;  (in ip_vs_lblc_check_expire())
    370  tbl->counter = 1;  (in ip_vs_lblc_init_svc())

ip_vs_lblcr.c
    282  int counter;   /* counter for no expire */  (member)
    471  if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {  (in ip_vs_lblcr_check_expire())
    474  tbl->counter = 1;  (in ip_vs_lblcr_check_expire())
    479  tbl->counter++;  (in ip_vs_lblcr_check_expire())
    533  tbl->counter = 1;  (in ip_vs_lblcr_init_svc())

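Both LBLC schedulers use counter the same way, judging from the matched lines: the periodic cleanup timer bumps it on every run, triggers a full expiration sweep when it reaches a multiple of COUNT_FOR_FULL_EXPIRATION, and then restarts the count at 1 (the same value init_svc starts with). A compact sketch of that cadence; tick() and the constant's value are placeholders, and the extra table-size conditions of the real timer are omitted.

    #include <stdio.h>

    #define COUNT_FOR_FULL_EXPIRATION 30   /* illustrative value */

    struct lblc_table {
        int counter;                       /* timer runs since the last full sweep */
    };

    /* Called from the periodic cleanup timer, as in ip_vs_lblc_check_expire(). */
    static void tick(struct lblc_table *tbl)
    {
        if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
            printf("tick %d: full expiration sweep\n", tbl->counter);
            tbl->counter = 1;              /* restart the cadence */
            return;
        }
        printf("tick %d: partial cleanup only\n", tbl->counter);
        tbl->counter++;
    }

    int main(void)
    {
        struct lblc_table tbl = { .counter = 1 };   /* init_svc() starts at 1 */

        for (int i = 0; i < 35; i++)
            tick(&tbl);
        return 0;
    }
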
/net/sched/

sch_netem.c
    86   u32 counter;  (member)
    522  q->counter < q->gap - 1 || /* inside last reordering gap */  (in netem_enqueue())
    565  ++q->counter;  (in netem_enqueue())
    573  q->counter = 0;  (in netem_enqueue())
    903  q->counter = 0;  (in netem_change())

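netem's counter appears to track how many packets have gone through the normal, delayed path since the last reordered one: while the counter is still inside the configured gap the packet is queued in order and the counter is bumped, otherwise (subject to the reorder probability, omitted here) the packet skips the delay and the counter resets. A hedged, deterministic userspace model of that decision; the struct and function names are made up for the sketch.

    #include <stdbool.h>
    #include <stdio.h>

    struct netem_state {
        unsigned int gap;       /* reorder roughly every gap-th packet */
        unsigned int counter;   /* packets delayed since the last reordered one */
    };

    /* Decide whether this packet is delayed normally or sent ahead of the queue.
     * Returns true when the packet should jump the queue (be reordered).
     * The random reorder-probability check of the real netem is assumed to
     * always fire here, to keep the example deterministic. */
    static bool netem_reorder(struct netem_state *q)
    {
        if (q->gap == 0 ||                 /* reordering disabled */
            q->counter < q->gap - 1) {     /* still inside the current gap */
            ++q->counter;
            return false;                  /* normal path: delayed, in order */
        }
        q->counter = 0;                    /* gap complete: send immediately */
        return true;
    }

    int main(void)
    {
        struct netem_state q = { .gap = 5, .counter = 0 };

        for (int pkt = 1; pkt <= 12; pkt++)
            printf("packet %2d: %s\n", pkt,
                   netem_reorder(&q) ? "sent immediately (reordered)"
                                     : "delayed in order");
        return 0;
    }
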
/net/sctp/

objcnt.c
    86  atomic_read(sctp_dbg_objcnt[i].counter));  (in sctp_objcnt_seq_show())

/net/ipv4/netfilter/

arp_tables.c
    230  struct xt_counters *counter;  (local, in arpt_do_table())
    237  counter = xt_get_this_cpu_counter(&e->counters);  (in arpt_do_table())
    238  ADD_COUNTER(*counter, arp_hdr_len(skb->dev), 1);  (in arpt_do_table())

ip_tables.c
    298  struct xt_counters *counter;  (local, in ipt_do_table())
    315  counter = xt_get_this_cpu_counter(&e->counters);  (in ipt_do_table())
    316  ADD_COUNTER(*counter, skb->len, 1);  (in ipt_do_table())

/net/

socket.c
    2578  int counter = 0;  (local, in socket_seq_show())
    2581  counter += per_cpu(sockets_in_use, cpu);  (in socket_seq_show())
    2584  if (counter < 0)  (in socket_seq_show())
    2585  counter = 0;  (in socket_seq_show())
    2587  seq_printf(seq, "sockets: used %d\n", counter);  (in socket_seq_show())

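sockets_in_use is bumped on whichever CPU creates a socket and decremented on whichever CPU releases it, so individual per-CPU values can go negative, and with updates racing against the unsynchronized summation loop the total itself can momentarily read below zero; socket_seq_show() therefore clamps it at 0 before printing. A small illustration of that, with hand-picked "CPU" indices and made-up helper names:

    #include <stdio.h>

    #define NR_CPUS 2

    /* Signed on purpose: creation and release of the same socket may be
     * accounted on different CPUs, so one CPU's value can dip below zero. */
    static long sockets_in_use[NR_CPUS];

    static void sock_alloc_on(int cpu)   { sockets_in_use[cpu]++; }
    static void sock_release_on(int cpu) { sockets_in_use[cpu]--; }

    static long sockets_used(void)
    {
        long counter = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            counter += sockets_in_use[cpu];
        if (counter < 0)                 /* same clamp as socket_seq_show() */
            counter = 0;
        return counter;
    }

    int main(void)
    {
        sock_alloc_on(0);                /* created on CPU 0 ... */
        sock_release_on(1);              /* ... torn down on CPU 1 */
        printf("cpu0=%ld cpu1=%ld used=%ld\n",
               sockets_in_use[0], sockets_in_use[1], sockets_used());
        return 0;
    }
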
/net/ipv6/netfilter/

ip6_tables.c
    329  struct xt_counters *counter;  (local, in ip6t_do_table())
    347  counter = xt_get_this_cpu_counter(&e->counters);  (in ip6t_do_table())
    348  ADD_COUNTER(*counter, skb->len, 1);  (in ip6t_do_table())

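arp_tables, ip_tables and ip6_tables all account rule hits the same way: fetch the rule's counter slot for this CPU and add one packet plus the packet length. The sketch below models that hot path with a single counter pair per rule and an ADD_COUNTER macro of the same shape as the kernel helper; the per-CPU indirection of xt_get_this_cpu_counter() is intentionally dropped.

    #include <stdint.h>
    #include <stdio.h>

    /* Per-rule hit counters, like struct xt_counters (pcnt = packets, bcnt = bytes). */
    struct xt_counters {
        uint64_t pcnt, bcnt;
    };

    /* Same shape as the kernel's ADD_COUNTER() helper: b bytes, p packets. */
    #define ADD_COUNTER(c, b, p) do { (c).bcnt += (b); (c).pcnt += (p); } while (0)

    struct rule {
        struct xt_counters counters;     /* the real tables keep one copy per CPU */
    };

    /* Per-packet accounting as done in ipt_do_table()/ip6t_do_table(). */
    static void rule_account(struct rule *r, unsigned int skb_len)
    {
        ADD_COUNTER(r->counters, skb_len, 1);
    }

    int main(void)
    {
        struct rule r = { .counters = { 0, 0 } };

        rule_account(&r, 1500);
        rule_account(&r, 40);
        printf("pkts=%llu bytes=%llu\n",
               (unsigned long long)r.counters.pcnt,
               (unsigned long long)r.counters.bcnt);
        return 0;
    }
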
/net/batman-adv/

types.h
    1290  atomic_t counter;  (member)

translation-table.c
    3590  if (!batadv_atomic_dec_not_zero(&tt_roam_node->counter))  (in batadv_tt_check_roam_count())
    3604  atomic_set(&tt_roam_node->counter,  (in batadv_tt_check_roam_count())

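batman-adv rate-limits roaming events per client: the counter is refilled to a maximum when a new window starts (the atomic_set above) and every event must decrement it atomically without going below zero; once it is exhausted, further events in the window are rejected. A C11 compare-exchange version of that dec-if-not-zero primitive; the budget value is an assumption, not the batman-adv constant.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define ROAM_BUDGET 5                 /* illustrative per-window budget */

    static atomic_uint roam_counter;

    /* Atomically decrement unless the value is already zero; returns false when
     * the budget is exhausted (the batadv_atomic_dec_not_zero() idea). */
    static bool dec_not_zero(atomic_uint *v)
    {
        unsigned int cur = atomic_load(v);

        while (cur != 0) {
            if (atomic_compare_exchange_weak(v, &cur, cur - 1))
                return true;              /* consumed one unit of the budget */
            /* cur was refreshed by the failed compare-exchange; retry */
        }
        return false;
    }

    int main(void)
    {
        atomic_store(&roam_counter, ROAM_BUDGET);   /* new window: refill the budget */
        for (int event = 1; event <= 7; event++)
            printf("roam event %d: %s\n", event,
                   dec_not_zero(&roam_counter) ? "allowed" : "rate-limited");
        return 0;
    }
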
/net/ipv4/

ping.c
    293  pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter);  (in ping_close())
|