/net/dccp/ccids/lib/ |
D | loss_interval.c |
      24  return lh->counter ? lh->ring[LIH_INDEX(lh->counter - 1)] : NULL;   in tfrc_lh_peek()
      30  BUG_ON(i >= lh->counter);   in tfrc_lh_get_interval()
      31  return lh->ring[LIH_INDEX(lh->counter - i - 1)]->li_length;   in tfrc_lh_get_interval()
      39  if (lh->ring[LIH_INDEX(lh->counter)] == NULL)   in tfrc_lh_demand_next()
      40  lh->ring[LIH_INDEX(lh->counter)] = kmem_cache_alloc(tfrc_lh_slab,   in tfrc_lh_demand_next()
      42  return lh->ring[LIH_INDEX(lh->counter)];   in tfrc_lh_demand_next()
      50  for (lh->counter = 0; lh->counter < LIH_SIZE; lh->counter++)   in tfrc_lh_cleanup()
      51  if (lh->ring[LIH_INDEX(lh->counter)] != NULL) {   in tfrc_lh_cleanup()
      53  lh->ring[LIH_INDEX(lh->counter)]);   in tfrc_lh_cleanup()
      54  lh->ring[LIH_INDEX(lh->counter)] = NULL;   in tfrc_lh_cleanup()
      [all …]
|
D | loss_interval.h |
      43  u8 counter;   member
      54  return lh->counter > 0;   in tfrc_lh_is_initialised()
      59  return min(lh->counter, (u8)LIH_SIZE);   in tfrc_lh_length()
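Taken together, these hits show the TFRC loss-interval history: a fixed ring of LIH_SIZE entries indexed by a monotonically growing counter, so the newest interval sits at LIH_INDEX(counter - 1) and the usable length is the counter capped at the ring size. A minimal userspace C sketch of that pattern, assuming LIH_INDEX() is a plain modulo reduction and using simplified stand-in types rather than the kernel definitions:

    /* Sketch of the loss_interval.c ring: a growing counter indexes
     * a fixed-size ring; LIH_INDEX() here is an assumed modulo
     * reduction, and the structs are hypothetical stand-ins. */
    #include <stddef.h>
    #include <stdint.h>

    #define LIH_SIZE 8
    #define LIH_INDEX(ctr) ((ctr) % LIH_SIZE)

    struct interval { uint32_t li_length; };

    struct loss_hist {
        struct interval *ring[LIH_SIZE];
        uint8_t counter;            /* total intervals ever stored */
    };

    /* Newest entry, or NULL while the history is still empty. */
    static struct interval *lh_peek(struct loss_hist *lh)
    {
        return lh->counter ? lh->ring[LIH_INDEX(lh->counter - 1)] : NULL;
    }

    /* Number of valid entries: the counter, capped at the ring size. */
    static uint8_t lh_length(const struct loss_hist *lh)
    {
        return lh->counter < LIH_SIZE ? lh->counter : LIH_SIZE;
    }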
|
/net/netfilter/ |
D | nft_counter.c |
      24  struct nft_counter __percpu *counter;   member
      37  this_cpu = this_cpu_ptr(priv->counter);   in nft_counter_do_eval()
      79  priv->counter = cpu_stats;   in nft_counter_do_init()
      94  free_percpu(priv->counter);   in nft_counter_do_destroy()
     111  this_cpu = this_cpu_ptr(priv->counter);   in nft_counter_reset()
     129  this_cpu = per_cpu_ptr(priv->counter, cpu);   in nft_counter_fetch()
     248  priv_clone->counter = cpu_stats;   in nft_counter_clone()
     268  this_cpu = this_cpu_ptr(priv->counter);   in nft_counter_offload_stats()
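The eval path touches only this_cpu_ptr() while the fetch path walks every CPU with per_cpu_ptr(): the classic per-CPU counter split, where writers stay lock-free and cache-local and readers pay the cost of summing. A userspace C sketch of that split, with a plain array standing in for the kernel's percpu allocation and any snapshot synchronization the kernel layers on top omitted:

    /* Each CPU bumps only its own slot on the hot path; a fetch
     * sums every slot. NCPUS and the explicit cpu argument are
     * stand-ins for the kernel's percpu machinery. */
    #include <stdint.h>

    #define NCPUS 4

    struct pkt_counter { uint64_t bytes, packets; };

    static struct pkt_counter stats[NCPUS];

    static void counter_eval(int cpu, uint64_t pktlen)
    {
        stats[cpu].bytes += pktlen;   /* like this_cpu_ptr(priv->counter) */
        stats[cpu].packets++;
    }

    static struct pkt_counter counter_fetch(void)
    {
        struct pkt_counter total = {0};

        for (int cpu = 0; cpu < NCPUS; cpu++) {  /* like the per_cpu_ptr() loop */
            total.bytes   += stats[cpu].bytes;
            total.packets += stats[cpu].packets;
        }
        return total;
    }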
|
D | nft_numgen.c |
      20  atomic_t counter;   member
      29  oval = atomic_read(&priv->counter);   in nft_ng_inc_gen()
      31  } while (atomic_cmpxchg(&priv->counter, oval, nval) != oval);   in nft_ng_inc_gen()
      68  atomic_set(&priv->counter, priv->modulus - 1);   in nft_ng_inc_init()
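nft_ng_inc_gen() is a lock-free "increment modulo N": read the counter, compute its successor, and retry the compare-and-swap until no other CPU raced in between. Note the init hit seeds the counter with modulus - 1 so the first generated value is 0. A userspace sketch of the same loop using C11 atomics in place of atomic_cmpxchg():

    #include <stdatomic.h>
    #include <stdint.h>

    /* Lock-free increment modulo `modulus`, retried on contention. */
    static uint32_t ng_inc_gen(atomic_uint *counter, uint32_t modulus)
    {
        uint32_t oval, nval;

        do {
            oval = atomic_load(counter);
            nval = (oval + 1 < modulus) ? oval + 1 : 0;
        } while (!atomic_compare_exchange_weak(counter, &oval, nval));

        return nval;
    }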
|
D | nf_conntrack_standalone.c |
     282  struct nf_conn_counter *counter;   in seq_print_acct() local
     288  counter = acct->counter;   in seq_print_acct()
     290  (unsigned long long)atomic64_read(&counter[dir].packets),   in seq_print_acct()
     291  (unsigned long long)atomic64_read(&counter[dir].bytes));   in seq_print_acct()
|
D | xt_connbytes.c | 40 counters = acct->counter; in connbytes_mt()
|
D | nf_conntrack_core.c |
     950  struct nf_conn_counter *counter = acct->counter;   in nf_ct_acct_add() local
     952  atomic64_add(packets, &counter[dir].packets);   in nf_ct_acct_add()
     953  atomic64_add(bytes, &counter[dir].bytes);   in nf_ct_acct_add()
     965  struct nf_conn_counter *counter = acct->counter;   in nf_ct_acct_merge() local
     969  bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);   in nf_ct_acct_merge()
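nf_ct_acct_add() keeps per-direction packet and byte totals in atomic64_t, so the packet-processing fast path never takes a lock; seq_print_acct() above and xt_connbytes read the same pair with atomic64_read(). A userspace sketch of the layout, with C11 atomics standing in for atomic64_t:

    #include <stdatomic.h>
    #include <stdint.h>

    enum { DIR_ORIGINAL, DIR_REPLY, DIR_MAX };

    /* One packet/byte pair per connection direction. */
    struct conn_counter {
        atomic_ullong packets;
        atomic_ullong bytes;
    };

    /* Like nf_ct_acct_add(): lockless accumulation on the fast path. */
    static void ct_acct_add(struct conn_counter counter[DIR_MAX], int dir,
                            unsigned int packets, unsigned int bytes)
    {
        atomic_fetch_add(&counter[dir].packets, packets);
        atomic_fetch_add(&counter[dir].bytes, bytes);
    }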
|
D | x_tables.c |
    1922  struct xt_counters *counter)   in xt_percpu_counter_alloc() argument
    1924  BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));   in xt_percpu_counter_alloc()
    1935  counter->pcnt = (__force unsigned long)(state->mem + state->off);   in xt_percpu_counter_alloc()
    1936  state->off += sizeof(*counter);   in xt_percpu_counter_alloc()
    1937  if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {   in xt_percpu_counter_alloc()
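xt_percpu_counter_alloc() amortizes allocations: rather than one percpu allocation per rule, counters are carved sequentially out of an XT_PCPU_BLOCK_SIZE block, and a fresh block is started once the current one cannot fit another counter (the BUILD_BUG_ON guarantees a block holds at least two). A simplified single-CPU userspace sketch of the bump allocator, with freeing and error paths reduced to the essentials:

    #include <stdlib.h>
    #include <stddef.h>
    #include <stdint.h>

    #define BLOCK_SIZE 8192   /* stand-in for XT_PCPU_BLOCK_SIZE */

    struct counter_pair { uint64_t pcnt, bcnt; };

    struct alloc_state {
        char  *mem;   /* current block, NULL until first use */
        size_t off;   /* bump offset into the block */
    };

    static struct counter_pair *counter_alloc(struct alloc_state *state)
    {
        if (state->mem == NULL) {
            state->mem = malloc(BLOCK_SIZE);
            if (state->mem == NULL)
                return NULL;
            state->off = 0;
        }

        struct counter_pair *c = (struct counter_pair *)(state->mem + state->off);
        state->off += sizeof(*c);

        /* Block can't fit another counter: start a new one next call. */
        if (state->off > BLOCK_SIZE - sizeof(*c))
            state->mem = NULL;

        return c;
    }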
|
D | nft_ct.c |
     139  count = nft_ct_get_eval_counter(acct->counter,   in nft_ct_get_eval()
     149  pcnt = nft_ct_get_eval_counter(acct->counter,   in nft_ct_get_eval()
     151  bcnt = nft_ct_get_eval_counter(acct->counter,   in nft_ct_get_eval()
|
D | nf_conntrack_netlink.c |
     250  struct nf_conn_counter *counter = acct->counter;   in dump_counters() local
     255  pkts = atomic64_xchg(&counter[dir].packets, 0);   in dump_counters()
     256  bytes = atomic64_xchg(&counter[dir].bytes, 0);   in dump_counters()
     258  pkts = atomic64_read(&counter[dir].packets);   in dump_counters()
     259  bytes = atomic64_read(&counter[dir].bytes);   in dump_counters()
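dump_counters() picks between two read styles: a plain atomic64_read() for a read-only dump, and atomic64_xchg(..., 0) when the dump must also zero the counters, because the exchange makes read-and-reset a single atomic step so no concurrent increment can fall between the read and the reset. A one-function userspace sketch with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Read a counter, atomically zeroing it if this dump also resets. */
    static uint64_t dump_counter(atomic_ullong *c, bool reset)
    {
        return reset ? atomic_exchange(c, 0)   /* like atomic64_xchg() */
                     : atomic_load(c);         /* like atomic64_read() */
    }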
|
D | Kconfig |
     508  tristate "Netfilter nf_tables counter module"
     510  This option adds the "counter" expression that you can use to
    1202  tristate '"connbytes" per-connection counter match support'
    1509  byte counter.
    1519  byte counter correctly and not per CPU.
|
/net/openvswitch/ |
D | flow_table.c |
     204  u64 counter;   in tbl_mask_array_reset_counters() local
     209  counter = stats->usage_cntrs[i];   in tbl_mask_array_reset_counters()
     212  ma->masks_usage_zero_cntr[i] += counter;   in tbl_mask_array_reset_counters()
    1103  return (s64)mc_b->counter - (s64)mc_a->counter;   in compare_mask_and_count()
    1130  masks_and_count[i].counter = 0;   in ovs_flow_masks_rebalance()
    1135  u64 counter;   in ovs_flow_masks_rebalance() local
    1140  counter = stats->usage_cntrs[i];   in ovs_flow_masks_rebalance()
    1144  masks_and_count[i].counter += counter;   in ovs_flow_masks_rebalance()
    1148  masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];   in ovs_flow_masks_rebalance()
    1153  ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;   in ovs_flow_masks_rebalance()
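ovs_flow_masks_rebalance() snapshots each mask's usage counter, subtracts the "zero point" accumulated by earlier resets in masks_usage_zero_cntr, and sorts the masks most-used-first so flow lookups probe hot masks earlier. A userspace sketch of the sort step; unlike the kernel's subtraction-based comparator, this one compares explicitly to avoid truncating a 64-bit difference to int:

    #include <stdlib.h>
    #include <stdint.h>

    struct mask_count { int index; uint64_t counter; };

    /* Descending order: hottest mask first. */
    static int cmp_desc(const void *a, const void *b)
    {
        const struct mask_count *ma = a, *mb = b;

        if (mb->counter > ma->counter) return 1;
        if (mb->counter < ma->counter) return -1;
        return 0;
    }

    static void rebalance(struct mask_count *mc, size_t n)
    {
        qsort(mc, n, sizeof(*mc), cmp_desc);
    }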
|
D | flow_table.h | 38 u64 counter; member
|
/net/netfilter/ipset/ |
D | ip_set_core.c |
     518  ip_set_get_bytes(const struct ip_set_counter *counter)   in ip_set_get_bytes() argument
     520  return (u64)atomic64_read(&(counter)->bytes);   in ip_set_get_bytes()
     524  ip_set_get_packets(const struct ip_set_counter *counter)   in ip_set_get_packets() argument
     526  return (u64)atomic64_read(&(counter)->packets);   in ip_set_get_packets()
     530  ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter)   in ip_set_put_counter() argument
     533  cpu_to_be64(ip_set_get_bytes(counter)),   in ip_set_put_counter()
     536  cpu_to_be64(ip_set_get_packets(counter)),   in ip_set_put_counter()
     583  ip_set_match_counter(u64 counter, u64 match, u8 op)   in ip_set_match_counter() argument
     589  return counter == match;   in ip_set_match_counter()
     591  return counter != match;   in ip_set_match_counter()
     [all …]
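ip_set_match_counter() is a small dispatcher that compares a stored counter against a user-supplied value under an operator selected by the set's counter extension. A userspace sketch of the shape of that switch; the operator names below are illustrative stand-ins, not the kernel's enum values:

    #include <stdbool.h>
    #include <stdint.h>

    enum match_op { OP_EQ, OP_NE, OP_LT, OP_GT };

    /* Dispatch one counter comparison per the configured operator. */
    static bool match_counter(uint64_t counter, uint64_t match, enum match_op op)
    {
        switch (op) {
        case OP_EQ: return counter == match;
        case OP_NE: return counter != match;
        case OP_LT: return counter < match;
        case OP_GT: return counter > match;
        }
        return false;
    }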
|
/net/netfilter/ipvs/ |
D | ip_vs_lblc.c |
     109  int counter;   /* counter for no expire */   member
     304  if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {   in ip_vs_lblc_check_expire()
     307  tbl->counter = 1;   in ip_vs_lblc_check_expire()
     312  tbl->counter++;   in ip_vs_lblc_check_expire()
     367  tbl->counter = 1;   in ip_vs_lblc_init_svc()
|
D | ip_vs_lblcr.c |
     279  int counter;   /* counter for no expire */   member
     468  if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {   in ip_vs_lblcr_check_expire()
     471  tbl->counter = 1;   in ip_vs_lblcr_check_expire()
     476  tbl->counter++;   in ip_vs_lblcr_check_expire()
     530  tbl->counter = 1;   in ip_vs_lblcr_init_svc()
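Both LBLC schedulers drive their expiration timer the same way: most runs do only a cheap partial sweep and bump the counter, but every COUNT_FOR_FULL_EXPIRATION-th run does a full-table sweep and restarts the counter at 1 (which is also the value the init_svc() hits seed it with). A userspace sketch of that cadence; the constant's value here is illustrative, not quoted from the kernel:

    #define COUNT_FOR_FULL_EXPIRATION 30   /* illustrative value */

    struct tbl { int counter; };

    static void check_expire(struct tbl *tbl)
    {
        if (tbl->counter % COUNT_FOR_FULL_EXPIRATION == 0) {
            /* full sweep of every entry would run here */
            tbl->counter = 1;
            return;
        }
        /* cheap partial sweep would run here */
        tbl->counter++;
    }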
|
/net/ipv4/netfilter/ |
D | nf_nat_snmp_basic.asn1 |
      43  counter-value
      52  big-counter-value
|
D | arp_tables.c |
     222  struct xt_counters *counter;   in arpt_do_table() local
     229  counter = xt_get_this_cpu_counter(&e->counters);   in arpt_do_table()
     230  ADD_COUNTER(*counter, arp_hdr_len(skb->dev), 1);   in arpt_do_table()
|
D | ip_tables.c |
     281  struct xt_counters *counter;   in ipt_do_table() local
     298  counter = xt_get_this_cpu_counter(&e->counters);   in ipt_do_table()
     299  ADD_COUNTER(*counter, skb->len, 1);   in ipt_do_table()
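All three table walkers (arp_tables above, ip_tables here, and ip6_tables later in this listing) account a matched rule the same way: xt_get_this_cpu_counter() resolves the rule's per-CPU counter pair (carved out by the x_tables.c allocator shown earlier), then ADD_COUNTER() does two plain additions with no atomics. A userspace sketch of that macro pattern:

    #include <stdint.h>

    struct xt_counters { uint64_t bcnt, pcnt; };   /* bytes, packets */

    /* Mirrors the kernel macro: bump bytes and packets together. */
    #define ADD_COUNTER(c, b, p) \
        do { (c).bcnt += (b); (c).pcnt += (p); } while (0)

    static void count_match(struct xt_counters *counter, uint32_t pktlen)
    {
        /* CPU-local counter, so no atomics are needed on the hot path. */
        ADD_COUNTER(*counter, pktlen, 1);
    }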
|
/net/sctp/ |
D | objcnt.c | 69 atomic_read(sctp_dbg_objcnt[i].counter)); in sctp_objcnt_seq_show()
|
/net/sched/ |
D | sch_netem.c |
      91  u32 counter;   member
     531  q->counter < q->gap - 1 ||   /* inside last reordering gap */   in netem_enqueue()
     580  ++q->counter;   in netem_enqueue()
     588  q->counter = 0;   in netem_enqueue()
    1004  q->counter = 0;   in netem_change()
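netem's reordering knob works off a gap/counter pair: packets inside the gap are enqueued in order and advance the counter; once the counter has reached gap - 1, a random reorder check may let a packet jump the queue, restarting the counter at 0. A userspace sketch of just that decision, with reorder_hit standing in for netem's correlated random draw:

    #include <stdbool.h>
    #include <stdint.h>

    struct netem { uint32_t counter, gap; };

    /* Returns true if the packet keeps its place in the queue. */
    static bool enqueue_in_order(struct netem *q, bool reorder_hit)
    {
        if (q->gap == 0 ||                 /* reordering disabled */
            q->counter < q->gap - 1 ||     /* inside last reordering gap */
            !reorder_hit) {
            q->counter++;
            return true;
        }
        q->counter = 0;                    /* this packet jumps the queue */
        return false;
    }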
|
/net/smc/ |
D | smc_cdc.c |
     234  curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs);   in smcd_cdc_msg_send()
     237  curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs);   in smcd_cdc_msg_send()
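Here a connection cursor is read with a single atomic64_read() into the counter member of a union, so both halves of the cursor are captured in one consistent snapshot. A userspace sketch under the assumption that the cursor packs a wrap count and an offset into one 64-bit word (a simplification of the SMC cursor layout):

    #include <stdatomic.h>
    #include <stdint.h>

    union cursor {
        struct { uint32_t wrap; uint32_t count; } parts;  /* assumed layout */
        uint64_t counter;
    };

    /* One atomic load yields a consistent view of both halves. */
    static union cursor snapshot(atomic_ullong *acurs)
    {
        union cursor curs;

        curs.counter = atomic_load(acurs);   /* like atomic64_read() */
        return curs;
    }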
|
/net/ipv6/netfilter/ |
D | ip6_tables.c |
     304  struct xt_counters *counter;   in ip6t_do_table() local
     322  counter = xt_get_this_cpu_counter(&e->counters);   in ip6t_do_table()
     323  ADD_COUNTER(*counter, skb->len, 1);   in ip6t_do_table()
|
/net/ceph/ |
D | messenger_v2.c |
     835  u64 counter;   in gcm_inc_nonce() local
     837  counter = le64_to_cpu(nonce->counter);   in gcm_inc_nonce()
     838  nonce->counter = cpu_to_le64(counter + 1);   in gcm_inc_nonce()
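gcm_inc_nonce() keeps the AES-GCM nonce counter little-endian as it sits in the wire-format nonce, so incrementing it means convert to host order, add one, convert back. A portable userspace sketch that does the little-endian conversion byte-wise instead of with the kernel's le64 helpers:

    #include <stdint.h>

    struct gcm_nonce { uint8_t counter[8]; };   /* little-endian on the wire */

    /* Read the 8-byte counter as a little-endian integer. */
    static uint64_t get_le64(const uint8_t *p)
    {
        uint64_t v = 0;

        for (int i = 7; i >= 0; i--)
            v = (v << 8) | p[i];
        return v;
    }

    /* Write it back in little-endian byte order. */
    static void put_le64(uint8_t *p, uint64_t v)
    {
        for (int i = 0; i < 8; i++) {
            p[i] = v & 0xff;
            v >>= 8;
        }
    }

    static void inc_nonce(struct gcm_nonce *nonce)
    {
        put_le64(nonce->counter, get_le64(nonce->counter) + 1);
    }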
|
/net/batman-adv/ |
D | types.h | 2002 atomic_t counter; member
|