Home
last modified time | relevance | path

Searched refs:entries (Results 1 – 25 of 41) sorted by relevance

Pages: 1 2

/net/bridge/netfilter/
ebtables.c:211 base = private->entries; in ebt_do_table()
443 struct ebt_entry *e = (void *)newinfo->entries + offset; in ebt_verify_pointers()
452 repl->entries + offset) in ebt_verify_pointers()
841 if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) in translate_table()
863 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, in translate_table()
904 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, in translate_table()
917 cl_s, udc_cnt, i, newinfo->entries)) { in translate_table()
935 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, in translate_table()
938 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, in translate_table()
1042 EBT_ENTRY_ITERATE(table->entries, table->entries_size, in do_replace_finish()
[all …]
ebtable_filter.c:43 .entries = (char *)initial_chains,
ebtable_nat.c:43 .entries = (char *)initial_chains,
ebtable_broute.c:36 .entries = (char *)&initial_chain,
/net/xdp/
xsk_queue.h:98 u32 entries = q->prod_tail - q->cons_tail; in xskq_nb_avail() local
100 if (entries == 0) { in xskq_nb_avail()
103 entries = q->prod_tail - q->cons_tail; in xskq_nb_avail()
106 return (entries > dcnt) ? dcnt : entries; in xskq_nb_avail()
123 u32 entries = q->prod_tail - q->cons_tail; in xskq_has_addrs() local
125 if (entries >= cnt) in xskq_has_addrs()
130 entries = q->prod_tail - q->cons_tail; in xskq_has_addrs()
132 return entries >= cnt; in xskq_has_addrs()
xsk.c:460 static int xsk_init_queue(u32 entries, struct xsk_queue **queue, in xsk_init_queue() argument
465 if (entries == 0 || *queue || !is_power_of_2(entries)) in xsk_init_queue()
468 q = xskq_create(entries, umem_queue); in xsk_init_queue()
754 int entries; in xsk_setsockopt() local
756 if (optlen < sizeof(entries)) in xsk_setsockopt()
758 if (copy_from_user(&entries, optval, sizeof(entries))) in xsk_setsockopt()
767 err = xsk_init_queue(entries, q, false); in xsk_setsockopt()
810 int entries; in xsk_setsockopt() local
812 if (copy_from_user(&entries, optval, sizeof(entries))) in xsk_setsockopt()
827 err = xsk_init_queue(entries, q, true); in xsk_setsockopt()
/net/netfilter/ipvs/
ip_vs_lblc.c:106 atomic_t entries; /* number of entries */ member
173 atomic_inc(&tbl->entries); in ip_vs_lblc_hash()
241 atomic_dec(&tbl->entries); in ip_vs_lblc_flush()
275 atomic_dec(&tbl->entries); in ip_vs_lblc_full_check()
311 if (atomic_read(&tbl->entries) <= tbl->max_size) { in ip_vs_lblc_check_expire()
316 goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3; in ip_vs_lblc_check_expire()
329 atomic_dec(&tbl->entries); in ip_vs_lblc_check_expire()
370 atomic_set(&tbl->entries, 0); in ip_vs_lblc_init_svc()
ip_vs_lblcr.c:274 atomic_t entries; /* number of entries */ member
336 atomic_inc(&tbl->entries); in ip_vs_lblcr_hash()
439 atomic_dec(&tbl->entries); in ip_vs_lblcr_full_check()
475 if (atomic_read(&tbl->entries) <= tbl->max_size) { in ip_vs_lblcr_check_expire()
480 goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3; in ip_vs_lblcr_check_expire()
493 atomic_dec(&tbl->entries); in ip_vs_lblcr_check_expire()
533 atomic_set(&tbl->entries, 0); in ip_vs_lblcr_init_svc()
/net/netfilter/
xt_repldata.h:23 struct type##_standard entries[]; \
26 size_t term_offset = (offsetof(typeof(*tbl), entries[nhooks]) + \
43 tbl->entries[i++] = (struct type##_standard) \
xt_recent.c:82 unsigned int entries; member
149 t->entries--; in recent_entry_remove()
185 if (t->entries >= ip_list_tot) { in recent_entry_init()
205 t->entries++; in recent_entry_init()
nf_dup_netdev.c:67 entry = &flow->rule->action.entries[ctx->num_actions++]; in nft_fwd_dup_netdev_offload()
nf_nat_core.c:40 struct nf_hook_entries __rcu *entries; member
753 struct nf_hook_entries *e = rcu_dereference(lpriv->entries); in nf_nat_inet_fn()
1082 ret = nf_hook_entries_insert_raw(&priv->entries, ops); in nf_nat_register_fn()
1121 nf_hook_entries_delete_raw(&priv->entries, ops); in nf_nat_unregister_fn()
/net/xfrm/
xfrm_algo.c:607 int entries; member
614 .entries = ARRAY_SIZE(aead_list),
621 .entries = ARRAY_SIZE(aalg_list),
628 .entries = ARRAY_SIZE(ealg_list),
635 .entries = ARRAY_SIZE(calg_list),
648 for (i = 0; i < algo_list->entries; i++) { in xfrm_find_algo()
/net/ipv4/netfilter/
arp_tables.c:208 table_base = private->entries; in arpt_do_table()
610 xt_entry_foreach(iter, t->entries, t->size) { in get_counters()
637 xt_entry_foreach(iter, t->entries, t->size) { in get_old_counters()
684 loc_cpu_entry = private->entries; in copy_entries_to_user()
775 memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); in compat_table_info()
777 loc_cpu_entry = info->entries; in compat_table_info()
929 loc_cpu_old_entry = oldinfo->entries; in __do_replace()
975 loc_cpu_entry = newinfo->entries; in do_replace()
1032 xt_entry_foreach(iter, private->entries, private->size) { in do_add_counters()
1060 struct compat_arpt_entry entries[0]; member
[all …]
ip_tables.c:201 root = get_entry(private->entries, private->hook_entry[hook]); in trace_packet()
263 table_base = private->entries; in ipt_do_table()
751 xt_entry_foreach(iter, t->entries, t->size) { in get_counters()
778 xt_entry_foreach(iter, t->entries, t->size) { in get_old_counters()
826 loc_cpu_entry = private->entries; in copy_entries_to_user()
932 memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); in compat_table_info()
934 loc_cpu_entry = info->entries; in compat_table_info()
1084 xt_entry_foreach(iter, oldinfo->entries, oldinfo->size) in __do_replace()
1129 loc_cpu_entry = newinfo->entries; in do_replace()
1186 xt_entry_foreach(iter, private->entries, private->size) { in do_add_counters()
[all …]
ipt_CLUSTERIP.c:41 refcount_t entries; /* number of entries/rules member
118 if (refcount_dec_and_lock(&c->entries, &cn->lock)) { in clusterip_config_entry_put()
166 if (unlikely(!refcount_inc_not_zero(&c->entries))) { in clusterip_config_find_get()
292 refcount_set(&c->entries, 1); in clusterip_config_init()
iptable_filter.c:58 ((struct ipt_standard *)repl->entries)[1].target.verdict = in iptable_filter_table_init()
/net/ipv6/netfilter/
ip6_tables.c:226 root = get_entry(private->entries, private->hook_entry[hook]); in trace_packet()
286 table_base = private->entries; in ip6t_do_table()
769 xt_entry_foreach(iter, t->entries, t->size) { in get_counters()
796 xt_entry_foreach(iter, t->entries, t->size) { in get_old_counters()
843 loc_cpu_entry = private->entries; in copy_entries_to_user()
949 memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); in compat_table_info()
951 loc_cpu_entry = info->entries; in compat_table_info()
1102 xt_entry_foreach(iter, oldinfo->entries, oldinfo->size) in __do_replace()
1147 loc_cpu_entry = newinfo->entries; in do_replace()
1203 xt_entry_foreach(iter, private->entries, private->size) { in do_add_counters()
[all …]
ip6table_filter.c:59 ((struct ip6t_standard *)repl->entries)[1].target.verdict = in ip6table_filter_table_init()
/net/sched/
sch_taprio.c:54 struct list_head entries; member
124 list_for_each_entry_safe(entry, n, &sched->entries, list) { in taprio_free_sched_cb()
221 list_for_each_entry(entry, &sched->entries, list) { in find_entry_to_transmit()
653 if (list_is_last(&entry->list, &oper->entries)) in should_restart_cycle()
723 next = list_first_entry(&oper->entries, struct sched_entry, in advance_sched()
730 next = list_first_entry(&oper->entries, struct sched_entry, in advance_sched()
868 list_add_tail(&entry->list, &sched->entries); in parse_sched_list()
907 list_for_each_entry(entry, &new->entries, list) in parse_taprio_schedule()
1027 first = list_first_entry(&sched->entries, in setup_first_close_time()
1121 list_for_each_entry(entry, &sched->entries, list) { in setup_txtime()
[all …]
/net/core/
drop_monitor.c:70 struct net_dm_hw_entry entries[0]; member
235 for (i = 0; i < msg->entries; i++) { in trace_drop_common()
242 if (msg->entries == dm_hit_limit) in trace_drop_common()
251 msg->entries++; in trace_drop_common()
308 hw_entries = kzalloc(struct_size(hw_entries, entries, dm_hit_limit), in net_dm_hw_reset_per_cpu_data()
362 rc = net_dm_hw_entry_put(msg, &hw_entries->entries[i]); in net_dm_hw_entries_put()
455 hw_entry = &hw_entries->entries[i]; in net_dm_hw_summary_probe()
465 hw_entry = &hw_entries->entries[hw_entries->num_entries]; in net_dm_hw_summary_probe()
/net/sunrpc/
cache.c:104 detail->entries --; in sunrpc_cache_add_entry()
116 detail->entries++; in sunrpc_cache_add_entry()
202 detail->entries++; in sunrpc_cache_update()
369 cd->entries = 0; in sunrpc_init_cache_detail()
458 current_detail->entries--; in cache_clean()
519 if (!detail->entries) { in cache_purge()
524 dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name); in cache_purge()
529 detail->entries--; in cache_purge()
1890 cd->entries--; in sunrpc_cache_unhash()
/net/can/
af_can.c:477 dev_rcv_lists->entries++; in can_rx_register()
553 dev_rcv_lists->entries--; in can_rx_unregister()
583 if (dev_rcv_lists->entries == 0) in can_rcv_filter()
/net/mpls/
Kconfig:23 that have had MPLS stack entries pushed onto them and thus
/net/batman-adv/
Kconfig:80 bool "batman-adv debugfs entries"
101 bool "batman-adv sysfs entries"

Pages: 1 2