/net/bridge/netfilter/

ebtables.c
     209  base = private->entries;  in ebt_do_table()
     434  struct ebt_entry *e = (void *)newinfo->entries + offset;  in ebt_verify_pointers()
     443  repl->entries + offset)  in ebt_verify_pointers()
     833  if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries)  in translate_table()
     855  ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,  in translate_table()
     895  EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,  in translate_table()
     908  cl_s, udc_cnt, i, newinfo->entries)) {  in translate_table()
     926  ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,  in translate_table()
     929  EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,  in translate_table()
    1033  EBT_ENTRY_ITERATE(table->entries, table->entries_size,  in do_replace_finish()
    [all …]
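The ebtables hits above all touch one idea: a table's rules live in a single contiguous entries blob of entries_size bytes, the hook_entry[] pointers mark where each chain begins inside it, and EBT_ENTRY_ITERATE() walks the blob by advancing a byte offset from entry to entry. Below is a minimal userspace sketch of that walk, under the assumption that each entry records its own size; the demo_entry/walk_entries names are hypothetical, not the kernel structures.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for a variable-sized rule entry: the walker only
 * relies on next_offset, the distance in bytes to the following entry. */
struct demo_entry {
    unsigned int next_offset;   /* total size of this entry */
    char payload[];             /* matches/watchers/target would follow */
};

/* Walk a contiguous blob of variable-sized entries, calling fn() on each,
 * the way EBT_ENTRY_ITERATE() walks newinfo->entries/entries_size. */
static int walk_entries(void *base, unsigned int total_size,
                        int (*fn)(struct demo_entry *e, void *arg), void *arg)
{
    unsigned int offset = 0;

    while (offset < total_size) {
        struct demo_entry *e = (struct demo_entry *)((char *)base + offset);
        int ret = fn(e, arg);

        if (ret)
            return ret;                   /* non-zero verdict aborts the walk */
        if (e->next_offset < sizeof(*e) ||
            offset + e->next_offset > total_size)
            return -1;                    /* malformed entry: refuse to loop */
        offset += e->next_offset;
    }
    return 0;
}

static int count_entry(struct demo_entry *e, void *arg)
{
    (void)e;
    (*(unsigned int *)arg)++;
    return 0;
}

int main(void)
{
    /* Two entries of different sizes packed into one aligned blob. */
    union { char bytes[64]; unsigned int align; } blob;
    unsigned int n = 0;

    ((struct demo_entry *)blob.bytes)->next_offset = 16;
    ((struct demo_entry *)(blob.bytes + 16))->next_offset = 48;

    walk_entries(blob.bytes, sizeof(blob.bytes), count_entry, &n);
    printf("%u entries\n", n);            /* prints: 2 entries */
    return 0;
}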
ebtable_broute.c
      33  .entries = (char *)&initial_chain,

ebtable_filter.c
      41  .entries = (char *)initial_chains,

ebtable_nat.c
      41  .entries = (char *)initial_chains,
/net/netfilter/

xt_repldata.h
      23  struct type##_standard entries[]; \
      26  size_t term_offset = (offsetof(typeof(*tbl), entries[nhooks]) + \
      43  tbl->entries[i++] = (struct type##_standard) \
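xt_repldata.h is the template that builds an initial x_tables replacement blob: a header followed by a flexible array of one standard rule per hook, with an error terminator placed at the offset of entries[nhooks]. A rough userspace sketch of the same layout trick follows; demo_repl and friends are made-up names, and the offset is computed as offsetof(entries) + nhooks * sizeof(entry), equivalent to the offsetof(typeof(*tbl), entries[nhooks]) form shown above.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for the per-hook standard rule and the terminator. */
struct demo_standard { unsigned int verdict; };
struct demo_error    { char msg[16]; };

/* Shape of the template: a fixed header, one standard rule per hook in a
 * flexible array, and an error record appended right after entries[nhooks]. */
struct demo_repl {
    unsigned int num_hooks;
    struct demo_standard entries[];     /* flexible array member */
};

int main(void)
{
    unsigned int nhooks = 3;
    /* Same offset the kernel macro derives from entries[nhooks]. */
    size_t term_offset = offsetof(struct demo_repl, entries) +
                         nhooks * sizeof(struct demo_standard);
    struct demo_repl *tbl = calloc(1, term_offset + sizeof(struct demo_error));

    if (!tbl)
        return 1;
    tbl->num_hooks = nhooks;
    for (unsigned int i = 0; i < nhooks; i++)
        tbl->entries[i] = (struct demo_standard){ .verdict = i };

    /* The terminator lives just past the last per-hook entry. */
    strcpy(((struct demo_error *)((char *)tbl + term_offset))->msg, "ERROR");

    printf("terminator at offset %zu\n", term_offset);   /* offset 16 here */
    free(tbl);
    return 0;
}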
nf_queue.c
     115  const struct nf_hook_entries *entries,  in __nf_queue() argument
     171  const struct nf_hook_entries *entries, unsigned int index,  in nf_queue() argument
     176  ret = __nf_queue(skb, state, entries, index, verdict >> NF_VERDICT_QBITS);  in nf_queue()

nf_internals.h
      11  const struct nf_hook_entries *entries, unsigned int index,
xt_recent.c
      85  unsigned int entries;  member
     152  t->entries--;  in recent_entry_remove()
     181  if (t->entries >= ip_list_tot) {  in recent_entry_init()
     202  t->entries++;  in recent_entry_init()
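xt_recent keeps a plain entries counter per table: recent_entry_remove() decrements it, and recent_entry_init() checks it against the ip_list_tot limit before adding, evicting an old entry when the table is full. Here is a small sketch of that bounded-list behaviour, assuming oldest-first eviction; the demo_* names and the LIST_TOT constant are hypothetical.

#include <stdio.h>
#include <stdlib.h>

#define LIST_TOT 4   /* stand-in for the ip_list_tot module parameter */

struct demo_entry {
    unsigned int addr;
    struct demo_entry *next;
};

struct demo_table {
    struct demo_entry *head, *tail;   /* oldest at head, newest at tail */
    unsigned int entries;
};

static void entry_remove_oldest(struct demo_table *t)
{
    struct demo_entry *e = t->head;

    if (!e)
        return;
    t->head = e->next;
    if (!t->head)
        t->tail = NULL;
    free(e);
    t->entries--;                      /* mirrors recent_entry_remove() */
}

static void entry_init(struct demo_table *t, unsigned int addr)
{
    struct demo_entry *e;

    if (t->entries >= LIST_TOT)        /* table full: evict before adding */
        entry_remove_oldest(t);

    e = calloc(1, sizeof(*e));
    if (!e)
        return;
    e->addr = addr;
    if (t->tail)
        t->tail->next = e;
    else
        t->head = e;
    t->tail = e;
    t->entries++;                      /* mirrors recent_entry_init() */
}

int main(void)
{
    struct demo_table t = { 0 };

    for (unsigned int a = 1; a <= 6; a++)
        entry_init(&t, a);
    printf("entries=%u oldest=%u\n", t.entries, t.head->addr);  /* 4 and 3 */
    return 0;
}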
/net/netfilter/ipvs/

ip_vs_lblc.c
     109  atomic_t entries; /* number of entries */  member
     176  atomic_inc(&tbl->entries);  in ip_vs_lblc_hash()
     244  atomic_dec(&tbl->entries);  in ip_vs_lblc_flush()
     278  atomic_dec(&tbl->entries);  in ip_vs_lblc_full_check()
     314  if (atomic_read(&tbl->entries) <= tbl->max_size) {  in ip_vs_lblc_check_expire()
     319  goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;  in ip_vs_lblc_check_expire()
     332  atomic_dec(&tbl->entries);  in ip_vs_lblc_check_expire()

ip_vs_lblcr.c
     278  atomic_t entries; /* number of entries */  member
     339  atomic_inc(&tbl->entries);  in ip_vs_lblcr_hash()
     442  atomic_dec(&tbl->entries);  in ip_vs_lblcr_full_check()
     478  if (atomic_read(&tbl->entries) <= tbl->max_size) {  in ip_vs_lblcr_check_expire()
     483  goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;  in ip_vs_lblcr_check_expire()
     496  atomic_dec(&tbl->entries);  in ip_vs_lblcr_check_expire()
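ip_vs_lblc and ip_vs_lblcr share the same bookkeeping: an atomic entries counter adjusted on hash/flush, and a periodic check that, once the counter exceeds max_size, sets a collection goal of (entries - max_size) * 4/3 and sweeps stale entries until that many are gone. The sketch below shows the goal computation using C11 atomics in place of the kernel's atomic_t; the sweep itself is stubbed out and all names are hypothetical.

#include <stdatomic.h>
#include <stdio.h>

#define TBL_MAX_SIZE 1000   /* stand-in for tbl->max_size */

struct demo_tbl {
    atomic_int entries;     /* number of hash entries, like tbl->entries */
};

/* Periodic check in the spirit of ip_vs_lblc(r)_check_expire(): when the
 * table is over its soft limit, aim to reclaim a third more than the
 * overshoot so the next run starts with some headroom. */
static void check_expire(struct demo_tbl *tbl)
{
    int cur = atomic_load(&tbl->entries);
    int goal;

    if (cur <= TBL_MAX_SIZE)
        return;                              /* nothing to do yet */

    goal = (cur - TBL_MAX_SIZE) * 4 / 3;
    printf("over by %d, collection goal %d\n", cur - TBL_MAX_SIZE, goal);

    /* A real sweep walks the hash buckets and frees only entries whose
     * last-use time is old enough; here the counter is simply reduced. */
    while (goal-- > 0)
        atomic_fetch_sub(&tbl->entries, 1);  /* atomic_dec(&tbl->entries) */
}

int main(void)
{
    struct demo_tbl tbl;

    atomic_init(&tbl.entries, 1300);
    check_expire(&tbl);                      /* over by 300, goal 400 */
    printf("entries now %d\n", atomic_load(&tbl.entries));   /* 900 */
    return 0;
}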
/net/xfrm/

xfrm_algo.c
     611  int entries;  member
     618  .entries = ARRAY_SIZE(aead_list),
     625  .entries = ARRAY_SIZE(aalg_list),
     632  .entries = ARRAY_SIZE(ealg_list),
     639  .entries = ARRAY_SIZE(calg_list),
     652  for (i = 0; i < algo_list->entries; i++) {  in xfrm_find_algo()
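xfrm_algo.c describes each algorithm family with a table pointer plus an entries count filled in by ARRAY_SIZE(), and xfrm_find_algo() is a linear scan bounded by that count. A self-contained sketch of the same descriptor-plus-scan shape; the demo_algo types and the sample algorithm names are illustrative only.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct demo_algo {
    const char *name;
    int id;
};

/* One descriptor per algorithm family: a table plus its entry count,
 * mirroring the { .entries = ARRAY_SIZE(aalg_list) } initializers above. */
struct demo_algo_list {
    const struct demo_algo *algs;
    int entries;
};

static const struct demo_algo aalg_list[] = {
    { "hmac(md5)",    1 },
    { "hmac(sha1)",   2 },
    { "hmac(sha256)", 3 },
};

static const struct demo_algo_list auth_list = {
    .algs    = aalg_list,
    .entries = ARRAY_SIZE(aalg_list),
};

/* Linear scan bounded by algo_list->entries, like xfrm_find_algo(). */
static const struct demo_algo *find_algo(const struct demo_algo_list *list,
                                         const char *name)
{
    for (int i = 0; i < list->entries; i++)
        if (!strcmp(list->algs[i].name, name))
            return &list->algs[i];
    return NULL;
}

int main(void)
{
    const struct demo_algo *a = find_algo(&auth_list, "hmac(sha1)");

    printf("%s -> id %d\n", a ? a->name : "none", a ? a->id : -1);
    return 0;
}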
/net/core/

drop_monitor.c
     175  for (i = 0; i < msg->entries; i++) {  in trace_drop_common()
     181  if (msg->entries == dm_hit_limit)  in trace_drop_common()
     188  memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));  in trace_drop_common()
     189  msg->points[msg->entries].count = 1;  in trace_drop_common()
     190  msg->entries++;  in trace_drop_common()
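trace_drop_common() first scans the points already queued in the pending message: a drop at a known location only bumps that point's count, a new location is appended and msg->entries incremented, and hitting dm_hit_limit means the message must be flushed first. A compact sketch of that dedup-and-append accumulator; HIT_LIMIT and the demo_* names are stand-ins.

#include <stdio.h>

#define HIT_LIMIT 8     /* stand-in for dm_hit_limit */

struct demo_point {
    void *pc;            /* drop location (program counter) */
    unsigned int count;  /* how many drops were seen there  */
};

struct demo_msg {
    struct demo_point points[HIT_LIMIT];
    unsigned int entries;
};

/* Record one drop at 'location': bump an existing point or append a new one,
 * in the spirit of trace_drop_common(). Returns 0, or -1 if the buffer is
 * full and the message should be flushed to userspace first. */
static int record_drop(struct demo_msg *msg, void *location)
{
    for (unsigned int i = 0; i < msg->entries; i++) {
        if (msg->points[i].pc == location) {
            msg->points[i].count++;
            return 0;
        }
    }
    if (msg->entries == HIT_LIMIT)
        return -1;                        /* schedule a send, then retry */

    msg->points[msg->entries].pc = location;
    msg->points[msg->entries].count = 1;
    msg->entries++;
    return 0;
}

int main(void)
{
    struct demo_msg msg = { .entries = 0 };
    int dummy_a, dummy_b;

    record_drop(&msg, &dummy_a);
    record_drop(&msg, &dummy_b);
    record_drop(&msg, &dummy_a);          /* duplicate: bumps count only */

    printf("entries=%u first count=%u\n", msg.entries, msg.points[0].count);
    return 0;                             /* prints: entries=2 first count=2 */
}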
neighbour.c
     309  int entries;  in neigh_alloc() local
     311  entries = atomic_inc_return(&tbl->entries) - 1;  in neigh_alloc()
     312  if (entries >= tbl->gc_thresh3 ||  in neigh_alloc()
     313  (entries >= tbl->gc_thresh2 &&  in neigh_alloc()
     316  entries >= tbl->gc_thresh3) {  in neigh_alloc()
     346  atomic_dec(&tbl->entries);  in neigh_alloc()
     538  if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))  in __neigh_create()
     761  atomic_dec(&neigh->tbl->entries);  in neigh_destroy()
     816  if (atomic_read(&tbl->entries) < tbl->gc_thresh1)  in neigh_periodic_work()
    1614  if (atomic_read(&tbl->entries))  in neigh_table_clear()
    [all …]
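neigh_alloc() increments tbl->entries atomically and keeps the old value, then walks a threshold ladder: above gc_thresh3, or above gc_thresh2 when the table has not been flushed recently, it forces a garbage collection, and if nothing was reclaimed while still above gc_thresh3 it rolls the counter back and refuses the allocation. Below is a sketch of that admission check, assuming the forced GC reports whether it freed anything; the names are hypothetical.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_tbl {
    atomic_int entries;
    int gc_thresh2;          /* soft limit: collect if the table is stale */
    int gc_thresh3;          /* hard limit: collect now, refuse if still over */
};

/* Stand-in for a forced GC: a real one drops unreferenced, stale neighbours
 * and reports whether it freed anything. This one never does. */
static bool forced_gc(struct demo_tbl *tbl)
{
    (void)tbl;
    return false;
}

/* Admission check in the spirit of neigh_alloc(): returns true if a new
 * neighbour entry may be created, false if the table is considered full. */
static bool neigh_admit(struct demo_tbl *tbl, bool flush_is_old)
{
    int entries = atomic_fetch_add(&tbl->entries, 1);   /* old value */

    if (entries >= tbl->gc_thresh3 ||
        (entries >= tbl->gc_thresh2 && flush_is_old)) {
        if (!forced_gc(tbl) && entries >= tbl->gc_thresh3) {
            atomic_fetch_sub(&tbl->entries, 1);          /* roll back */
            return false;
        }
    }
    return true;
}

int main(void)
{
    struct demo_tbl tbl = { .gc_thresh2 = 2, .gc_thresh3 = 4 };

    atomic_init(&tbl.entries, 0);
    for (int i = 0; i < 6; i++)
        printf("alloc %d: %s\n", i,
               neigh_admit(&tbl, false) ? "ok" : "refused");
    return 0;
}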
/net/ipv4/netfilter/

arp_tables.c
     212  table_base = private->entries;  in arpt_do_table()
     627  xt_entry_foreach(iter, t->entries, t->size) {  in get_counters()
     682  loc_cpu_entry = private->entries;  in copy_entries_to_user()
     773  memcpy(newinfo, info, offsetof(struct xt_table_info, entries));  in compat_table_info()
     775  loc_cpu_entry = info->entries;  in compat_table_info()
     928  loc_cpu_old_entry = oldinfo->entries;  in __do_replace()
     975  loc_cpu_entry = newinfo->entries;  in do_replace()
    1032  xt_entry_foreach(iter, private->entries, private->size) {  in do_add_counters()
    1060  struct compat_arpt_entry entries[0];  member
    1204  entry1 = newinfo->entries;  in translate_compat_table()
    [all …]

ip_tables.c
     203  root = get_entry(private->entries, private->hook_entry[hook]);  in trace_packet()
     270  table_base = private->entries;  in ipt_do_table()
     771  xt_entry_foreach(iter, t->entries, t->size) {  in get_counters()
     826  loc_cpu_entry = private->entries;  in copy_entries_to_user()
     932  memcpy(newinfo, info, offsetof(struct xt_table_info, entries));  in compat_table_info()
     934  loc_cpu_entry = info->entries;  in compat_table_info()
    1085  xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)  in __do_replace()
    1131  loc_cpu_entry = newinfo->entries;  in do_replace()
    1188  xt_entry_foreach(iter, private->entries, private->size) {  in do_add_counters()
    1216  struct compat_ipt_entry entries[0];  member
    [all …]
ipt_CLUSTERIP.c
      45  refcount_t entries; /* number of entries/rules  member
     109  if (refcount_dec_and_lock(&c->entries, &cn->lock)) {  in clusterip_config_entry_put()
     158  refcount_inc(&c->entries);  in clusterip_config_find_get()
     262  refcount_set(&c->entries, 1);  in clusterip_config_init()
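ipt_CLUSTERIP counts the rules referencing a config in a refcount_t entries and drops the last reference with refcount_dec_and_lock(), so the config can be unhashed under the list lock exactly once. What follows is a userspace analogue of that "decrement, and take the lock only if it might hit zero" pattern, built from C11 atomics and a pthread mutex rather than the kernel's refcount_t/spinlock_t; all names are invented.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_config {
    atomic_int entries;           /* number of rules using this config */
    pthread_mutex_t *list_lock;   /* protects the list the config sits on */
    bool hashed;
};

/* Drop one reference. If it was the last one, return true with the list
 * lock held so the caller can unlink the config race-free, mirroring the
 * semantics of refcount_dec_and_lock() in clusterip_config_entry_put(). */
static bool config_entry_put(struct demo_config *c)
{
    int old = atomic_load(&c->entries);

    /* Fast path: clearly not the last reference, no lock needed. */
    while (old > 1) {
        if (atomic_compare_exchange_weak(&c->entries, &old, old - 1))
            return false;
    }

    /* Possibly the last reference: take the lock, then drop it for real. */
    pthread_mutex_lock(c->list_lock);
    if (atomic_fetch_sub(&c->entries, 1) == 1)
        return true;              /* caller unlinks, then unlocks */
    pthread_mutex_unlock(c->list_lock);
    return false;
}

int main(void)
{
    pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    struct demo_config c = { .list_lock = &lock, .hashed = true };

    atomic_init(&c.entries, 2);   /* two rules reference this config */

    config_entry_put(&c);         /* still referenced: nothing happens */
    if (config_entry_put(&c)) {   /* last put: unhash under the lock */
        c.hashed = false;
        pthread_mutex_unlock(&lock);
    }
    printf("hashed=%d entries=%d\n", c.hashed, atomic_load(&c.entries));
    return 0;
}

The point of the two-step shape is that the common, non-final put never touches the lock; only a decrement that might reach zero pays for it, which is what refcount_dec_and_lock() buys the kernel. Build the sketch with -pthread.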
iptable_filter.c
      68  ((struct ipt_standard *)repl->entries)[1].target.verdict =  in iptable_filter_table_init()
/net/ipv6/netfilter/

ip6_tables.c
     228  root = get_entry(private->entries, private->hook_entry[hook]);  in trace_packet()
     292  table_base = private->entries;  in ip6t_do_table()
     789  xt_entry_foreach(iter, t->entries, t->size) {  in get_counters()
     844  loc_cpu_entry = private->entries;  in copy_entries_to_user()
     950  memcpy(newinfo, info, offsetof(struct xt_table_info, entries));  in compat_table_info()
     952  loc_cpu_entry = info->entries;  in compat_table_info()
    1104  xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)  in __do_replace()
    1150  loc_cpu_entry = newinfo->entries;  in do_replace()
    1206  xt_entry_foreach(iter, private->entries, private->size) {  in do_add_counters()
    1234  struct compat_ip6t_entry entries[0];  member
    [all …]

ip6table_filter.c
      62  ((struct ip6t_standard *)repl->entries)[1].target.verdict =  in ip6table_filter_table_init()
/net/can/

af_can.c
     499  d->entries++;  in can_rx_register()
     590  d->entries--;  in can_rx_unregister()
     596  if (d->remove_on_zero_entries && !d->entries) {  in can_rx_unregister()
     626  if (d->entries == 0)  in can_rcv_filter()
     848  if (d->entries)  in can_notifier()
     918  BUG_ON(d->entries);  in can_pernet_exit()

af_can.h
      75  int entries;  member
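af_can keeps a per-device receiver count: can_rx_register()/can_rx_unregister() adjust d->entries, can_rcv_filter() skips delivery when it is zero, and a device list flagged remove_on_zero_entries is freed once the last receiver is gone. A small sketch of that lifecycle; the demo_dev_rcv_lists type is a loose, hypothetical stand-in.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Per-device receiver bookkeeping, loosely modelled on the can_dev_rcv_lists
 * structure whose 'entries' member is listed above. */
struct demo_dev_rcv_lists {
    int entries;                  /* active receivers on this device */
    bool remove_on_zero_entries;  /* device is gone; free when unused */
};

static void rx_register(struct demo_dev_rcv_lists *d)
{
    d->entries++;
}

/* Returns true if the caller must free d: the last receiver just left a
 * device that has already disappeared. */
static bool rx_unregister(struct demo_dev_rcv_lists *d)
{
    d->entries--;
    return d->remove_on_zero_entries && !d->entries;
}

static void rcv_filter(const struct demo_dev_rcv_lists *d)
{
    if (d->entries == 0) {        /* shortcut: nobody listens here */
        puts("no receivers, frame ignored");
        return;
    }
    printf("delivering to %d receiver list(s)\n", d->entries);
}

int main(void)
{
    struct demo_dev_rcv_lists *d = calloc(1, sizeof(*d));

    if (!d)
        return 1;
    rx_register(d);
    rcv_filter(d);                        /* delivering to 1 receiver list */
    d->remove_on_zero_entries = true;     /* device went away */
    if (rx_unregister(d))
        free(d);                          /* last user cleans up */
    return 0;
}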
/net/sunrpc/

cache.c
     100  detail->entries --;  in sunrpc_cache_lookup()
     112  detail->entries++;  in sunrpc_cache_lookup()
     187  detail->entries++;  in sunrpc_cache_update()
     353  cd->entries = 0;  in sunrpc_init_cache_detail()
     442  current_detail->entries--;  in cache_clean()
     503  if (!detail->entries) {  in cache_purge()
     508  dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);  in cache_purge()
     513  detail->entries--;  in cache_purge()
    1855  cd->entries--;  in sunrpc_cache_unhash()
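The sunrpc cache keeps a per-cache entries counter adjusted on lookup, update and unhash, and cache_purge() returns early when it is already zero, otherwise reports how many entries remain and drops them. Here is a toy hash cache with the same purge-by-counter shape; the demo_* names are hypothetical.

#include <stdio.h>
#include <stdlib.h>

#define HASH_SIZE 8

struct demo_head {
    struct demo_head *next;
    int key;
};

struct demo_cache_detail {
    struct demo_head *hash_table[HASH_SIZE];
    int entries;                      /* total cached items, like cd->entries */
};

static void cache_add(struct demo_cache_detail *cd, int key)
{
    struct demo_head *h = calloc(1, sizeof(*h));

    if (!h)
        return;
    h->key = key;
    h->next = cd->hash_table[key % HASH_SIZE];
    cd->hash_table[key % HASH_SIZE] = h;
    cd->entries++;                    /* mirrors sunrpc_cache_update() */
}

/* Drop everything, in the spirit of cache_purge(): nothing to do when the
 * counter is already zero, otherwise unhash until it reaches zero. */
static void cache_purge(struct demo_cache_detail *cd)
{
    if (!cd->entries)
        return;

    printf("%d entries to purge\n", cd->entries);
    for (int i = 0; i < HASH_SIZE; i++) {
        while (cd->hash_table[i]) {
            struct demo_head *h = cd->hash_table[i];

            cd->hash_table[i] = h->next;
            free(h);
            cd->entries--;            /* mirrors sunrpc_cache_unhash() */
        }
    }
}

int main(void)
{
    struct demo_cache_detail cd = { .entries = 0 };

    for (int k = 0; k < 5; k++)
        cache_add(&cd, k);
    cache_purge(&cd);
    printf("entries after purge: %d\n", cd.entries);   /* 0 */
    return 0;
}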
/net/mpls/

Kconfig
      22  that have had MPLS stack entries pushed onto them and thus
/net/wireless/

Kconfig
     134  bool "cfg80211 DebugFS entries"
     138  You can enable this if you want debugfs entries for cfg80211.
/net/atm/

Kconfig
      36  ATMARP table. This may cause problems when ATMARP table entries are