/net/bridge/netfilter/
  ebtables.c
      211   base = private->entries;   in ebt_do_table()
      443   struct ebt_entry *e = (void *)newinfo->entries + offset;   in ebt_verify_pointers()
      452   repl->entries + offset)   in ebt_verify_pointers()
      841   if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries)   in translate_table()
      863   ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,   in translate_table()
      904   EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,   in translate_table()
      917   cl_s, udc_cnt, i, newinfo->entries)) {   in translate_table()
      935   ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,   in translate_table()
      938   EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,   in translate_table()
      1042  EBT_ENTRY_ITERATE(table->entries, table->entries_size,   in do_replace_finish()
      [all …]

  ebtable_filter.c
      43    .entries = (char *)initial_chains,

  ebtable_nat.c
      43    .entries = (char *)initial_chains,

  ebtable_broute.c
      36    .entries = (char *)&initial_chain,

/net/xdp/
  xsk_queue.h
      98    u32 entries = q->prod_tail - q->cons_tail;   in xskq_nb_avail()   local
      100   if (entries == 0) {   in xskq_nb_avail()
      103   entries = q->prod_tail - q->cons_tail;   in xskq_nb_avail()
      106   return (entries > dcnt) ? dcnt : entries;   in xskq_nb_avail()
      123   u32 entries = q->prod_tail - q->cons_tail;   in xskq_has_addrs()   local
      125   if (entries >= cnt)   in xskq_has_addrs()
      130   entries = q->prod_tail - q->cons_tail;   in xskq_has_addrs()
      132   return entries >= cnt;   in xskq_has_addrs()

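The xskq_nb_avail() and xskq_has_addrs() hits above revolve around one piece of arithmetic: the number of available ring entries is the unsigned difference between the producer and consumer positions, optionally capped at the caller's batch size. A minimal userspace sketch of that arithmetic follows; the struct layout and names are illustrative stand-ins, not the kernel's actual xsk_queue definitions.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical single-producer/single-consumer ring descriptor. */
struct ring {
	uint32_t prod_tail;
	uint32_t cons_tail;
};

/* Unsigned subtraction handles index wraparound; cap the result at dcnt,
 * mirroring the (entries > dcnt) ? dcnt : entries pattern in the hits above. */
static uint32_t ring_nb_avail(const struct ring *q, uint32_t dcnt)
{
	uint32_t entries = q->prod_tail - q->cons_tail;

	return (entries > dcnt) ? dcnt : entries;
}

int main(void)
{
	struct ring q = { .prod_tail = 10, .cons_tail = 7 };

	printf("%u\n", ring_nb_avail(&q, 2)); /* prints 2: capped at dcnt   */
	printf("%u\n", ring_nb_avail(&q, 8)); /* prints 3: prod - cons      */
	return 0;
}
```

Because the indices are plain 32-bit counters, the unsigned subtraction stays correct even after the counters wrap, which is why they are never masked before subtracting.
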
  xsk.c
      460   static int xsk_init_queue(u32 entries, struct xsk_queue **queue,   in xsk_init_queue()   argument
      465   if (entries == 0 || *queue || !is_power_of_2(entries))   in xsk_init_queue()
      468   q = xskq_create(entries, umem_queue);   in xsk_init_queue()
      754   int entries;   in xsk_setsockopt()   local
      756   if (optlen < sizeof(entries))   in xsk_setsockopt()
      758   if (copy_from_user(&entries, optval, sizeof(entries)))   in xsk_setsockopt()
      767   err = xsk_init_queue(entries, q, false);   in xsk_setsockopt()
      810   int entries;   in xsk_setsockopt()   local
      812   if (copy_from_user(&entries, optval, sizeof(entries)))   in xsk_setsockopt()
      827   err = xsk_init_queue(entries, q, true);   in xsk_setsockopt()

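xsk_init_queue(), as listed above, refuses to create a ring whose size is zero, already set, or not a power of two; power-of-two sizes let the ring mask indices cheaply. A hedged sketch of that size check, with is_power_of_2() expanded into plain bit arithmetic; the helper name and error handling are illustrative only.

```c
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_power_of_2(uint32_t n)
{
	/* Exactly one bit set: clearing the lowest set bit leaves zero. */
	return n != 0 && (n & (n - 1)) == 0;
}

/* Illustrative mirror of the xsk_init_queue() hit above: reject a zero
 * or non-power-of-two ring size before any allocation happens. */
static int validate_ring_size(uint32_t entries)
{
	if (entries == 0 || !is_power_of_2(entries))
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", validate_ring_size(0),
	       validate_ring_size(1024), validate_ring_size(1000));
	return 0;
}
```
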
/net/netfilter/ipvs/
  ip_vs_lblc.c
      106   atomic_t entries; /* number of entries */   member
      173   atomic_inc(&tbl->entries);   in ip_vs_lblc_hash()
      241   atomic_dec(&tbl->entries);   in ip_vs_lblc_flush()
      275   atomic_dec(&tbl->entries);   in ip_vs_lblc_full_check()
      311   if (atomic_read(&tbl->entries) <= tbl->max_size) {   in ip_vs_lblc_check_expire()
      316   goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;   in ip_vs_lblc_check_expire()
      329   atomic_dec(&tbl->entries);   in ip_vs_lblc_check_expire()
      370   atomic_set(&tbl->entries, 0);   in ip_vs_lblc_init_svc()

  ip_vs_lblcr.c
      274   atomic_t entries; /* number of entries */   member
      336   atomic_inc(&tbl->entries);   in ip_vs_lblcr_hash()
      439   atomic_dec(&tbl->entries);   in ip_vs_lblcr_full_check()
      475   if (atomic_read(&tbl->entries) <= tbl->max_size) {   in ip_vs_lblcr_check_expire()
      480   goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;   in ip_vs_lblcr_check_expire()
      493   atomic_dec(&tbl->entries);   in ip_vs_lblcr_check_expire()
      533   atomic_set(&tbl->entries, 0);   in ip_vs_lblcr_init_svc()

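Both the LBLC and LBLCR hits above show the same bookkeeping: a per-table atomic `entries` counter incremented on insert and decremented on removal, and a periodic expiry handler that does nothing while the count is at or below `max_size`, otherwise aims to drop the excess plus roughly a third of it. A simplified, single-threaded sketch of that goal computation; the struct and field names are invented for illustration.

```c
#include <stdio.h>

/* Illustrative stand-in for the per-service table counters. */
struct lblc_table {
	long entries;   /* current number of cache entries */
	long max_size;  /* soft limit checked by the expiry timer */
};

/* If the table is over its soft limit, aim to expire the excess plus
 * one third, echoing the goal = (entries - max_size) * 4/3 hit above. */
static long expire_goal(const struct lblc_table *tbl)
{
	if (tbl->entries <= tbl->max_size)
		return 0;
	return (tbl->entries - tbl->max_size) * 4 / 3;
}

int main(void)
{
	struct lblc_table tbl = { .entries = 1300, .max_size = 1000 };

	printf("expire about %ld entries\n", expire_goal(&tbl)); /* 400 */
	return 0;
}
```
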
/net/netfilter/
  xt_repldata.h
      23    struct type##_standard entries[]; \
      26    size_t term_offset = (offsetof(typeof(*tbl), entries[nhooks]) + \
      43    tbl->entries[i++] = (struct type##_standard) \

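xt_repldata.h builds a replacement table as a single allocation: a header, one standard entry per hook in a flexible array, and a terminating entry placed right after entries[nhooks], whose offset the macro computes with offsetof(). A reduced userspace sketch of that layout trick, with the ##-pasted xtables types replaced by made-up structs:

```c
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Made-up stand-ins for the pasted xtables entry types. */
struct std_entry { int verdict; };
struct err_entry { int is_terminator; };

/* One allocation: a header, nhooks standard entries in a flexible array,
 * then a terminating entry placed right after entries[nhooks]. */
struct repl_table {
	unsigned int nhooks;
	struct std_entry entries[];  /* flexible array member */
};

int main(void)
{
	unsigned int nhooks = 3, i;
	/* Same quantity as offsetof(typeof(*tbl), entries[nhooks]) in the hit above. */
	size_t term_offset = offsetof(struct repl_table, entries) +
			     nhooks * sizeof(struct std_entry);
	struct repl_table *tbl = calloc(1, term_offset + sizeof(struct err_entry));

	if (!tbl)
		return 1;
	tbl->nhooks = nhooks;
	for (i = 0; i < nhooks; i++)
		tbl->entries[i] = (struct std_entry){ .verdict = -1 };
	/* The terminating entry lives right after the last hook entry. */
	((struct err_entry *)((char *)tbl + term_offset))->is_terminator = 1;

	printf("%u hook entries, terminator at offset %zu\n",
	       tbl->nhooks, term_offset);
	free(tbl);
	return 0;
}
```

Keeping everything in one contiguous allocation is what lets the table be handed around and iterated as a single size-delimited blob.
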
  xt_recent.c
      82    unsigned int entries;   member
      149   t->entries--;   in recent_entry_remove()
      185   if (t->entries >= ip_list_tot) {   in recent_entry_init()
      205   t->entries++;   in recent_entry_init()

  nf_dup_netdev.c
      67    entry = &flow->rule->action.entries[ctx->num_actions++];   in nft_fwd_dup_netdev_offload()

  nf_nat_core.c
      40    struct nf_hook_entries __rcu *entries;   member
      753   struct nf_hook_entries *e = rcu_dereference(lpriv->entries);   in nf_nat_inet_fn()
      1082  ret = nf_hook_entries_insert_raw(&priv->entries, ops);   in nf_nat_register_fn()
      1121  nf_hook_entries_delete_raw(&priv->entries, ops);   in nf_nat_unregister_fn()

/net/xfrm/
  xfrm_algo.c
      607   int entries;   member
      614   .entries = ARRAY_SIZE(aead_list),
      621   .entries = ARRAY_SIZE(aalg_list),
      628   .entries = ARRAY_SIZE(ealg_list),
      635   .entries = ARRAY_SIZE(calg_list),
      648   for (i = 0; i < algo_list->entries; i++) {   in xfrm_find_algo()

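The xfrm_algo.c hits pair each algorithm table with an `entries` count taken from ARRAY_SIZE() at build time, and xfrm_find_algo() walks exactly that many slots. A stripped-down sketch of the same table-plus-count lookup; the descriptor fields and algorithm names are only placeholders.

```c
#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Invented descriptor; the real tables carry much more per algorithm. */
struct algo_desc {
	const char *name;
};

/* An algorithm class bundles its table with the entry count. */
struct algo_list {
	const struct algo_desc *algs;
	int entries;
};

static const struct algo_desc aalg_list[] = {
	{ .name = "hmac(sha1)" },
	{ .name = "hmac(sha256)" },
};

static const struct algo_list auth_algos = {
	.algs = aalg_list,
	.entries = ARRAY_SIZE(aalg_list),
};

/* Linear scan bounded by ->entries, like the xfrm_find_algo() hit above. */
static const struct algo_desc *find_algo(const struct algo_list *list,
					 const char *name)
{
	int i;

	for (i = 0; i < list->entries; i++)
		if (strcmp(list->algs[i].name, name) == 0)
			return &list->algs[i];
	return NULL;
}

int main(void)
{
	const struct algo_desc *a = find_algo(&auth_algos, "hmac(sha256)");

	printf("%s\n", a ? a->name : "not found");
	return 0;
}
```
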
/net/ipv4/netfilter/
  arp_tables.c
      208   table_base = private->entries;   in arpt_do_table()
      610   xt_entry_foreach(iter, t->entries, t->size) {   in get_counters()
      637   xt_entry_foreach(iter, t->entries, t->size) {   in get_old_counters()
      684   loc_cpu_entry = private->entries;   in copy_entries_to_user()
      775   memcpy(newinfo, info, offsetof(struct xt_table_info, entries));   in compat_table_info()
      777   loc_cpu_entry = info->entries;   in compat_table_info()
      929   loc_cpu_old_entry = oldinfo->entries;   in __do_replace()
      975   loc_cpu_entry = newinfo->entries;   in do_replace()
      1032  xt_entry_foreach(iter, private->entries, private->size) {   in do_add_counters()
      1060  struct compat_arpt_entry entries[0];   member
      [all …]

  ip_tables.c
      201   root = get_entry(private->entries, private->hook_entry[hook]);   in trace_packet()
      263   table_base = private->entries;   in ipt_do_table()
      751   xt_entry_foreach(iter, t->entries, t->size) {   in get_counters()
      778   xt_entry_foreach(iter, t->entries, t->size) {   in get_old_counters()
      826   loc_cpu_entry = private->entries;   in copy_entries_to_user()
      932   memcpy(newinfo, info, offsetof(struct xt_table_info, entries));   in compat_table_info()
      934   loc_cpu_entry = info->entries;   in compat_table_info()
      1084  xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)   in __do_replace()
      1129  loc_cpu_entry = newinfo->entries;   in do_replace()
      1186  xt_entry_foreach(iter, private->entries, private->size) {   in do_add_counters()
      [all …]

  ipt_CLUSTERIP.c
      41    refcount_t entries; /* number of entries/rules   member
      118   if (refcount_dec_and_lock(&c->entries, &cn->lock)) {   in clusterip_config_entry_put()
      166   if (unlikely(!refcount_inc_not_zero(&c->entries))) {   in clusterip_config_find_get()
      292   refcount_set(&c->entries, 1);   in clusterip_config_init()

  iptable_filter.c
      58    ((struct ipt_standard *)repl->entries)[1].target.verdict =   in iptable_filter_table_init()

/net/ipv6/netfilter/
  ip6_tables.c
      226   root = get_entry(private->entries, private->hook_entry[hook]);   in trace_packet()
      286   table_base = private->entries;   in ip6t_do_table()
      769   xt_entry_foreach(iter, t->entries, t->size) {   in get_counters()
      796   xt_entry_foreach(iter, t->entries, t->size) {   in get_old_counters()
      843   loc_cpu_entry = private->entries;   in copy_entries_to_user()
      949   memcpy(newinfo, info, offsetof(struct xt_table_info, entries));   in compat_table_info()
      951   loc_cpu_entry = info->entries;   in compat_table_info()
      1102  xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)   in __do_replace()
      1147  loc_cpu_entry = newinfo->entries;   in do_replace()
      1203  xt_entry_foreach(iter, private->entries, private->size) {   in do_add_counters()
      [all …]

  ip6table_filter.c
      59    ((struct ip6t_standard *)repl->entries)[1].target.verdict =   in ip6table_filter_table_init()

/net/sched/
  sch_taprio.c
      54    struct list_head entries;   member
      124   list_for_each_entry_safe(entry, n, &sched->entries, list) {   in taprio_free_sched_cb()
      221   list_for_each_entry(entry, &sched->entries, list) {   in find_entry_to_transmit()
      653   if (list_is_last(&entry->list, &oper->entries))   in should_restart_cycle()
      723   next = list_first_entry(&oper->entries, struct sched_entry,   in advance_sched()
      730   next = list_first_entry(&oper->entries, struct sched_entry,   in advance_sched()
      868   list_add_tail(&entry->list, &sched->entries);   in parse_sched_list()
      907   list_for_each_entry(entry, &new->entries, list)   in parse_taprio_schedule()
      1027  first = list_first_entry(&sched->entries,   in setup_first_close_time()
      1121  list_for_each_entry(entry, &sched->entries, list) {   in setup_txtime()
      [all …]

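The taprio hits keep the gate schedule as a list of entries and, when advance_sched() reaches the last one, wrap back to the first entry of the cycle. A small array-based sketch of that cyclic advance; the real code iterates a kernel list_head, so the index arithmetic here is purely illustrative.

```c
#include <stdio.h>

struct sched_entry {
	unsigned int interval_us;
};

/* Advance to the next schedule entry, wrapping to the start of the cycle
 * when the current entry is the last one, echoing should_restart_cycle()
 * and the list_first_entry() fallback in the hits above. */
static unsigned int next_entry(unsigned int cur, unsigned int num_entries)
{
	return (cur + 1 == num_entries) ? 0 : cur + 1;
}

int main(void)
{
	struct sched_entry entries[] = {
		{ .interval_us = 300 }, { .interval_us = 200 }, { .interval_us = 500 },
	};
	unsigned int n = sizeof(entries) / sizeof(entries[0]);
	unsigned int cur = 0, i;

	for (i = 0; i < 5; i++) {
		printf("entry %u runs for %u us\n", cur, entries[cur].interval_us);
		cur = next_entry(cur, n);
	}
	return 0;
}
```
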
/net/core/
  drop_monitor.c
      70    struct net_dm_hw_entry entries[0];   member
      235   for (i = 0; i < msg->entries; i++) {   in trace_drop_common()
      242   if (msg->entries == dm_hit_limit)   in trace_drop_common()
      251   msg->entries++;   in trace_drop_common()
      308   hw_entries = kzalloc(struct_size(hw_entries, entries, dm_hit_limit),   in net_dm_hw_reset_per_cpu_data()
      362   rc = net_dm_hw_entry_put(msg, &hw_entries->entries[i]);   in net_dm_hw_entries_put()
      455   hw_entry = &hw_entries->entries[i];   in net_dm_hw_summary_probe()
      465   hw_entry = &hw_entries->entries[hw_entries->num_entries];   in net_dm_hw_summary_probe()

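drop_monitor.c sizes its summary buffer with struct_size(hw_entries, entries, dm_hit_limit), i.e. header plus a flexible array of dm_hit_limit entries, and then fills slots only while the count stays below that limit. A userspace approximation with struct_size() expanded by hand; the struct names and the limit are illustrative.

```c
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the drop-monitor summary structures. */
struct hw_entry {
	unsigned int count;
};

struct hw_entries {
	unsigned int num_entries;
	struct hw_entry entries[];  /* flexible array member */
};

int main(void)
{
	const unsigned int hit_limit = 64;
	/* Hand-expanded equivalent of kzalloc(struct_size(e, entries, hit_limit)). */
	struct hw_entries *e = calloc(1, sizeof(*e) +
					 hit_limit * sizeof(e->entries[0]));

	if (!e)
		return 1;

	/* Record a hit only while there is still room, as in the probe hits above. */
	if (e->num_entries < hit_limit)
		e->entries[e->num_entries++].count = 1;

	printf("%u entries recorded\n", e->num_entries);
	free(e);
	return 0;
}
```
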
/net/sunrpc/
  cache.c
      104   detail->entries --;   in sunrpc_cache_add_entry()
      116   detail->entries++;   in sunrpc_cache_add_entry()
      202   detail->entries++;   in sunrpc_cache_update()
      369   cd->entries = 0;   in sunrpc_init_cache_detail()
      458   current_detail->entries--;   in cache_clean()
      519   if (!detail->entries) {   in cache_purge()
      524   dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);   in cache_purge()
      529   detail->entries--;   in cache_purge()
      1890  cd->entries--;   in sunrpc_cache_unhash()

/net/can/
  af_can.c
      477   dev_rcv_lists->entries++;   in can_rx_register()
      553   dev_rcv_lists->entries--;   in can_rx_unregister()
      583   if (dev_rcv_lists->entries == 0)   in can_rcv_filter()

/net/mpls/
  Kconfig
      23    that have had MPLS stack entries pushed onto them and thus

/net/batman-adv/
  Kconfig
      80    bool "batman-adv debugfs entries"
      101   bool "batman-adv sysfs entries"