/net/core/ |
D | flow.c |
      44  #define flow_table(cpu) (per_cpu(flow_tables, cpu))                   argument
      57  #define flow_hash_rnd_recalc(cpu) \                                   argument
      59  #define flow_hash_rnd(cpu) \                                          argument
      61  #define flow_count(cpu) \                                             argument
      74  #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))  argument
      87  static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)    in flow_entry_kill()
      95  static void __flow_cache_shrink(int cpu, int shrink_to)               in __flow_cache_shrink()
     115  static void flow_cache_shrink(int cpu)                                 in flow_cache_shrink()
     122  static void flow_new_hash_rnd(int cpu)                                 in flow_new_hash_rnd()
     130  static u32 flow_hash_code(struct flowi *key, int cpu)                  in flow_hash_code()
     [all …]
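The #define hits in flow.c above are all one idiom: a statically declared per-CPU variable wrapped in a tiny accessor macro, so each CPU indexes only its own slice of the flow cache; the remaining hits are helpers that take that CPU id as a parameter. A minimal sketch of the accessor pattern, assuming nothing beyond <linux/percpu.h>; the my_flow_* names are placeholders, not the kernel's:

    #include <linux/percpu.h>

    struct my_flow_entry;

    /* one table pointer per CPU; each CPU only ever touches its own slot */
    static DEFINE_PER_CPU(struct my_flow_entry **, my_flow_tables);

    /* accessor macro in the style of flow_table(cpu) above */
    #define my_flow_table(cpu) (per_cpu(my_flow_tables, cpu))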
|
D | sock.c |
    1950  int cpu = smp_processor_id();    in sock_prot_inuse_add()  local
    1957  int cpu, idx = prot->inuse_idx;  in sock_prot_inuse_get()  local
    2003  int cpu, idx = prot->inuse_idx;  in sock_prot_inuse_get()  local
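sock_prot_inuse_add() and sock_prot_inuse_get() are the classic per-CPU counter split: the hot path bumps only the current CPU's slot, the read path sums every possible CPU. A rough sketch of that split with simplified names and a flat counter array standing in for the real layout:

    #include <linux/percpu.h>
    #include <linux/smp.h>
    #include <linux/cpumask.h>

    #define MY_PROT_INUSE_NR 64

    struct my_prot_inuse {
            int val[MY_PROT_INUSE_NR];
    };

    static DEFINE_PER_CPU(struct my_prot_inuse, my_prot_inuse);

    /* write side: touch only this CPU's slot (callers keep BHs disabled) */
    static void my_prot_inuse_add(int idx, int val)
    {
            per_cpu(my_prot_inuse, smp_processor_id()).val[idx] += val;
    }

    /* read side: walk every possible CPU and sum the slots */
    static int my_prot_inuse_get(int idx)
    {
            int cpu, res = 0;

            for_each_possible_cpu(cpu)
                    res += per_cpu(my_prot_inuse, cpu).val[idx];

            return res >= 0 ? res : 0;
    }

Summing over possible rather than online CPUs means counts accumulated on a CPU that has since gone offline are not lost.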
|
D | pktgen.c |
     391  int cpu;                                          member
    3517  int cpu = t->cpu;                                 in pktgen_thread_worker()  local
    3705  static int __init pktgen_create_thread(int cpu)   in pktgen_create_thread()
    3807  int cpu;                                          in pg_init()  local
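pktgen keeps one kernel thread per CPU (hence the int cpu member at line 391) and binds each worker to its CPU when it is created. A sketch of that create-and-bind step, assuming the standard kthread API; the my_* names and the idle loop body are invented:

    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/delay.h>
    #include <linux/err.h>
    #include <linux/kernel.h>

    struct my_pktgen_thread {
            int cpu;                /* CPU this worker is pinned to */
            /* ... per-thread packet state ... */
    };

    /* thread body: runs pinned to t->cpu until asked to stop */
    static int my_worker(void *arg)
    {
            struct my_pktgen_thread *t = arg;

            pr_info("worker running on cpu %d\n", t->cpu);
            while (!kthread_should_stop())
                    msleep(1000);   /* the real worker builds and sends packets */
            return 0;
    }

    /* create one worker for 'cpu' and bind it there before it starts */
    static int my_create_thread(struct my_pktgen_thread *t, int cpu)
    {
            struct task_struct *p;

            t->cpu = cpu;
            p = kthread_create(my_worker, t, "my_pktgen/%d", cpu);
            if (IS_ERR(p))
                    return PTR_ERR(p);

            kthread_bind(p, cpu);
            wake_up_process(p);
            return 0;
    }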
|
D | neighbour.c |
    1752  int cpu;  in neightbl_fill_info()    local
    2414  int cpu;  in neigh_stat_seq_start()  local
    2432  int cpu;  in neigh_stat_seq_next()   local
|
D | dev.c |
    1882  int cpu = smp_processor_id(); /* ok because BHs are off */  in dev_queue_xmit()    local
    4962  unsigned int cpu, oldcpu = (unsigned long)ocpu;             in dev_cpu_callback()  local
|
/net/xfrm/ |
D | xfrm_ipcomp.c |
      48  const int cpu = get_cpu();  in ipcomp_decompress()  local
     141  const int cpu = get_cpu();  in ipcomp_compress()    local
     245  int cpu;                    in ipcomp_free_tfms()   local
     274  int cpu;                    in ipcomp_alloc_tfms()  local
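The const int cpu = get_cpu() lines in ipcomp_compress()/ipcomp_decompress() are the usual bracket around per-CPU scratch state: get_cpu() disables preemption and returns the current CPU id, so that CPU's private buffer and transform cannot be pulled out from under the caller until put_cpu(). A stripped-down sketch of that bracket, with placeholder names for the per-CPU data:

    #include <linux/percpu.h>
    #include <linux/smp.h>
    #include <linux/errno.h>

    static DEFINE_PER_CPU(void *, my_scratch);      /* per-CPU scratch buffer */

    static int my_use_scratch(void)
    {
            const int cpu = get_cpu();      /* disable preemption, fetch CPU id */
            void *scratch = per_cpu(my_scratch, cpu);
            int err = 0;

            if (!scratch) {
                    err = -ENOMEM;
                    goto out;
            }

            /* ... (de)compress into 'scratch'; safe while preemption is off ... */
    out:
            put_cpu();                      /* re-enable preemption */
            return err;
    }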
|
/net/iucv/ |
D | iucv.c |
     349  int cpu = smp_processor_id();  in iucv_allow_cpu()     local
     378  int cpu = smp_processor_id();  in iucv_block_cpu()     local
     398  int cpu = smp_processor_id();  in iucv_declare_cpu()   local
     453  int cpu = smp_processor_id();  in iucv_retrieve_cpu()  local
     477  int cpu;                       in iucv_setmask_mp()    local
     497  int cpu;                       in iucv_setmask_up()    local
     517  int cpu, rc;                   in iucv_enable()        local
     561  long cpu = (long) hcpu;        in iucv_cpu_notify()    local
    1686  int cpu;                       in iucv_init()          local
    1757  int cpu;                       in iucv_exit()          local
|
/net/netfilter/ |
D | nf_conntrack_ecache.c | 92 int cpu; in nf_ct_event_cache_flush() local
|
D | nf_conntrack_standalone.c |
     198  int cpu;  in ct_cpu_seq_start()  local
     216  int cpu;  in ct_cpu_seq_next()   local
|
D | x_tables.c |
     583  int cpu;  in xt_alloc_table_info()  local
     616  int cpu;  in xt_free_table_info()   local
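xt_alloc_table_info() and xt_free_table_info() loop over every possible CPU to hand each CPU its own copy of the rule blob, so evaluation never shares writable data across CPUs. A simplified sketch of that allocate/free pair, using plain kmalloc and invented struct names in place of the real size-dependent allocator:

    #include <linux/cpumask.h>
    #include <linux/slab.h>
    #include <linux/threads.h>

    struct my_table_info {
            unsigned int size;
            void *entries[NR_CPUS];         /* one rule blob per CPU */
    };

    static struct my_table_info *my_alloc_table_info(unsigned int size)
    {
            struct my_table_info *info;
            int cpu;

            info = kzalloc(sizeof(*info), GFP_KERNEL);
            if (!info)
                    return NULL;
            info->size = size;

            for_each_possible_cpu(cpu) {
                    info->entries[cpu] = kmalloc(size, GFP_KERNEL);
                    if (!info->entries[cpu])
                            goto free_partial;
            }
            return info;

    free_partial:
            for_each_possible_cpu(cpu)
                    kfree(info->entries[cpu]);      /* kfree(NULL) is a no-op */
            kfree(info);
            return NULL;
    }

    static void my_free_table_info(struct my_table_info *info)
    {
            int cpu;

            for_each_possible_cpu(cpu)
                    kfree(info->entries[cpu]);
            kfree(info);
    }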
|
/net/ipv4/netfilter/ |
D | nf_conntrack_l3proto_ipv4_compat.c |
     289  int cpu;  in ct_cpu_seq_start()  local
     307  int cpu;  in ct_cpu_seq_next()   local
|
D | arp_tables.c | 687 unsigned int cpu; in get_counters() local
|
D | ip_tables.c | 899 unsigned int cpu; in get_counters() local
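get_counters() in arp_tables.c, ip_tables.c and (further down) ip6_tables.c all do the same fold: walk every possible CPU and add that CPU's private packet/byte counters into the single array that is copied back to userspace. A bare-bones sketch of the fold, with invented types standing in for the real table layout; the caller is assumed to pass a zeroed output array:

    #include <linux/cpumask.h>
    #include <linux/threads.h>
    #include <linux/types.h>

    struct my_counter {
            u64 pcnt;       /* packets */
            u64 bcnt;       /* bytes */
    };

    struct my_table_info {
            unsigned int number;                    /* rules in the table */
            struct my_counter *counters[NR_CPUS];   /* per-CPU counter arrays */
    };

    /* fold every CPU's private counters into one user-visible array */
    static void my_get_counters(const struct my_table_info *t,
                                struct my_counter *out)
    {
            unsigned int cpu, i;

            for_each_possible_cpu(cpu) {
                    for (i = 0; i < t->number; i++) {
                            out[i].pcnt += t->counters[cpu][i].pcnt;
                            out[i].bcnt += t->counters[cpu][i].bcnt;
                    }
            }
    }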
|
/net/sunrpc/ |
D | svc.c |
     181  unsigned int cpu;                                 in svc_pool_map_init_percpu()  local
     333  svc_pool_for_cpu(struct svc_serv *serv, int cpu)  in svc_pool_for_cpu()
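svc_pool_for_cpu() maps the CPU a request arrived on to one of the server's thread pools, so work tends to stay on the CPU (or node) that received it. In the simplest per-CPU mode that mapping is just an index into the pool array; a minimal sketch of the idea with placeholder struct names, ignoring the per-node and global fallback modes:

    struct my_pool {
            unsigned int id;
            /* ... pool lists and locks ... */
    };

    struct my_serv {
            unsigned int nrpools;
            struct my_pool *pools;          /* nrpools entries */
    };

    /* per-CPU mode: pool index == CPU id; anything else falls back to pool 0 */
    static struct my_pool *my_pool_for_cpu(struct my_serv *serv, int cpu)
    {
            if (cpu >= 0 && cpu < (int)serv->nrpools)
                    return &serv->pools[cpu];
            return &serv->pools[0];
    }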
|
D | svc_xprt.c | 300 int cpu; in svc_xprt_enqueue() local
|
/net/ipv4/ |
D | tcp.c |
    2579  int cpu;                                                 in __tcp_free_md5sig_pool()   local
    2610  int cpu;                                                 in __tcp_alloc_md5sig_pool()  local
    2681  struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)   in __tcp_get_md5sig_pool()
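The MD5 signature pool in tcp.c is dynamically allocated per-CPU state: alloc_percpu() provides one slot per CPU, each slot is then filled with that CPU's crypto scratch, and __tcp_get_md5sig_pool(cpu) simply dereferences the requested slot. A trimmed sketch of the allocate/lookup pair, with a stand-in struct instead of the real tcp_md5sig_pool:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    struct my_sig_pool {
            /* ... per-CPU hash transform and scratch space ... */
            int dummy;
    };

    static struct my_sig_pool **my_pool;    /* per-CPU array of pool pointers */

    static int my_alloc_pool(void)
    {
            struct my_sig_pool **pool;
            int cpu;

            pool = alloc_percpu(struct my_sig_pool *);
            if (!pool)
                    return -ENOMEM;

            for_each_possible_cpu(cpu) {
                    struct my_sig_pool *p = kzalloc(sizeof(*p), GFP_KERNEL);

                    if (!p)
                            goto fail;
                    *per_cpu_ptr(pool, cpu) = p;
            }
            my_pool = pool;
            return 0;

    fail:
            for_each_possible_cpu(cpu)
                    kfree(*per_cpu_ptr(pool, cpu));  /* unset slots are NULL */
            free_percpu(pool);
            return -ENOMEM;
    }

    /* hand back the pool that belongs to 'cpu' */
    static struct my_sig_pool *my_get_pool(int cpu)
    {
            return *per_cpu_ptr(my_pool, cpu);
    }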
|
D | route.c |
     427  int cpu;  in rt_cpu_seq_start()  local
     443  int cpu;  in rt_cpu_seq_next()   local
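rt_cpu_seq_start()/rt_cpu_seq_next() (like the ct_cpu_seq_* and neigh_stat_seq_* iterators above) walk the possible-CPU space so a /proc seq_file can print one statistics line per CPU. A condensed sketch of that iterator shape, assuming a per-CPU stats struct with an invented name:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/seq_file.h>
    #include <linux/threads.h>

    struct my_cache_stat {
            unsigned int hits, misses;
    };

    static DEFINE_PER_CPU(struct my_cache_stat, my_cache_stat);

    /* map a seq_file position onto the next possible CPU's stats block */
    static void *my_stat_seq_start(struct seq_file *seq, loff_t *pos)
    {
            int cpu;

            if (*pos == 0)
                    return SEQ_START_TOKEN;         /* header line */

            for (cpu = *pos - 1; cpu < NR_CPUS; ++cpu) {
                    if (!cpu_possible(cpu))
                            continue;
                    *pos = cpu + 1;                 /* remember where to resume */
                    return &per_cpu(my_cache_stat, cpu);
            }
            return NULL;
    }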
|
/net/bridge/netfilter/ |
D | ebtables.c |
      52  #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \   argument
     929  int i, cpu;  in get_counters()  local
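COUNTER_BASE() (truncated above) and the get_counters() loop reflect ebtables' layout choice: rather than one allocation per CPU, every CPU's counters live in a single contiguous block and a CPU's region is located by stride arithmetic. A small sketch of that addressing scheme with invented names:

    #include <linux/types.h>

    struct my_counter {
            u64 pcnt;
            u64 bcnt;
    };

    /*
     * One flat allocation holds nentries counters for CPU 0, then nentries
     * for CPU 1, and so on; this yields the base of the given CPU's region.
     */
    #define MY_COUNTER_BASE(base, nentries, cpu)                            \
            ((struct my_counter *)((char *)(base) +                         \
                    (cpu) * (nentries) * sizeof(struct my_counter)))

Keeping all CPUs' counters in one block makes the later fold into the user-visible array a simple pointer walk over the per-CPU regions.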
|
/net/ipv6/netfilter/ |
D | ip6_tables.c | 928 unsigned int cpu; in get_counters() local
|
/net/ |
D | socket.c | 2250 int cpu; in socket_seq_show() local
|