
Searched refs:cpu (Results 1 – 25 of 38) sorted by relevance


/net/iucv/
iucv.c
379 int cpu = smp_processor_id(); in iucv_allow_cpu() local
391 parm = iucv_param_irq[cpu]; in iucv_allow_cpu()
409 cpumask_set_cpu(cpu, &iucv_irq_cpumask); in iucv_allow_cpu()
420 int cpu = smp_processor_id(); in iucv_block_cpu() local
424 parm = iucv_param_irq[cpu]; in iucv_block_cpu()
429 cpumask_clear_cpu(cpu, &iucv_irq_cpumask); in iucv_block_cpu()
440 int cpu = smp_processor_id(); in iucv_block_cpu_almost() local
444 parm = iucv_param_irq[cpu]; in iucv_block_cpu_almost()
454 cpumask_clear_cpu(cpu, &iucv_irq_cpumask); in iucv_block_cpu_almost()
465 int cpu = smp_processor_id(); in iucv_declare_cpu() local
[all …]
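
Every hit in iucv.c follows one idiom: read the current CPU id with smp_processor_id() (only valid while preemption is off), index the per-CPU parameter array iucv_param_irq[] with it, then set or clear that CPU's bit in iucv_irq_cpumask. A minimal userspace analogue of the "index a per-CPU slot by current CPU" step, with sched_getcpu() standing in for smp_processor_id() and a plain array standing in for the per-CPU data (all names below are illustrative, not from the kernel source):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#define MAX_CPUS 64

/* one fake parameter block per CPU, playing the role of iucv_param_irq[] */
static int param_block[MAX_CPUS];

int main(void)
{
	int cpu = sched_getcpu();   /* userspace stand-in for smp_processor_id() */

	if (cpu < 0 || cpu >= MAX_CPUS)
		return 1;
	param_block[cpu] = 42;      /* touch only this CPU's block */
	printf("updated parameter block of cpu %d\n", cpu);
	return 0;
}

Unlike this sketch, the kernel helpers run pinned to the CPU they operate on, so the id cannot change underneath them.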
/net/xfrm/
xfrm_ipcomp.c
48 const int cpu = get_cpu(); in ipcomp_decompress() local
49 u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); in ipcomp_decompress()
50 struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); in ipcomp_decompress()
144 const int cpu = get_cpu(); in ipcomp_compress() local
145 u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); in ipcomp_compress()
146 struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); in ipcomp_compress()
248 int cpu; in ipcomp_free_tfms() local
266 for_each_possible_cpu(cpu) { in ipcomp_free_tfms()
267 struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu); in ipcomp_free_tfms()
277 int cpu; in ipcomp_alloc_tfms() local
[all …]
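
ipcomp keeps one scratch buffer and one crypto_comp transform per CPU; the compress/decompress paths pin the caller with get_cpu() so the per_cpu_ptr() result stays usable, and the alloc/free paths walk every possible CPU with for_each_possible_cpu(). A hedged userspace sketch of the allocate-per-CPU / unwind-on-failure pairing, with get_nprocs_conf() roughly standing in for nr_cpu_ids:

#include <stdio.h>
#include <stdlib.h>
#include <sys/sysinfo.h>

/* allocate one scratch buffer per CPU, shaped like ipcomp's per-CPU setup */
static void **alloc_scratches(int ncpu, size_t size)
{
	void **scratches = calloc(ncpu, sizeof(*scratches));

	if (!scratches)
		return NULL;
	for (int cpu = 0; cpu < ncpu; cpu++) {
		scratches[cpu] = malloc(size);
		if (!scratches[cpu]) {  /* unwind everything on failure */
			while (cpu--)
				free(scratches[cpu]);
			free(scratches);
			return NULL;
		}
	}
	return scratches;
}

static void free_scratches(void **scratches, int ncpu)
{
	for (int cpu = 0; cpu < ncpu; cpu++)
		free(scratches[cpu]);
	free(scratches);
}

int main(void)
{
	int ncpu = get_nprocs_conf();   /* rough stand-in for nr_cpu_ids */
	void **s = alloc_scratches(ncpu, 65536);

	if (!s)
		return 1;
	printf("allocated %d per-cpu scratch buffers\n", ncpu);
	free_scratches(s, ncpu);
	return 0;
}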
/net/ipv4/netfilter/
nf_conntrack_l3proto_ipv4_compat.c
327 int cpu; in ct_cpu_seq_start() local
332 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_start()
333 if (!cpu_possible(cpu)) in ct_cpu_seq_start()
335 *pos = cpu+1; in ct_cpu_seq_start()
336 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_start()
345 int cpu; in ct_cpu_seq_next() local
347 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_next()
348 if (!cpu_possible(cpu)) in ct_cpu_seq_next()
350 *pos = cpu+1; in ct_cpu_seq_next()
351 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_next()
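
ct_cpu_seq_start() and ct_cpu_seq_next() share one iterator convention: the seq_file position encodes cpu + 1 (position 0 is reserved for the header row), and ids that fail cpu_possible() are skipped. The same skeleton recurs below in nf_conntrack_standalone.c, neighbour.c, and route.c. Reduced to plain, runnable C with a stub possible-CPU test (the hole at CPU 3 is invented for illustration):

#include <stdio.h>

#define NR_CPU_IDS 8

static int stats[NR_CPU_IDS];        /* stands in for net->ct.stat */

static int cpu_possible(int cpu)
{
	return cpu != 3;             /* pretend CPU 3 is a hole in the map */
}

/* mirrors the ct_cpu_seq_next() loop: *pos always ends up as cpu + 1 */
static int *ct_cpu_seq_next(long *pos)
{
	for (int cpu = *pos; cpu < NR_CPU_IDS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return &stats[cpu];
	}
	return NULL;                 /* iteration done */
}

int main(void)
{
	long pos = 0;

	for (int *st = ct_cpu_seq_next(&pos); st; st = ct_cpu_seq_next(&pos))
		printf("visiting stats slot for cpu %ld\n", pos - 1);
	return 0;
}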
ip_tables.c
302 unsigned int *stackptr, origptr, cpu; in ipt_do_table() local
329 cpu = smp_processor_id(); in ipt_do_table()
330 table_base = private->entries[cpu]; in ipt_do_table()
331 jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; in ipt_do_table()
332 stackptr = per_cpu_ptr(private->stackptr, cpu); in ipt_do_table()
878 unsigned int cpu; in get_counters() local
881 for_each_possible_cpu(cpu) { in get_counters()
882 seqcount_t *s = &per_cpu(xt_recseq, cpu); in get_counters()
885 xt_entry_foreach(iter, t->entries[cpu], t->size) { in get_counters()
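
get_counters() folds each possible CPU's entry counters into one total, and the per-CPU xt_recseq seqcount lets it detect a concurrent update on that CPU and retry rather than report a torn byte/packet pair. A deliberately simplified, single-threaded C11 sketch of that read-retry shape (the kernel uses read_seqcount_begin()/read_seqcount_retry(); real concurrent use needs the kernel's stronger fencing):

#include <stdatomic.h>
#include <stdio.h>

struct counter { unsigned long bytes, pkts; };

static _Atomic unsigned int seq;   /* plays the role of a per-CPU seqcount */
static struct counter cnt;         /* the data it protects */

/* writer: bump to odd, update, bump back to even */
static void write_counter(unsigned long b, unsigned long p)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);
	cnt.bytes += b;
	cnt.pkts  += p;
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);
}

/* reader: retry while the sequence is odd or changed underneath us */
static struct counter read_counter(void)
{
	struct counter snap;
	unsigned int s;

	do {
		s = atomic_load_explicit(&seq, memory_order_acquire);
		snap = cnt;
	} while ((s & 1) || s != atomic_load_explicit(&seq, memory_order_acquire));
	return snap;
}

int main(void)
{
	write_counter(1500, 1);
	struct counter c = read_counter();
	printf("%lu bytes / %lu packets\n", c.bytes, c.pkts);
	return 0;
}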
/net/netfilter/
nf_conntrack_standalone.c
276 int cpu; in ct_cpu_seq_start() local
281 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_start()
282 if (!cpu_possible(cpu)) in ct_cpu_seq_start()
284 *pos = cpu + 1; in ct_cpu_seq_start()
285 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_start()
294 int cpu; in ct_cpu_seq_next() local
296 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_next()
297 if (!cpu_possible(cpu)) in ct_cpu_seq_next()
299 *pos = cpu + 1; in ct_cpu_seq_next()
300 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_next()
x_tables.c
679 int cpu; in xt_alloc_table_info() local
691 for_each_possible_cpu(cpu) { in xt_alloc_table_info()
693 newinfo->entries[cpu] = kmalloc_node(size, in xt_alloc_table_info()
695 cpu_to_node(cpu)); in xt_alloc_table_info()
697 newinfo->entries[cpu] = vmalloc_node(size, in xt_alloc_table_info()
698 cpu_to_node(cpu)); in xt_alloc_table_info()
700 if (newinfo->entries[cpu] == NULL) { in xt_alloc_table_info()
712 int cpu; in xt_free_table_info() local
714 for_each_possible_cpu(cpu) { in xt_free_table_info()
716 kfree(info->entries[cpu]); in xt_free_table_info()
[all …]
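
xt_alloc_table_info() gives every possible CPU its own copy of the ruleset, allocated NUMA-locally via cpu_to_node(): kmalloc_node() for small tables, with vmalloc_node() as the large-size fallback. A rough userspace shape of that size-gated allocator choice (malloc/aligned_alloc are stand-ins, and the one-page cutoff is only indicative of the kernel's heuristic):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* small tables via the slab-style allocator, big ones via the
 * vmalloc-style one, mirroring the if/else in xt_alloc_table_info() */
static void *alloc_table_copy(size_t size)
{
	if (size <= PAGE_SIZE)
		return malloc(size);               /* kmalloc_node() stand-in */
	return aligned_alloc(PAGE_SIZE, size);     /* vmalloc_node() stand-in */
}

int main(void)
{
	void *small = alloc_table_copy(512);
	void *large = alloc_table_copy(1UL << 20); /* multiple of PAGE_SIZE */

	printf("small=%p large=%p\n", small, large);
	free(small);
	free(large);
	return 0;
}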
nf_conntrack_netlink.c
1843 __u16 cpu, const struct ip_conntrack_stat *st) in ctnetlink_ct_stat_cpu_fill_info() argument
1857 nfmsg->res_id = htons(cpu); in ctnetlink_ct_stat_cpu_fill_info()
1888 int cpu; in ctnetlink_ct_stat_cpu_dump() local
1894 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { in ctnetlink_ct_stat_cpu_dump()
1897 if (!cpu_possible(cpu)) in ctnetlink_ct_stat_cpu_dump()
1900 st = per_cpu_ptr(net->ct.stat, cpu); in ctnetlink_ct_stat_cpu_dump()
1904 cpu, st) < 0) in ctnetlink_ct_stat_cpu_dump()
1907 cb->args[0] = cpu; in ctnetlink_ct_stat_cpu_dump()
2909 ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu, in ctnetlink_exp_stat_fill_info() argument
2924 nfmsg->res_id = htons(cpu); in ctnetlink_exp_stat_fill_info()
[all …]
xt_NFQUEUE.c
153 int cpu = smp_processor_id(); in nfqueue_tg_v3() local
155 queue = info->queuenum + cpu % info->queues_total; in nfqueue_tg_v3()
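
The v3 NFQUEUE target spreads load by adding cpu % queues_total to the base queue number, giving each CPU a stable queue in the range [queuenum, queuenum + queues_total). A quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int queuenum = 100, queues_total = 4;

	for (unsigned int cpu = 0; cpu < 8; cpu++)
		printf("cpu %u -> queue %u\n", cpu, queuenum + cpu % queues_total);
	return 0;
}

CPUs 0..3 map to queues 100..103, then CPU 4 wraps back to queue 100, and so on.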
nf_conntrack_core.c
1329 int cnt = 0, cpu; in untrack_refs() local
1331 for_each_possible_cpu(cpu) { in untrack_refs()
1332 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); in untrack_refs()
1496 int cpu; in nf_ct_untracked_status_or() local
1498 for_each_possible_cpu(cpu) in nf_ct_untracked_status_or()
1499 per_cpu(nf_conntrack_untracked, cpu).status |= bits; in nf_ct_untracked_status_or()
1506 int ret, cpu; in nf_conntrack_init_start() local
1569 for_each_possible_cpu(cpu) { in nf_conntrack_init_start()
1570 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu); in nf_conntrack_init_start()
xt_cpu.c
41 return (info->cpu == smp_processor_id()) ^ info->invert; in cpu_mt()
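
xt_cpu's whole match is that one expression: XOR-ing the comparison against the configured CPU with the invert flag flips the verdict when the rule asks for inversion. The truth table, verified:

#include <stdio.h>

static int cpu_mt(int match_cpu, int cur_cpu, int invert)
{
	return (match_cpu == cur_cpu) ^ invert;   /* same shape as xt_cpu.c line 41 */
}

int main(void)
{
	printf("match, no invert:    %d\n", cpu_mt(2, 2, 0));  /* 1 */
	printf("no match, no invert: %d\n", cpu_mt(2, 5, 0));  /* 0 */
	printf("match, invert:       %d\n", cpu_mt(2, 2, 1));  /* 0 */
	printf("no match, invert:    %d\n", cpu_mt(2, 5, 1));  /* 1 */
	return 0;
}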
/net/rds/
ib_stats.c
81 int cpu; in rds_ib_stats_info_copy() local
86 for_each_online_cpu(cpu) { in rds_ib_stats_info_copy()
87 src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu)); in rds_ib_stats_info_copy()
tcp_stats.c
58 int cpu; in rds_tcp_stats_info_copy() local
63 for_each_online_cpu(cpu) { in rds_tcp_stats_info_copy()
64 src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu)); in rds_tcp_stats_info_copy()
iw_stats.c
79 int cpu; in rds_iw_stats_info_copy() local
84 for_each_online_cpu(cpu) { in rds_iw_stats_info_copy()
85 src = (uint64_t *)&(per_cpu(rds_iw_stats, cpu)); in rds_iw_stats_info_copy()
stats.c
116 int cpu; in rds_stats_info() local
126 for_each_online_cpu(cpu) { in rds_stats_info()
127 src = (uint64_t *)&(per_cpu(rds_stats, cpu)); in rds_stats_info()
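
The four RDS stats files above (ib_stats.c, tcp_stats.c, iw_stats.c, stats.c) repeat one copy loop: cast the per-CPU stats struct to a flat uint64_t array and accumulate it slot by slot across every online CPU. A hedged userspace reduction of that flattening trick (the struct fields here are invented; what matters is that every field is a u64, so the cast is safe):

#include <stdint.h>
#include <stdio.h>

#define NCPU 4

struct rds_stats { uint64_t rx, tx, retrans; };   /* illustrative fields */
#define NFIELDS (sizeof(struct rds_stats) / sizeof(uint64_t))

static struct rds_stats percpu[NCPU];             /* per_cpu(rds_stats, cpu) stand-in */

int main(void)
{
	uint64_t sum[NFIELDS] = {0};

	percpu[0].rx = 10; percpu[2].rx = 5; percpu[3].tx = 7;

	for (int cpu = 0; cpu < NCPU; cpu++) {    /* for_each_online_cpu() */
		uint64_t *src = (uint64_t *)&percpu[cpu];

		for (size_t i = 0; i < NFIELDS; i++)
			sum[i] += src[i];
	}
	printf("rx=%llu tx=%llu\n",
	       (unsigned long long)sum[0], (unsigned long long)sum[1]);
	return 0;
}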
page.c
186 long cpu = (long)hcpu; in rds_page_remainder_cpu_notify() local
188 rem = &per_cpu(rds_page_remainders, cpu); in rds_page_remainder_cpu_notify()
190 rdsdebug("cpu %ld action 0x%lx\n", cpu, action); in rds_page_remainder_cpu_notify()
ib_recv.c
103 int cpu; in rds_ib_recv_alloc_cache() local
109 for_each_possible_cpu(cpu) { in rds_ib_recv_alloc_cache()
110 head = per_cpu_ptr(cache->percpu, cpu); in rds_ib_recv_alloc_cache()
138 int cpu; in rds_ib_cache_splice_all_lists() local
140 for_each_possible_cpu(cpu) { in rds_ib_cache_splice_all_lists()
141 head = per_cpu_ptr(cache->percpu, cpu); in rds_ib_cache_splice_all_lists()
/net/core/
dev.c
1800 int cpu, u16 index) in remove_xps_queue() argument
1806 map = xmap_dereference(dev_maps->cpu_map[cpu]); in remove_xps_queue()
1813 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL); in remove_xps_queue()
1827 int cpu, i; in netif_reset_xps_queues_gt() local
1836 for_each_possible_cpu(cpu) { in netif_reset_xps_queues_gt()
1838 if (!remove_xps_queue(dev_maps, cpu, i)) in netif_reset_xps_queues_gt()
1859 int cpu, u16 index) in expand_xps_map() argument
1881 cpu_to_node(cpu)); in expand_xps_map()
1898 int cpu, numa_node_id = -2; in netif_set_xps_queue() local
1906 for_each_online_cpu(cpu) { in netif_set_xps_queue()
[all …]
drop_monitor.c
358 int cpu, rc; in init_net_drop_monitor() local
383 for_each_possible_cpu(cpu) { in init_net_drop_monitor()
384 data = &per_cpu(dm_cpu_data, cpu); in init_net_drop_monitor()
405 int cpu; in exit_net_drop_monitor() local
416 for_each_possible_cpu(cpu) { in exit_net_drop_monitor()
417 data = &per_cpu(dm_cpu_data, cpu); in exit_net_drop_monitor()
flow.c
332 static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu) in flow_cache_percpu_empty() argument
337 fcp = per_cpu_ptr(fc->percpu, cpu); in flow_cache_percpu_empty()
406 static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu) in flow_cache_cpu_prepare() argument
408 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); in flow_cache_cpu_prepare()
412 fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); in flow_cache_cpu_prepare()
429 int res, cpu = (unsigned long) hcpu; in flow_cache_cpu() local
430 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); in flow_cache_cpu()
435 res = flow_cache_cpu_prepare(fc, cpu); in flow_cache_cpu()
neighbour.c
1857 int cpu; in neightbl_fill_info() local
1862 for_each_possible_cpu(cpu) { in neightbl_fill_info()
1865 st = per_cpu_ptr(tbl->stats, cpu); in neightbl_fill_info()
2631 int cpu; in neigh_stat_seq_start() local
2636 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in neigh_stat_seq_start()
2637 if (!cpu_possible(cpu)) in neigh_stat_seq_start()
2639 *pos = cpu+1; in neigh_stat_seq_start()
2640 return per_cpu_ptr(tbl->stats, cpu); in neigh_stat_seq_start()
2648 int cpu; in neigh_stat_seq_next() local
2650 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in neigh_stat_seq_next()
[all …]
pktgen.c
421 int cpu; member
3301 int cpu = t->cpu; in pktgen_thread_worker() local
3303 BUG_ON(smp_processor_id() != cpu); in pktgen_thread_worker()
3308 pr_debug("starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current)); in pktgen_thread_worker()
3436 int node = cpu_to_node(t->cpu); in pktgen_add_device()
3506 static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn) in pktgen_create_thread() argument
3513 cpu_to_node(cpu)); in pktgen_create_thread()
3520 t->cpu = cpu; in pktgen_create_thread()
3529 cpu_to_node(cpu), in pktgen_create_thread()
3530 "kpktgend_%d", cpu); in pktgen_create_thread()
[all …]
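
pktgen starts one worker per CPU (the kpktgend_N threads), created on that CPU's NUMA node and bound to it, which is why pktgen_thread_worker() can BUG_ON(smp_processor_id() != cpu) at entry. A userspace equivalent using pthread affinity (kthread_create_on_node() and kthread_bind() have no direct userspace twin; this only reproduces the shape):

#define _GNU_SOURCE
#include <assert.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *worker(void *arg)
{
	long cpu = (long)arg;

	assert(sched_getcpu() == cpu);  /* mirrors BUG_ON(smp_processor_id() != cpu) */
	printf("worker pinned to cpu %ld\n", cpu);
	return NULL;
}

int main(void)
{
	long cpu = 0;
	pthread_t t;
	pthread_attr_t attr;
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	pthread_attr_init(&attr);
	pthread_attr_setaffinity_np(&attr, sizeof(set), &set);  /* like kthread_bind() */

	if (pthread_create(&t, &attr, worker, (void *)cpu))
		return 1;
	pthread_join(t, NULL);
	return 0;
}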
/net/batman-adv/
main.h
296 int cpu; in batadv_sum_counter() local
298 for_each_possible_cpu(cpu) { in batadv_sum_counter()
299 counters = per_cpu_ptr(bat_priv->bat_counters, cpu); in batadv_sum_counter()
/net/bridge/
br_device.c
129 unsigned int cpu; in br_get_stats64() local
131 for_each_possible_cpu(cpu) { in br_get_stats64()
134 = per_cpu_ptr(br->stats, cpu); in br_get_stats64()
/net/sunrpc/
svc.c
184 unsigned int cpu; in svc_pool_map_init_percpu() local
191 for_each_online_cpu(cpu) { in svc_pool_map_init_percpu()
193 m->to_pool[cpu] = pidx; in svc_pool_map_init_percpu()
194 m->pool_to[pidx] = cpu; in svc_pool_map_init_percpu()
350 svc_pool_for_cpu(struct svc_serv *serv, int cpu) in svc_pool_for_cpu() argument
363 pidx = m->to_pool[cpu]; in svc_pool_for_cpu()
366 pidx = m->to_pool[cpu_to_node(cpu)]; in svc_pool_for_cpu()
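
svc_pool_map_init_percpu() fills two mirrored arrays, to_pool[cpu] and pool_to[pidx], so that svc_pool_for_cpu() later resolves a CPU to its pool with a single array read (indexing to_pool[] by cpu_to_node(cpu) instead when pools are per-node, as the last hit shows). The bidirectional map in miniature:

#include <stdio.h>

#define NCPU 4

int main(void)
{
	unsigned int to_pool[NCPU], pool_to[NCPU];
	unsigned int pidx = 0;

	for (unsigned int cpu = 0; cpu < NCPU; cpu++) {  /* for_each_online_cpu() */
		to_pool[cpu] = pidx;   /* cpu -> pool */
		pool_to[pidx] = cpu;   /* pool -> cpu */
		pidx++;
	}
	printf("cpu 2 -> pool %u, pool 2 -> cpu %u\n", to_pool[2], pool_to[2]);
	return 0;
}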
/net/ipv4/
route.c
252 int cpu; in rt_cpu_seq_start() local
257 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in rt_cpu_seq_start()
258 if (!cpu_possible(cpu)) in rt_cpu_seq_start()
260 *pos = cpu+1; in rt_cpu_seq_start()
261 return &per_cpu(rt_cache_stat, cpu); in rt_cpu_seq_start()
268 int cpu; in rt_cpu_seq_next() local
270 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in rt_cpu_seq_next()
271 if (!cpu_possible(cpu)) in rt_cpu_seq_next()
273 *pos = cpu+1; in rt_cpu_seq_next()
274 return &per_cpu(rt_cache_stat, cpu); in rt_cpu_seq_next()
