Searched refs:cpu (Results 1 – 25 of 46) sorted by relevance

/net/iucv/
iucv.c 379 int cpu = smp_processor_id(); in iucv_allow_cpu() local
391 parm = iucv_param_irq[cpu]; in iucv_allow_cpu()
409 cpumask_set_cpu(cpu, &iucv_irq_cpumask); in iucv_allow_cpu()
420 int cpu = smp_processor_id(); in iucv_block_cpu() local
424 parm = iucv_param_irq[cpu]; in iucv_block_cpu()
429 cpumask_clear_cpu(cpu, &iucv_irq_cpumask); in iucv_block_cpu()
440 int cpu = smp_processor_id(); in iucv_block_cpu_almost() local
444 parm = iucv_param_irq[cpu]; in iucv_block_cpu_almost()
454 cpumask_clear_cpu(cpu, &iucv_irq_cpumask); in iucv_block_cpu_almost()
465 int cpu = smp_processor_id(); in iucv_declare_cpu() local
[all …]
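
The iucv.c hits above all follow one per-CPU idiom: the function runs pinned to a CPU, looks up that CPU's interrupt parameter block in a per-CPU array, and records the resulting state in a shared cpumask. A minimal sketch of the pattern, with example_param_irq and example_irq_cpumask standing in for the real IUCV structures:

/* Sketch only: enable this CPU's interrupts; preemption must already
 * be off (these helpers are invoked via smp_call_function and friends). */
static void example_allow_cpu(void *data)
{
	int cpu = smp_processor_id();	/* stable: we cannot migrate here */
	union example_param *parm = example_param_irq[cpu];

	/* ... program the hardware through parm ... */

	/* note in the shared mask that this CPU now takes interrupts */
	cpumask_set_cpu(cpu, &example_irq_cpumask);
}
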
/net/netfilter/
nf_synproxy_core.c 247 int cpu; in synproxy_cpu_seq_start() local
252 for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) { in synproxy_cpu_seq_start()
253 if (!cpu_possible(cpu)) in synproxy_cpu_seq_start()
255 *pos = cpu + 1; in synproxy_cpu_seq_start()
256 return per_cpu_ptr(snet->stats, cpu); in synproxy_cpu_seq_start()
265 int cpu; in synproxy_cpu_seq_next() local
267 for (cpu = *pos; cpu < nr_cpu_ids; cpu++) { in synproxy_cpu_seq_next()
268 if (!cpu_possible(cpu)) in synproxy_cpu_seq_next()
270 *pos = cpu + 1; in synproxy_cpu_seq_next()
271 return per_cpu_ptr(snet->stats, cpu); in synproxy_cpu_seq_next()
nf_conntrack_standalone.c 276 int cpu; in ct_cpu_seq_start() local
281 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_start()
282 if (!cpu_possible(cpu)) in ct_cpu_seq_start()
284 *pos = cpu + 1; in ct_cpu_seq_start()
285 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_start()
294 int cpu; in ct_cpu_seq_next() local
296 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_next()
297 if (!cpu_possible(cpu)) in ct_cpu_seq_next()
299 *pos = cpu + 1; in ct_cpu_seq_next()
300 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_next()
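
nf_synproxy_core.c and nf_conntrack_standalone.c (and the nf_conntrack_l3proto_ipv4_compat.c and route.c hits further down) implement the same seq_file walk over per-CPU statistics: *pos encodes cpu + 1 so the iterator can resume, and impossible CPU numbers are skipped. A hedged sketch of the start/next pair, with stats standing in for the per-CPU statistics pointer:

static void *example_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)			/* first call emits a header */
		return SEQ_START_TOKEN;

	for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {
		if (!cpu_possible(cpu))
			continue;	/* skip holes in the CPU map */
		*pos = cpu + 1;		/* resume after this CPU */
		return per_cpu_ptr(stats, cpu);
	}
	return NULL;
}

static void *example_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; cpu++) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(stats, cpu);
	}
	return NULL;
}
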
nf_conntrack_core.c 252 ct->cpu = smp_processor_id(); in nf_ct_add_to_dying_list()
253 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); in nf_ct_add_to_dying_list()
267 ct->cpu = smp_processor_id(); in nf_ct_add_to_unconfirmed_list()
268 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); in nf_ct_add_to_unconfirmed_list()
282 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); in nf_ct_del_from_dying_or_unconfirmed_list()
554 tmpl->cpu = smp_processor_id(); in nf_conntrack_tmpl_insert()
555 pcpu = per_cpu_ptr(nf_ct_net(tmpl)->ct.pcpu_lists, tmpl->cpu); in nf_conntrack_tmpl_insert()
1364 int cpu; in get_next_corpse() local
1384 for_each_possible_cpu(cpu) { in get_next_corpse()
1385 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); in get_next_corpse()
[all …]
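
The nf_conntrack_core.c hits store smp_processor_id() in ct->cpu before linking the entry into that CPU's list, so the delete path can later locate the right per-CPU lock from the entry itself, whatever CPU it happens to run on. Condensed from the lines above (list head name abridged):

/* remember which CPU's list this entry lives on */
ct->cpu = smp_processor_id();
pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

spin_lock(&pcpu->lock);
hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
		     &pcpu->dying);	/* or &pcpu->unconfirmed */
spin_unlock(&pcpu->lock);
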
x_tables.c 951 int cpu; in xt_alloc_table_info() local
963 for_each_possible_cpu(cpu) { in xt_alloc_table_info()
965 newinfo->entries[cpu] = kmalloc_node(size, in xt_alloc_table_info()
967 cpu_to_node(cpu)); in xt_alloc_table_info()
969 newinfo->entries[cpu] = vmalloc_node(size, in xt_alloc_table_info()
970 cpu_to_node(cpu)); in xt_alloc_table_info()
972 if (newinfo->entries[cpu] == NULL) { in xt_alloc_table_info()
984 int cpu; in xt_free_table_info() local
986 for_each_possible_cpu(cpu) in xt_free_table_info()
987 kvfree(info->entries[cpu]); in xt_free_table_info()
[all …]
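
xt_alloc_table_info() allocates one rule blob per possible CPU on that CPU's own NUMA node, trying kmalloc_node() first and falling back to vmalloc_node() for larger sizes; kvfree() in xt_free_table_info() then frees either kind. A condensed sketch (the size threshold and error unwinding are simplified):

for_each_possible_cpu(cpu) {
	if (size <= PAGE_SIZE)
		newinfo->entries[cpu] = kmalloc_node(size, GFP_KERNEL,
						     cpu_to_node(cpu));
	else
		newinfo->entries[cpu] = vmalloc_node(size,
						     cpu_to_node(cpu));
	if (newinfo->entries[cpu] == NULL)
		goto free_partial;	/* undo the CPUs done so far */
}

/* ... and on teardown ... */

for_each_possible_cpu(cpu)
	kvfree(info->entries[cpu]);	/* handles kmalloc and vmalloc */
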
nf_conntrack_netlink.c 1163 int cpu; in ctnetlink_dump_list() local
1172 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { in ctnetlink_dump_list()
1175 if (!cpu_possible(cpu)) in ctnetlink_dump_list()
1178 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); in ctnetlink_dump_list()
1200 cb->args[0] = cpu; in ctnetlink_dump_list()
1859 __u16 cpu, const struct ip_conntrack_stat *st) in ctnetlink_ct_stat_cpu_fill_info() argument
1873 nfmsg->res_id = htons(cpu); in ctnetlink_ct_stat_cpu_fill_info()
1904 int cpu; in ctnetlink_ct_stat_cpu_dump() local
1910 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { in ctnetlink_ct_stat_cpu_dump()
1913 if (!cpu_possible(cpu)) in ctnetlink_ct_stat_cpu_dump()
[all …]
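
ctnetlink_dump_list() and ctnetlink_ct_stat_cpu_dump() use the netlink dump cursor the way the seq_file code above uses *pos: cb->args[0] holds the next CPU to visit, so a dump that fills its skb can resume on the right CPU in the next callback. Roughly (example_fill_one is a stand-in for the per-CPU fill function):

for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
	if (!cpu_possible(cpu))
		continue;

	if (example_fill_one(skb, cb, cpu) < 0)
		break;		/* skb full: stop here */
}
cb->args[0] = cpu;		/* resume point for the next dump pass */
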
nf_conntrack_ecache.c 84 int cpu, delay = -1; in ecache_work() local
89 for_each_possible_cpu(cpu) { in ecache_work()
92 pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu); in ecache_work()
nft_queue.c 40 int cpu = raw_smp_processor_id(); in nft_queue_eval() local
42 queue = priv->queuenum + cpu % priv->queues_total; in nft_queue_eval()
xt_NFQUEUE.c 96 int cpu = smp_processor_id(); in nfqueue_tg_v3() local
98 queue = info->queuenum + cpu % info->queues_total; in nfqueue_tg_v3()
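
nft_queue.c and xt_NFQUEUE.c share one small load-spreading trick: with several NFQUEUE queues configured, the verdict queue is derived from the current CPU, so each CPU consistently feeds the same userspace queue. In essence:

u32 queue = info->queuenum;
int cpu = smp_processor_id();	/* nft_queue uses raw_smp_processor_id() */

if (info->queues_total > 1)	/* spread across the configured range */
	queue = info->queuenum + cpu % info->queues_total;
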
/net/ipv4/netfilter/
nf_conntrack_l3proto_ipv4_compat.c 327 int cpu; in ct_cpu_seq_start() local
332 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_start()
333 if (!cpu_possible(cpu)) in ct_cpu_seq_start()
335 *pos = cpu+1; in ct_cpu_seq_start()
336 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_start()
345 int cpu; in ct_cpu_seq_next() local
347 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_next()
348 if (!cpu_possible(cpu)) in ct_cpu_seq_next()
350 *pos = cpu+1; in ct_cpu_seq_next()
351 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_next()
/net/core/
gen_stats.c 105 struct gnet_stats_basic_cpu __percpu *cpu) in __gnet_stats_copy_basic_cpu() argument
110 struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i); in __gnet_stats_copy_basic_cpu()
128 struct gnet_stats_basic_cpu __percpu *cpu, in __gnet_stats_copy_basic() argument
131 if (cpu) { in __gnet_stats_copy_basic()
132 __gnet_stats_copy_basic_cpu(bstats, cpu); in __gnet_stats_copy_basic()
153 struct gnet_stats_basic_cpu __percpu *cpu, in gnet_stats_copy_basic() argument
158 __gnet_stats_copy_basic(&bstats, cpu, b); in gnet_stats_copy_basic()
239 const struct gnet_stats_queue __percpu *cpu, in __gnet_stats_copy_queue() argument
243 if (cpu) { in __gnet_stats_copy_queue()
244 __gnet_stats_copy_queue_cpu(qstats, cpu); in __gnet_stats_copy_queue()
drop_monitor.c 360 int cpu, rc; in init_net_drop_monitor() local
385 for_each_possible_cpu(cpu) { in init_net_drop_monitor()
386 data = &per_cpu(dm_cpu_data, cpu); in init_net_drop_monitor()
407 int cpu; in exit_net_drop_monitor() local
418 for_each_possible_cpu(cpu) { in exit_net_drop_monitor()
419 data = &per_cpu(dm_cpu_data, cpu); in exit_net_drop_monitor()
flow.c 318 static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu) in flow_cache_percpu_empty() argument
323 fcp = per_cpu_ptr(fc->percpu, cpu); in flow_cache_percpu_empty()
393 static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu) in flow_cache_cpu_prepare() argument
395 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); in flow_cache_cpu_prepare()
399 fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); in flow_cache_cpu_prepare()
417 int res, cpu = (unsigned long) hcpu; in flow_cache_cpu() local
418 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); in flow_cache_cpu()
423 res = flow_cache_cpu_prepare(fc, cpu); in flow_cache_cpu()
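
flow.c still uses the old notifier-based CPU hotplug interface: the CPU number arrives packed into a void *hcpu, and the per-CPU hash table is allocated on the CPU's own node at CPU_UP_PREPARE time. A hedged sketch of that callback shape:

static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;	/* CPU number cast through a pointer */

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* allocate the incoming CPU's state on its NUMA node */
		if (example_cpu_prepare(cpu))
			return notifier_from_errno(-ENOMEM);
		break;
	case CPU_DEAD:
		/* release the departed CPU's state */
		break;
	}
	return NOTIFY_OK;
}
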
dev.c 1848 int cpu, u16 index) in remove_xps_queue() argument
1854 map = xmap_dereference(dev_maps->cpu_map[cpu]); in remove_xps_queue()
1861 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL); in remove_xps_queue()
1875 int cpu, i; in netif_reset_xps_queues_gt() local
1884 for_each_possible_cpu(cpu) { in netif_reset_xps_queues_gt()
1886 if (!remove_xps_queue(dev_maps, cpu, i)) in netif_reset_xps_queues_gt()
1907 int cpu, u16 index) in expand_xps_map() argument
1929 cpu_to_node(cpu)); in expand_xps_map()
1947 int cpu, numa_node_id = -2; in netif_set_xps_queue() local
1955 for_each_online_cpu(cpu) { in netif_set_xps_queue()
[all …]
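
The dev.c hits are the XPS (transmit packet steering) maps: each CPU holds an RCU-protected xps_map of queue indices, and removing a queue swap-deletes it from the CPU's map, dropping the whole map once it empties. Condensed from remove_xps_queue():

map = xmap_dereference(dev_maps->cpu_map[cpu]);
if (map) {
	for (pos = 0; pos < map->len; pos++)
		if (map->queues[pos] == index)
			break;
	if (pos < map->len) {
		map->queues[pos] = map->queues[--map->len];	/* swap-delete */
		if (!map->len) {
			RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
			kfree_rcu(map, rcu);	/* free after readers drain */
		}
	}
}
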
/net/xfrm/
xfrm_ipcomp.c 48 const int cpu = get_cpu(); in ipcomp_decompress() local
49 u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); in ipcomp_decompress()
50 struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); in ipcomp_decompress()
250 int cpu; in ipcomp_free_tfms() local
268 for_each_possible_cpu(cpu) { in ipcomp_free_tfms()
269 struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu); in ipcomp_free_tfms()
279 int cpu; in ipcomp_alloc_tfms() local
306 for_each_possible_cpu(cpu) { in ipcomp_alloc_tfms()
311 *per_cpu_ptr(tfms, cpu) = tfm; in ipcomp_alloc_tfms()
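
ipcomp_decompress() pins itself with get_cpu() so that its per-CPU scratch buffer and crypto_comp transform cannot be pulled out from under it by migration mid-operation. The surrounding idiom is roughly:

const int cpu = get_cpu();	/* disables preemption, returns our CPU */
u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);

/* ... copy the decompressed data out of the scratch buffer ... */

put_cpu();			/* re-enable preemption */
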
/net/rds/
iw_stats.c 79 int cpu; in rds_iw_stats_info_copy() local
84 for_each_online_cpu(cpu) { in rds_iw_stats_info_copy()
85 src = (uint64_t *)&(per_cpu(rds_iw_stats, cpu)); in rds_iw_stats_info_copy()
tcp_stats.c 58 int cpu; in rds_tcp_stats_info_copy() local
63 for_each_online_cpu(cpu) { in rds_tcp_stats_info_copy()
64 src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu)); in rds_tcp_stats_info_copy()
ib_stats.c 81 int cpu; in rds_ib_stats_info_copy() local
86 for_each_online_cpu(cpu) { in rds_ib_stats_info_copy()
87 src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu)); in rds_ib_stats_info_copy()
stats.c 116 int cpu; in rds_stats_info() local
126 for_each_online_cpu(cpu) { in rds_stats_info()
127 src = (uint64_t *)&(per_cpu(rds_stats, cpu)); in rds_stats_info()
page.c 186 long cpu = (long)hcpu; in rds_page_remainder_cpu_notify() local
188 rem = &per_cpu(rds_page_remainders, cpu); in rds_page_remainder_cpu_notify()
190 rdsdebug("cpu %ld action 0x%lx\n", cpu, action); in rds_page_remainder_cpu_notify()
ib_recv.c 103 int cpu; in rds_ib_recv_alloc_cache() local
109 for_each_possible_cpu(cpu) { in rds_ib_recv_alloc_cache()
110 head = per_cpu_ptr(cache->percpu, cpu); in rds_ib_recv_alloc_cache()
138 int cpu; in rds_ib_cache_splice_all_lists() local
140 for_each_possible_cpu(cpu) { in rds_ib_cache_splice_all_lists()
141 head = per_cpu_ptr(cache->percpu, cpu); in rds_ib_cache_splice_all_lists()
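
The first four rds hits (iw_stats.c, tcp_stats.c, ib_stats.c, stats.c) aggregate statistics identically: walk the online CPUs, view each CPU's stats struct as a flat array of uint64_t counters, and sum element-wise into one snapshot. A sketch, assuming a stats struct composed only of u64 counters:

uint64_t sums[NR_COUNTERS] = {0};	/* NR_COUNTERS = sizeof(struct stats) / sizeof(u64) */
uint64_t *src;
int cpu, i;

for_each_online_cpu(cpu) {
	src = (uint64_t *)&per_cpu(example_stats, cpu);
	for (i = 0; i < NR_COUNTERS; i++)
		sums[i] += src[i];	/* counters only grow; no locking */
}
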
/net/batman-adv/
main.h 337 int cpu; in batadv_sum_counter() local
339 for_each_possible_cpu(cpu) { in batadv_sum_counter()
340 counters = per_cpu_ptr(bat_priv->bat_counters, cpu); in batadv_sum_counter()
/net/bridge/
br_device.c 144 unsigned int cpu; in br_get_stats64() local
146 for_each_possible_cpu(cpu) { in br_get_stats64()
149 = per_cpu_ptr(br->stats, cpu); in br_get_stats64()
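
br_get_stats64() also folds per-CPU counters, but reads each CPU's block under a u64_stats sequence-retry loop so 32-bit hosts see consistent 64-bit values. A hedged sketch, assuming the common pcpu_sw_netstats layout (only two of the counters shown; sum is the rtnl_link_stats64 being filled):

for_each_possible_cpu(cpu) {
	const struct pcpu_sw_netstats *stats = per_cpu_ptr(br->stats, cpu);
	u64 rx_packets, rx_bytes;
	unsigned int start;

	do {	/* retry if a writer updated the block mid-read */
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		rx_packets = stats->rx_packets;
		rx_bytes   = stats->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

	sum.rx_packets += rx_packets;
	sum.rx_bytes   += rx_bytes;
}
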
/net/sunrpc/
svc.c 184 unsigned int cpu; in svc_pool_map_init_percpu() local
191 for_each_online_cpu(cpu) { in svc_pool_map_init_percpu()
193 m->to_pool[cpu] = pidx; in svc_pool_map_init_percpu()
194 m->pool_to[pidx] = cpu; in svc_pool_map_init_percpu()
350 svc_pool_for_cpu(struct svc_serv *serv, int cpu) in svc_pool_for_cpu() argument
363 pidx = m->to_pool[cpu]; in svc_pool_for_cpu()
366 pidx = m->to_pool[cpu_to_node(cpu)]; in svc_pool_for_cpu()
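
svc.c builds small lookup arrays at init time (to_pool, pool_to), and svc_pool_for_cpu() then resolves a CPU to its serving thread pool either per-CPU or per-NUMA-node, depending on the chosen map mode. Compressed from the hits above:

switch (m->mode) {
case SVC_POOL_PERCPU:
	pidx = m->to_pool[cpu];			/* one pool per CPU */
	break;
case SVC_POOL_PERNODE:
	pidx = m->to_pool[cpu_to_node(cpu)];	/* one pool per node */
	break;
}
return &serv->sv_pools[pidx % serv->sv_nrpools];
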
/net/ipv4/
route.c 245 int cpu; in rt_cpu_seq_start() local
250 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in rt_cpu_seq_start()
251 if (!cpu_possible(cpu)) in rt_cpu_seq_start()
253 *pos = cpu+1; in rt_cpu_seq_start()
254 return &per_cpu(rt_cache_stat, cpu); in rt_cpu_seq_start()
261 int cpu; in rt_cpu_seq_next() local
263 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in rt_cpu_seq_next()
264 if (!cpu_possible(cpu)) in rt_cpu_seq_next()
266 *pos = cpu+1; in rt_cpu_seq_next()
267 return &per_cpu(rt_cache_stat, cpu); in rt_cpu_seq_next()
