Searched refs:cpu (Results 1 – 25 of 62) sorted by relevance

/net/iucv/
iucv.c:363 int cpu = smp_processor_id(); in iucv_allow_cpu() local
375 parm = iucv_param_irq[cpu]; in iucv_allow_cpu()
393 cpumask_set_cpu(cpu, &iucv_irq_cpumask); in iucv_allow_cpu()
404 int cpu = smp_processor_id(); in iucv_block_cpu() local
408 parm = iucv_param_irq[cpu]; in iucv_block_cpu()
413 cpumask_clear_cpu(cpu, &iucv_irq_cpumask); in iucv_block_cpu()
424 int cpu = smp_processor_id(); in iucv_declare_cpu() local
428 if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask)) in iucv_declare_cpu()
432 parm = iucv_param_irq[cpu]; in iucv_declare_cpu()
434 parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]); in iucv_declare_cpu()
[all …]
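
The iucv.c matches above show a per-CPU enable/disable idiom: code running on the target CPU indexes its own parameter block with smp_processor_id() and records the result in a shared cpumask. A minimal sketch of that idiom (the my_* names are hypothetical, not from iucv.c):

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    struct my_param { unsigned long flags; };

    static struct my_param *my_param_irq[NR_CPUS]; /* one block per CPU */
    static cpumask_t my_irq_cpumask;               /* CPUs currently enabled */

    /* Runs on the target CPU itself (e.g. via on_each_cpu()), so
     * smp_processor_id() is stable for the duration of the call. */
    static void my_allow_cpu(void *unused)
    {
        int cpu = smp_processor_id();
        struct my_param *parm = my_param_irq[cpu];

        parm->flags |= 1;                      /* program this CPU */
        cpumask_set_cpu(cpu, &my_irq_cpumask); /* mark it enabled */
    }

    static void my_block_cpu(void *unused)
    {
        int cpu = smp_processor_id();

        cpumask_clear_cpu(cpu, &my_irq_cpumask);
    }
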
/net/core/
gen_stats.c:119 struct gnet_stats_basic_cpu __percpu *cpu) in __gnet_stats_copy_basic_cpu() argument
124 struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i); in __gnet_stats_copy_basic_cpu()
142 struct gnet_stats_basic_cpu __percpu *cpu, in __gnet_stats_copy_basic() argument
147 if (cpu) { in __gnet_stats_copy_basic()
148 __gnet_stats_copy_basic_cpu(bstats, cpu); in __gnet_stats_copy_basic()
163 struct gnet_stats_basic_cpu __percpu *cpu, in ___gnet_stats_copy_basic() argument
169 __gnet_stats_copy_basic(running, &bstats, cpu, b); in ___gnet_stats_copy_basic()
209 struct gnet_stats_basic_cpu __percpu *cpu, in gnet_stats_copy_basic() argument
212 return ___gnet_stats_copy_basic(running, d, cpu, b, in gnet_stats_copy_basic()
233 struct gnet_stats_basic_cpu __percpu *cpu, in gnet_stats_copy_basic_hw() argument
[all …]
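
The gen_stats.c matches fold per-CPU byte/packet counters into one snapshot by walking every possible CPU and rereading each slot under u64_stats_sync until the pair is consistent. A self-contained sketch of the same fold (struct pkt_stats and pkt_stats_fold are hypothetical):

    #include <linux/percpu.h>
    #include <linux/u64_stats_sync.h>

    struct pkt_stats {
        u64 bytes, packets;
        struct u64_stats_sync syncp;
    };

    /* Sum every CPU's slot into one consistent snapshot. */
    static void pkt_stats_fold(struct pkt_stats __percpu *stats,
                               u64 *bytes, u64 *packets)
    {
        int cpu;

        *bytes = 0;
        *packets = 0;
        for_each_possible_cpu(cpu) {
            struct pkt_stats *s = per_cpu_ptr(stats, cpu);
            unsigned int start;
            u64 b, p;

            do {
                start = u64_stats_fetch_begin(&s->syncp);
                b = s->bytes;
                p = s->packets;
            } while (u64_stats_fetch_retry(&s->syncp, start));

            *bytes += b;
            *packets += p;
        }
    }
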
drop_monitor.c:1027 int cpu, rc; in net_dm_hw_monitor_start() local
1041 for_each_possible_cpu(cpu) { in net_dm_hw_monitor_start()
1042 struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu); in net_dm_hw_monitor_start()
1062 for_each_possible_cpu(cpu) { in net_dm_hw_monitor_start()
1063 struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu); in net_dm_hw_monitor_start()
1083 int cpu; in net_dm_hw_monitor_stop() local
1096 for_each_possible_cpu(cpu) { in net_dm_hw_monitor_stop()
1097 struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu); in net_dm_hw_monitor_stop()
1117 int cpu, rc; in net_dm_trace_on_set() local
1126 for_each_possible_cpu(cpu) { in net_dm_trace_on_set()
[all …]
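
net_dm_hw_monitor_start() initializes per-CPU state under for_each_possible_cpu() and, if a later iteration fails, walks the full range again to unwind, so the teardown must tolerate slots that were never initialized. A generic sketch of that init/rollback shape (my_* names are hypothetical):

    #include <linux/percpu.h>
    #include <linux/slab.h>

    struct my_cpu_data { void *buf; };

    static DEFINE_PER_CPU(struct my_cpu_data, my_cpu_data);

    static int my_monitor_start(void)
    {
        int cpu, rc = 0;

        for_each_possible_cpu(cpu) {
            struct my_cpu_data *data = &per_cpu(my_cpu_data, cpu);

            data->buf = kzalloc(64, GFP_KERNEL);
            if (!data->buf) {
                rc = -ENOMEM;
                goto unwind;
            }
        }
        return 0;

    unwind:
        /* Walk every CPU again; kfree(NULL) keeps untouched slots safe. */
        for_each_possible_cpu(cpu) {
            struct my_cpu_data *data = &per_cpu(my_cpu_data, cpu);

            kfree(data->buf);
            data->buf = NULL;
        }
        return rc;
    }
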
dst.c:326 int cpu; in metadata_dst_alloc_percpu() local
334 for_each_possible_cpu(cpu) in metadata_dst_alloc_percpu()
335 __metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen); in metadata_dst_alloc_percpu()
344 int cpu; in metadata_dst_free_percpu() local
346 for_each_possible_cpu(cpu) { in metadata_dst_free_percpu()
347 struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu); in metadata_dst_free_percpu()
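
dst.c shows the standard lifecycle of dynamic per-CPU data: one percpu allocation, a for_each_possible_cpu() pass to initialize each slot, and a single free_percpu() that releases them all. A minimal sketch (struct my_obj is hypothetical):

    #include <linux/cpumask.h>
    #include <linux/percpu.h>

    struct my_obj { int type; };

    static struct my_obj __percpu *my_obj_alloc_percpu(int type)
    {
        struct my_obj __percpu *p = alloc_percpu(struct my_obj);
        int cpu;

        if (!p)
            return NULL;
        for_each_possible_cpu(cpu)
            per_cpu_ptr(p, cpu)->type = type; /* init every slot */
        return p;
    }

    /* Matching teardown: free_percpu(p) releases all CPUs' slots at once. */
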
dev.c:2454 int cpu, u16 offset, u16 count) in remove_xps_queue_cpu() argument
2460 for (tci = cpu * num_tc; num_tc--; tci++) { in remove_xps_queue_cpu()
4237 int cpu = smp_processor_id(); /* ok because BHs are off */ in __dev_queue_xmit() local
4242 if (READ_ONCE(txq->xmit_lock_owner) != cpu) { in __dev_queue_xmit()
4251 HARD_TX_LOCK(dev, txq, cpu); in __dev_queue_xmit()
4440 rflow->cpu = next_cpu; in set_rps_cpu()
4456 int cpu = -1; in get_rps_cpu() local
4504 tcpu = rflow->cpu; in get_rps_cpu()
4527 cpu = tcpu; in get_rps_cpu()
4537 cpu = tcpu; in get_rps_cpu()
[all …]
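
In __dev_queue_xmit(), smp_processor_id() is safe because BHs are off, and the result is compared against the TX queue's xmit_lock_owner to catch a recursive transmit on the same CPU before it deadlocks on the queue lock. A sketch of just that check (struct my_txq is hypothetical; dev.c drops the packet and, as far as I recall, reports -ENETDOWN on recursion):

    #include <linux/smp.h>
    #include <linux/spinlock.h>

    struct my_txq {
        spinlock_t lock;
        int xmit_lock_owner; /* -1 while unlocked */
    };

    static int my_xmit(struct my_txq *txq)
    {
        int cpu = smp_processor_id(); /* ok because BHs are off */

        if (READ_ONCE(txq->xmit_lock_owner) == cpu)
            return -ENETDOWN; /* recursion on this CPU: bail out */

        spin_lock(&txq->lock);
        WRITE_ONCE(txq->xmit_lock_owner, cpu);
        /* ... hand the packet to the driver ... */
        WRITE_ONCE(txq->xmit_lock_owner, -1);
        spin_unlock(&txq->lock);
        return 0;
    }
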
/net/ipv6/
seg6_hmac.c:356 int i, alg_count, cpu; in seg6_hmac_init_algo() local
369 for_each_possible_cpu(cpu) { in seg6_hmac_init_algo()
373 p_tfm = per_cpu_ptr(algo->tfms, cpu); in seg6_hmac_init_algo()
386 for_each_possible_cpu(cpu) { in seg6_hmac_init_algo()
388 cpu_to_node(cpu)); in seg6_hmac_init_algo()
391 *per_cpu_ptr(algo->shashs, cpu) = shash; in seg6_hmac_init_algo()
415 int i, alg_count, cpu; in seg6_hmac_exit() local
420 for_each_possible_cpu(cpu) { in seg6_hmac_exit()
424 shash = *per_cpu_ptr(algo->shashs, cpu); in seg6_hmac_exit()
426 tfm = *per_cpu_ptr(algo->tfms, cpu); in seg6_hmac_exit()
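
seg6_hmac_init_algo() gives every possible CPU its own crypto transform and allocates each one on that CPU's NUMA node via cpu_to_node(). The same NUMA-local per-CPU allocation pattern, sketched with a plain buffer instead of a tfm (my_bufs and my_bufs_init are hypothetical):

    #include <linux/percpu.h>
    #include <linux/slab.h>
    #include <linux/topology.h>

    static void * __percpu *my_bufs; /* one pointer slot per CPU */

    static int my_bufs_init(size_t size)
    {
        int cpu;

        my_bufs = alloc_percpu(void *);
        if (!my_bufs)
            return -ENOMEM;

        for_each_possible_cpu(cpu) {
            /* Allocate each buffer on its CPU's own NUMA node. */
            void *buf = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));

            if (!buf)
                return -ENOMEM; /* caller unwinds all slots */
            *per_cpu_ptr(my_bufs, cpu) = buf;
        }
        return 0;
    }
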
/net/openvswitch/
flow.c:64 unsigned int cpu = smp_processor_id(); in ovs_flow_stats_update() local
67 stats = rcu_dereference(flow->stats[cpu]); in ovs_flow_stats_update()
73 if (cpu == 0 && unlikely(flow->stats_last_writer != cpu)) in ovs_flow_stats_update()
74 flow->stats_last_writer = cpu; in ovs_flow_stats_update()
82 if (unlikely(flow->stats_last_writer != cpu)) { in ovs_flow_stats_update()
89 likely(!rcu_access_pointer(flow->stats[cpu]))) { in ovs_flow_stats_update()
107 rcu_assign_pointer(flow->stats[cpu], in ovs_flow_stats_update()
109 cpumask_set_cpu(cpu, &flow->cpu_used_mask); in ovs_flow_stats_update()
113 flow->stats_last_writer = cpu; in ovs_flow_stats_update()
130 int cpu; in ovs_flow_stats_get() local
[all …]
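
ovs_flow_stats_update() installs a newly allocated stats slot for the current CPU with rcu_assign_pointer() and then marks the CPU in cpu_used_mask, so readers observe either NULL or a fully initialized slot. A sketch of the publish step (the my_* types are hypothetical):

    #include <linux/cpumask.h>
    #include <linux/rcupdate.h>
    #include <linux/smp.h>

    struct my_stats { u64 packets; };

    struct my_flow {
        struct my_stats __rcu *stats[NR_CPUS];
        cpumask_t cpu_used_mask;
    };

    /* Called from packet processing (softirq), so the CPU is stable. */
    static void my_stats_publish(struct my_flow *flow, struct my_stats *new_stats)
    {
        int cpu = smp_processor_id();

        /* Order slot initialization before readers can see the pointer. */
        rcu_assign_pointer(flow->stats[cpu], new_stats);
        cpumask_set_cpu(cpu, &flow->cpu_used_mask);
    }
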
flow_table.c:109 int cpu; in flow_free() local
117 for (cpu = 0; cpu < nr_cpu_ids; in flow_free()
118 cpu = cpumask_next(cpu, &flow->cpu_used_mask)) { in flow_free()
119 if (flow->stats[cpu]) in flow_free()
121 (struct sw_flow_stats __force *)flow->stats[cpu]); in flow_free()
191 int i, cpu; in tbl_mask_array_reset_counters() local
201 for_each_possible_cpu(cpu) { in tbl_mask_array_reset_counters()
206 stats = per_cpu_ptr(ma->masks_usage_stats, cpu); in tbl_mask_array_reset_counters()
1123 int cpu; in ovs_flow_masks_rebalance() local
1132 for_each_possible_cpu(cpu) { in ovs_flow_masks_rebalance()
[all …]
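
flow_free() walks only the CPUs recorded in cpu_used_mask, using the cpumask_next() cursor form rather than for_each_possible_cpu(); CPU 0 is visited unconditionally (in OVS it holds the pre-allocated fallback stats). The loop, isolated into a self-contained helper (my_fold_used is hypothetical):

    #include <linux/cpumask.h>

    /* Visit slot 0 unconditionally, then only the CPUs set in @used. */
    static u64 my_fold_used(u64 *slots, const struct cpumask *used)
    {
        u64 sum = 0;
        int cpu;

        for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, used))
            sum += slots[cpu];
        return sum;
    }
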
/net/netfilter/
nft_counter.c:124 int cpu; in nft_counter_fetch() local
127 for_each_possible_cpu(cpu) { in nft_counter_fetch()
128 myseq = per_cpu_ptr(&nft_counter_seq, cpu); in nft_counter_fetch()
129 this_cpu = per_cpu_ptr(priv->counter, cpu); in nft_counter_fetch()
303 int cpu, err; in nft_counter_module_init() local
305 for_each_possible_cpu(cpu) in nft_counter_module_init()
306 seqcount_init(per_cpu_ptr(&nft_counter_seq, cpu)); in nft_counter_module_init()
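
nft_counter pairs each per-CPU counter with a per-CPU seqcount_t: readers retry if they race a local writer, and module init seeds every CPU's seqcount with seqcount_init(), as in the matches above. A sketch of both halves (the my_* names are hypothetical):

    #include <linux/init.h>
    #include <linux/percpu.h>
    #include <linux/seqlock.h>
    #include <linux/string.h>

    struct my_counter { u64 bytes, packets; };

    static DEFINE_PER_CPU(seqcount_t, my_counter_seq);

    static void my_counter_fetch(struct my_counter __percpu *counter,
                                 struct my_counter *total)
    {
        int cpu;

        memset(total, 0, sizeof(*total));
        for_each_possible_cpu(cpu) {
            seqcount_t *myseq = per_cpu_ptr(&my_counter_seq, cpu);
            struct my_counter *this_cpu = per_cpu_ptr(counter, cpu);
            unsigned int seq;
            u64 bytes, packets;

            /* Reread until no local writer interleaved with us. */
            do {
                seq = read_seqcount_begin(myseq);
                bytes = this_cpu->bytes;
                packets = this_cpu->packets;
            } while (read_seqcount_retry(myseq, seq));

            total->bytes += bytes;
            total->packets += packets;
        }
    }

    static int __init my_counter_module_init(void)
    {
        int cpu;

        for_each_possible_cpu(cpu)
            seqcount_init(per_cpu_ptr(&my_counter_seq, cpu));
        return 0;
    }
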
nf_conntrack_standalone.c:394 int cpu; in ct_cpu_seq_start() local
399 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_start()
400 if (!cpu_possible(cpu)) in ct_cpu_seq_start()
402 *pos = cpu + 1; in ct_cpu_seq_start()
403 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_start()
412 int cpu; in ct_cpu_seq_next() local
414 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { in ct_cpu_seq_next()
415 if (!cpu_possible(cpu)) in ct_cpu_seq_next()
417 *pos = cpu + 1; in ct_cpu_seq_next()
418 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_next()
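
ct_cpu_seq_start()/ct_cpu_seq_next() are the stock seq_file iterator over per-CPU stats: the 1-based *pos maps to the next possible CPU, and position 0 yields SEQ_START_TOKEN for the header line. A sketch of the ->start half under hypothetical my_* types:

    #include <linux/percpu.h>
    #include <linux/seq_file.h>

    struct my_stat { unsigned int found; };
    struct my_net { struct my_stat __percpu *stat; };

    /* *pos is kept 1-based: position 0 is the header, position n+1 means
     * "resume scanning at CPU n", and returning NULL ends the walk. */
    static void *my_cpu_seq_start(struct seq_file *seq, loff_t *pos)
    {
        struct my_net *net = seq->private;
        int cpu;

        if (*pos == 0)
            return SEQ_START_TOKEN;

        for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
            if (!cpu_possible(cpu))
                continue;
            *pos = cpu + 1;
            return per_cpu_ptr(net->stat, cpu);
        }
        return NULL;
    }
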
nf_synproxy_core.c:249 int cpu; in synproxy_cpu_seq_start() local
254 for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) { in synproxy_cpu_seq_start()
255 if (!cpu_possible(cpu)) in synproxy_cpu_seq_start()
257 *pos = cpu + 1; in synproxy_cpu_seq_start()
258 return per_cpu_ptr(snet->stats, cpu); in synproxy_cpu_seq_start()
267 int cpu; in synproxy_cpu_seq_next() local
269 for (cpu = *pos; cpu < nr_cpu_ids; cpu++) { in synproxy_cpu_seq_next()
270 if (!cpu_possible(cpu)) in synproxy_cpu_seq_next()
272 *pos = cpu + 1; in synproxy_cpu_seq_next()
273 return per_cpu_ptr(snet->stats, cpu); in synproxy_cpu_seq_next()
nf_conntrack_netlink.c:1707 int cpu; in ctnetlink_dump_list() local
1716 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { in ctnetlink_dump_list()
1719 if (!cpu_possible(cpu)) in ctnetlink_dump_list()
1722 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); in ctnetlink_dump_list()
1750 cb->args[0] = cpu; in ctnetlink_dump_list()
2507 __u16 cpu, const struct ip_conntrack_stat *st) in ctnetlink_ct_stat_cpu_fill_info() argument
2515 NFNETLINK_V0, htons(cpu)); in ctnetlink_ct_stat_cpu_fill_info()
2547 int cpu; in ctnetlink_ct_stat_cpu_dump() local
2553 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { in ctnetlink_ct_stat_cpu_dump()
2556 if (!cpu_possible(cpu)) in ctnetlink_ct_stat_cpu_dump()
[all …]
nf_conncount.c:45 int cpu; member
102 int cpu = raw_smp_processor_id(); in find_or_evict() local
117 if (conn->cpu == cpu || age >= 2) { in find_or_evict()
189 conn->cpu = raw_smp_processor_id(); in __nf_conncount_add()
nf_conntrack_ecache.c:101 int cpu, delay = -1; in ecache_work() local
106 for_each_possible_cpu(cpu) { in ecache_work()
109 pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu); in ecache_work()
xt_NFQUEUE.c:94 int cpu = smp_processor_id(); in nfqueue_tg_v3() local
96 queue = info->queuenum + cpu % info->queues_total; in nfqueue_tg_v3()
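
nfqueue_tg_v3() spreads packets over a contiguous block of NFQUEUEs by CPU id: queue = queuenum + cpu % queues_total. For example, queuenum=16 with queues_total=4 maps CPUs onto queues 16..19. The arithmetic, isolated (my_pick_queue is hypothetical; smp_processor_id() is safe here because netfilter targets run in softirq context):

    #include <linux/smp.h>
    #include <linux/types.h>

    static u32 my_pick_queue(u32 queuenum, u32 queues_total)
    {
        /* %: a given CPU always lands on the same queue in the block */
        return queuenum + smp_processor_id() % queues_total;
    }
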
x_tables.c:1204 int cpu; in xt_free_table_info() local
1207 for_each_possible_cpu(cpu) in xt_free_table_info()
1208 kvfree(info->jumpstack[cpu]); in xt_free_table_info()
1328 int cpu; in xt_jumpstack_alloc() local
1353 for_each_possible_cpu(cpu) { in xt_jumpstack_alloc()
1354 i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL, in xt_jumpstack_alloc()
1355 cpu_to_node(cpu)); in xt_jumpstack_alloc()
1356 if (i->jumpstack[cpu] == NULL) in xt_jumpstack_alloc()
1390 unsigned int cpu; in xt_replace_table() local
1430 for_each_possible_cpu(cpu) { in xt_replace_table()
[all …]
/net/rds/
page.c:155 unsigned int cpu; in rds_page_exit() local
157 for_each_possible_cpu(cpu) { in rds_page_exit()
160 rem = &per_cpu(rds_page_remainders, cpu); in rds_page_exit()
161 rdsdebug("cpu %u\n", cpu); in rds_page_exit()
tcp_stats.c:58 int cpu; in rds_tcp_stats_info_copy() local
63 for_each_online_cpu(cpu) { in rds_tcp_stats_info_copy()
64 src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu)); in rds_tcp_stats_info_copy()
ib_stats.c:91 int cpu; in rds_ib_stats_info_copy() local
96 for_each_online_cpu(cpu) { in rds_ib_stats_info_copy()
97 src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu)); in rds_ib_stats_info_copy()
stats.c:119 int cpu; in rds_stats_info() local
129 for_each_online_cpu(cpu) { in rds_stats_info()
130 src = (uint64_t *)&(per_cpu(rds_stats, cpu)); in rds_stats_info()
ib_recv.c:104 int cpu; in rds_ib_recv_alloc_cache() local
110 for_each_possible_cpu(cpu) { in rds_ib_recv_alloc_cache()
111 head = per_cpu_ptr(cache->percpu, cpu); in rds_ib_recv_alloc_cache()
139 int cpu; in rds_ib_cache_splice_all_lists() local
141 for_each_possible_cpu(cpu) { in rds_ib_cache_splice_all_lists()
142 head = per_cpu_ptr(cache->percpu, cpu); in rds_ib_cache_splice_all_lists()
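
The RDS stats copiers above all cast a per-CPU stats struct to a flat uint64_t array and sum it lane by lane across online CPUs. A self-contained sketch of that fold (struct my_stats is hypothetical; the cast is only valid while the struct contains nothing but u64 fields):

    #include <linux/percpu.h>
    #include <linux/string.h>

    struct my_stats {
        u64 s_recv, s_send, s_drop; /* must contain only u64 fields */
    };

    static DEFINE_PER_CPU(struct my_stats, my_stats);

    /* Fold every online CPU's struct into @sum, lane by lane. */
    static void my_stats_fold(struct my_stats *sum)
    {
        u64 *dst = (u64 *)sum;
        size_t i, n = sizeof(*sum) / sizeof(u64);
        int cpu;

        memset(sum, 0, sizeof(*sum));
        for_each_online_cpu(cpu) {
            u64 *src = (u64 *)&per_cpu(my_stats, cpu);

            for (i = 0; i < n; i++)
                dst[i] += src[i];
        }
    }
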
/net/xfrm/
xfrm_ipcomp.c:238 int cpu; in ipcomp_free_tfms() local
256 for_each_possible_cpu(cpu) { in ipcomp_free_tfms()
257 struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu); in ipcomp_free_tfms()
267 int cpu; in ipcomp_alloc_tfms() local
294 for_each_possible_cpu(cpu) { in ipcomp_alloc_tfms()
299 *per_cpu_ptr(tfms, cpu) = tfm; in ipcomp_alloc_tfms()
/net/bpf/
test_run.c:284 if (kattr->test.flags || kattr->test.cpu) in bpf_prog_test_run_tracing()
342 int cpu = kattr->test.cpu, err = 0; in bpf_prog_test_run_raw_tp() local
355 if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0) in bpf_prog_test_run_raw_tp()
374 cpu == current_cpu) { in bpf_prog_test_run_raw_tp()
376 } else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { in bpf_prog_test_run_raw_tp()
384 err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp, in bpf_prog_test_run_raw_tp()
578 if (kattr->test.flags || kattr->test.cpu) in bpf_prog_test_run_skb()
868 if (kattr->test.flags || kattr->test.cpu) in bpf_prog_test_run_flow_dissector()
935 if (kattr->test.flags || kattr->test.cpu) in bpf_prog_test_run_sk_lookup()
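
bpf_prog_test_run_raw_tp() validates the requested CPU against nr_cpu_ids and cpu_online(), runs the program directly when the target is the current CPU, and otherwise ships it over with smp_call_function_single(). A condensed sketch (my_run_on_cpu is hypothetical; smp_call_function_single() already executes the function inline when the target is the calling CPU):

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    static int my_run_on_cpu(int cpu, smp_call_func_t fn, void *info)
    {
        if (cpu >= nr_cpu_ids || !cpu_online(cpu))
            return -ENXIO; /* assumption: same errno as test_run.c */

        /* wait=1 blocks until the target CPU has executed fn(info) */
        return smp_call_function_single(cpu, fn, info, 1);
    }
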
/net/ipv4/netfilter/
arp_tables.c:193 unsigned int cpu, stackidx = 0; in arpt_do_table() local
207 cpu = smp_processor_id(); in arpt_do_table()
209 jumpstack = (struct arpt_entry **)private->jumpstack[cpu]; in arpt_do_table()
603 unsigned int cpu; in get_counters() local
606 for_each_possible_cpu(cpu) { in get_counters()
607 seqcount_t *s = &per_cpu(xt_recseq, cpu); in get_counters()
615 tmp = xt_get_per_cpu_counter(&iter->counters, cpu); in get_counters()
633 unsigned int cpu, i; in get_old_counters() local
635 for_each_possible_cpu(cpu) { in get_old_counters()
640 tmp = xt_get_per_cpu_counter(&iter->counters, cpu); in get_old_counters()
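
get_counters() reads each CPU's byte/packet pair under that CPU's xt_recseq seqcount, retrying on a torn read; it is the same seqcount-retry idiom as nft_counter above, applied per counter. Sketched for one CPU (the my_* names are hypothetical):

    #include <linux/percpu.h>
    #include <linux/seqlock.h>

    struct my_counter { u64 bcnt, pcnt; };

    static DEFINE_PER_CPU(seqcount_t, my_recseq);

    /* Read one CPU's pair consistently against that CPU's local writer. */
    static void my_read_counter(const struct my_counter *c, int cpu,
                                u64 *bcnt, u64 *pcnt)
    {
        seqcount_t *s = &per_cpu(my_recseq, cpu);
        unsigned int start;

        do {
            start = read_seqcount_begin(s);
            *bcnt = c->bcnt;
            *pcnt = c->pcnt;
        } while (read_seqcount_retry(s, start));
    }
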
/net/sched/
cls_basic.c:285 int cpu; in basic_dump() local
300 for_each_possible_cpu(cpu) { in basic_dump()
301 struct tc_basic_pcnt *pf = per_cpu_ptr(f->pf, cpu); in basic_dump()
