/net/iucv/ |
D | iucv.c |
    384  int cpu = smp_processor_id();  in iucv_allow_cpu()  local
    396  parm = iucv_param_irq[cpu];  in iucv_allow_cpu()
    414  cpumask_set_cpu(cpu, &iucv_irq_cpumask);  in iucv_allow_cpu()
    425  int cpu = smp_processor_id();  in iucv_block_cpu()  local
    429  parm = iucv_param_irq[cpu];  in iucv_block_cpu()
    434  cpumask_clear_cpu(cpu, &iucv_irq_cpumask);  in iucv_block_cpu()
    445  int cpu = smp_processor_id();  in iucv_block_cpu_almost()  local
    449  parm = iucv_param_irq[cpu];  in iucv_block_cpu_almost()
    459  cpumask_clear_cpu(cpu, &iucv_irq_cpumask);  in iucv_block_cpu_almost()
    470  int cpu = smp_processor_id();  in iucv_declare_cpu()  local
    [all …]
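The iucv.c hits above show the simplest per-CPU idiom in this listing: code that already runs pinned to one CPU reads smp_processor_id(), indexes a per-CPU parameter array with it, and records the CPU in a global cpumask. A minimal sketch of that shape, using placeholder names (my_param_irq, my_irq_cpumask) rather than the real iucv symbols:

```c
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/threads.h>

struct my_param {
	unsigned long mask;		/* stand-in for the real parameter block */
};

static struct my_param *my_param_irq[NR_CPUS];	/* filled at init time */
static struct cpumask my_irq_cpumask;

/* meant to run pinned to one CPU, e.g. via on_each_cpu() */
static void my_allow_cpu(void *data)
{
	int cpu = smp_processor_id();
	struct my_param *parm = my_param_irq[cpu];

	parm->mask = ~0UL;				/* enable */
	cpumask_set_cpu(cpu, &my_irq_cpumask);		/* record this CPU */
}

static void my_block_cpu(void *data)
{
	int cpu = smp_processor_id();
	struct my_param *parm = my_param_irq[cpu];

	parm->mask = 0;					/* disable */
	cpumask_clear_cpu(cpu, &my_irq_cpumask);
}
```

Helpers of this kind are only safe because the caller already guarantees the CPU cannot change underneath them (cross-CPU calls, disabled preemption, or the like); otherwise the smp_processor_id() value would be stale by the time it is used.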
|
/net/core/ |
D | gen_stats.c |
    119  struct gnet_stats_basic_cpu __percpu *cpu)  in __gnet_stats_copy_basic_cpu()  argument
    124  struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);  in __gnet_stats_copy_basic_cpu()
    143  struct gnet_stats_basic_cpu __percpu *cpu,  in __gnet_stats_copy_basic()  argument
    148  if (cpu) {  in __gnet_stats_copy_basic()
    149  __gnet_stats_copy_basic_cpu(bstats, cpu);  in __gnet_stats_copy_basic()
    164  struct gnet_stats_basic_cpu __percpu *cpu,  in ___gnet_stats_copy_basic()  argument
    170  __gnet_stats_copy_basic(running, &bstats, cpu, b);  in ___gnet_stats_copy_basic()
    205  struct gnet_stats_basic_cpu __percpu *cpu,  in gnet_stats_copy_basic()  argument
    208  return ___gnet_stats_copy_basic(running, d, cpu, b,  in gnet_stats_copy_basic()
    229  struct gnet_stats_basic_cpu __percpu *cpu,  in gnet_stats_copy_basic_hw()  argument
    [all …]
|
D | drop_monitor.c |
    974  int cpu;  in net_dm_hw_monitor_start()  local
    988  for_each_possible_cpu(cpu) {  in net_dm_hw_monitor_start()
    989  struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);  in net_dm_hw_monitor_start()
    1005  int cpu;  in net_dm_hw_monitor_stop()  local
    1017  for_each_possible_cpu(cpu) {  in net_dm_hw_monitor_stop()
    1018  struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);  in net_dm_hw_monitor_stop()
    1038  int cpu, rc;  in net_dm_trace_on_set()  local
    1047  for_each_possible_cpu(cpu) {  in net_dm_trace_on_set()
    1048  struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);  in net_dm_trace_on_set()
    1086  int cpu;  in net_dm_trace_off_set()  local
    [all …]
|
D | dst.c |
    303  int cpu;  in metadata_dst_alloc_percpu()  local
    311  for_each_possible_cpu(cpu)  in metadata_dst_alloc_percpu()
    312  __metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen);  in metadata_dst_alloc_percpu()
    321  int cpu;  in metadata_dst_free_percpu()  local
    323  for_each_possible_cpu(cpu) {  in metadata_dst_free_percpu()
    324  struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu);  in metadata_dst_free_percpu()
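metadata_dst_alloc_percpu() and metadata_dst_free_percpu() illustrate the usual life cycle of a dynamically allocated per-CPU object: allocate once, initialize every possible CPU's copy with for_each_possible_cpu(), and walk the same set again at teardown. A hedged sketch of that cycle with an invented struct foo_stat (not the metadata_dst type):

```c
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/types.h>

struct foo_stat {
	u64 packets;
	u64 bytes;
};

static struct foo_stat __percpu *foo_alloc_percpu(gfp_t flags)
{
	struct foo_stat __percpu *stats;
	int cpu;

	stats = alloc_percpu_gfp(struct foo_stat, flags);
	if (!stats)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct foo_stat *s = per_cpu_ptr(stats, cpu);

		s->packets = 0;		/* explicit init of every copy */
		s->bytes = 0;
	}
	return stats;
}

static void foo_free_percpu(struct foo_stat __percpu *stats)
{
	/* a per-copy walk would go here if each copy owned more resources,
	 * as metadata_dst_free_percpu() does before the final free */
	free_percpu(stats);
}
```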
|
D | dev.c |
    2101  int cpu, u16 offset, u16 count)  in remove_xps_queue_cpu()  argument
    2107  for (tci = cpu * num_tc; num_tc--; tci++) {  in remove_xps_queue_cpu()
    3778  int cpu = smp_processor_id(); /* ok because BHs are off */  in __dev_queue_xmit()  local
    3780  if (txq->xmit_lock_owner != cpu) {  in __dev_queue_xmit()
    3788  HARD_TX_LOCK(dev, txq, cpu);  in __dev_queue_xmit()
    3954  rflow->cpu = next_cpu;  in set_rps_cpu()
    3970  int cpu = -1;  in get_rps_cpu()  local
    4016  tcpu = rflow->cpu;  in get_rps_cpu()
    4039  cpu = tcpu;  in get_rps_cpu()
    4049  cpu = tcpu;  in get_rps_cpu()
    [all …]
|
/net/openvswitch/ |
D | flow.c |
    63  unsigned int cpu = smp_processor_id();  in ovs_flow_stats_update()  local
    66  stats = rcu_dereference(flow->stats[cpu]);  in ovs_flow_stats_update()
    72  if (cpu == 0 && unlikely(flow->stats_last_writer != cpu))  in ovs_flow_stats_update()
    73  flow->stats_last_writer = cpu;  in ovs_flow_stats_update()
    81  if (unlikely(flow->stats_last_writer != cpu)) {  in ovs_flow_stats_update()
    88  likely(!rcu_access_pointer(flow->stats[cpu]))) {  in ovs_flow_stats_update()
    106  rcu_assign_pointer(flow->stats[cpu],  in ovs_flow_stats_update()
    108  cpumask_set_cpu(cpu, &flow->cpu_used_mask);  in ovs_flow_stats_update()
    112  flow->stats_last_writer = cpu;  in ovs_flow_stats_update()
    129  int cpu;  in ovs_flow_stats_get()  local
    [all …]
|
D | flow_table.c |
    103  int cpu;  in flow_free()  local
    110  for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))  in flow_free()
    111  if (flow->stats[cpu])  in flow_free()
    113  (struct sw_flow_stats __force *)flow->stats[cpu]);  in flow_free()
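flow_free() walks only the CPUs recorded in flow->cpu_used_mask instead of every possible CPU, using cpumask_next() to skip the unused ones. A small illustrative sketch of that loop shape (demo_flow/demo_stats are made-up types, not the OVS ones):

```c
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_stats {
	u64 packets;
};

struct demo_flow {
	struct cpumask cpu_used_mask;		/* CPUs that allocated stats */
	struct demo_stats *stats[NR_CPUS];	/* lazily allocated per CPU */
};

static void demo_flow_free_stats(struct demo_flow *flow)
{
	int cpu;

	/* visit CPU 0, then jump straight to the next bit set in the mask
	 * instead of scanning every possible CPU */
	for (cpu = 0; cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, &flow->cpu_used_mask))
		if (flow->stats[cpu])
			kfree(flow->stats[cpu]);
}
```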
|
/net/ipv6/ |
D | seg6_hmac.c |
    358  int i, alg_count, cpu;  in seg6_hmac_init_algo()  local
    371  for_each_possible_cpu(cpu) {  in seg6_hmac_init_algo()
    375  p_tfm = per_cpu_ptr(algo->tfms, cpu);  in seg6_hmac_init_algo()
    388  for_each_possible_cpu(cpu) {  in seg6_hmac_init_algo()
    390  cpu_to_node(cpu));  in seg6_hmac_init_algo()
    393  *per_cpu_ptr(algo->shashs, cpu) = shash;  in seg6_hmac_init_algo()
    419  int i, alg_count, cpu;  in seg6_hmac_exit()  local
    424  for_each_possible_cpu(cpu) {  in seg6_hmac_exit()
    428  shash = *per_cpu_ptr(algo->shashs, cpu);  in seg6_hmac_exit()
    430  tfm = *per_cpu_ptr(algo->tfms, cpu);  in seg6_hmac_exit()
|
/net/xfrm/ |
D | xfrm_ipcomp.c |
    44  const int cpu = get_cpu();  in ipcomp_decompress()  local
    45  u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);  in ipcomp_decompress()
    46  struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);  in ipcomp_decompress()
    246  int cpu;  in ipcomp_free_tfms()  local
    264  for_each_possible_cpu(cpu) {  in ipcomp_free_tfms()
    265  struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);  in ipcomp_free_tfms()
    275  int cpu;  in ipcomp_alloc_tfms()  local
    302  for_each_possible_cpu(cpu) {  in ipcomp_alloc_tfms()
    307  *per_cpu_ptr(tfms, cpu) = tfm;  in ipcomp_alloc_tfms()
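ipcomp_decompress() relies on get_cpu()/put_cpu(): preemption stays disabled for the duration, so the per-CPU scratch buffer and transform fetched for that CPU cannot be handed to another task mid-use. A minimal sketch of the pattern, with a placeholder demo_scratches table standing in for ipcomp_scratches:

```c
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/types.h>

/* one scratch buffer pointer per CPU, filled in at init time */
static void * __percpu *demo_scratches;

static int demo_use_scratch(const void *in, int len)
{
	const int cpu = get_cpu();	/* disables preemption */
	u8 *scratch = *per_cpu_ptr(demo_scratches, cpu);

	/* safe: nothing else can run on this CPU and reuse the buffer */
	memcpy(scratch, in, len);

	put_cpu();			/* re-enables preemption */
	return 0;
}
```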
|
/net/netfilter/ |
D | nft_counter.c |
    123  int cpu;  in nft_counter_fetch()  local
    126  for_each_possible_cpu(cpu) {  in nft_counter_fetch()
    127  myseq = per_cpu_ptr(&nft_counter_seq, cpu);  in nft_counter_fetch()
    128  this_cpu = per_cpu_ptr(priv->counter, cpu);  in nft_counter_fetch()
    274  int cpu, err;  in nft_counter_module_init()  local
    276  for_each_possible_cpu(cpu)  in nft_counter_module_init()
    277  seqcount_init(per_cpu_ptr(&nft_counter_seq, cpu));  in nft_counter_module_init()
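nft_counter_fetch() pairs each CPU's counters with a per-CPU seqcount so a reader can sum 64-bit values consistently even on 32-bit hosts: it re-reads a CPU's copy whenever that CPU's writer was mid-update. A sketch of the fetch side under assumed names (demo_counter, demo_counter_seq):

```c
#include <linux/percpu.h>
#include <linux/seqlock.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_counter {
	u64 bytes;
	u64 packets;
};

static DEFINE_PER_CPU(seqcount_t, demo_counter_seq);

static void demo_counter_fetch(struct demo_counter __percpu *counter,
			       struct demo_counter *total)
{
	struct demo_counter *this_cpu;
	const seqcount_t *myseq;
	u64 bytes, packets;
	unsigned int seq;
	int cpu;

	memset(total, 0, sizeof(*total));
	for_each_possible_cpu(cpu) {
		myseq = per_cpu_ptr(&demo_counter_seq, cpu);
		this_cpu = per_cpu_ptr(counter, cpu);
		do {
			/* retry if this CPU's writer was mid-update */
			seq = read_seqcount_begin(myseq);
			bytes = this_cpu->bytes;
			packets = this_cpu->packets;
		} while (read_seqcount_retry(myseq, seq));

		total->bytes += bytes;
		total->packets += packets;
	}
}
```

Each seqcount would be set up once at module init with seqcount_init(), as the nft_counter_module_init() hit at line 277 shows.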
|
D | nf_conntrack_standalone.c |
    388  int cpu;  in ct_cpu_seq_start()  local
    393  for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {  in ct_cpu_seq_start()
    394  if (!cpu_possible(cpu))  in ct_cpu_seq_start()
    396  *pos = cpu + 1;  in ct_cpu_seq_start()
    397  return per_cpu_ptr(net->ct.stat, cpu);  in ct_cpu_seq_start()
    406  int cpu;  in ct_cpu_seq_next()  local
    408  for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {  in ct_cpu_seq_next()
    409  if (!cpu_possible(cpu))  in ct_cpu_seq_next()
    411  *pos = cpu + 1;  in ct_cpu_seq_next()
    412  return per_cpu_ptr(net->ct.stat, cpu);  in ct_cpu_seq_next()
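ct_cpu_seq_start()/ct_cpu_seq_next() (and the synproxy and route.c iterators further down) are the stock seq_file pattern for exporting one /proc line per CPU: *pos == 0 yields a header token, and *pos - 1 onwards is treated as a CPU number, skipping CPUs that are not possible. A sketch with a placeholder demo_stat per-CPU variable:

```c
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

struct demo_stat {
	unsigned int found;		/* whatever the per-CPU record holds */
};

static DEFINE_PER_CPU(struct demo_stat, demo_stat);

static void *demo_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;		/* header row */

	for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;			/* where next() resumes */
		return &per_cpu(demo_stat, cpu);
	}
	return NULL;
}

static void *demo_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return &per_cpu(demo_stat, cpu);
	}
	(*pos)++;
	return NULL;
}
```

Storing "cpu + 1" in *pos is what lets the dump restart cleanly after a partial read: the next call simply continues the scan from that CPU.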
|
D | nf_synproxy_core.c |
    244  int cpu;  in synproxy_cpu_seq_start()  local
    249  for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {  in synproxy_cpu_seq_start()
    250  if (!cpu_possible(cpu))  in synproxy_cpu_seq_start()
    252  *pos = cpu + 1;  in synproxy_cpu_seq_start()
    253  return per_cpu_ptr(snet->stats, cpu);  in synproxy_cpu_seq_start()
    262  int cpu;  in synproxy_cpu_seq_next()  local
    264  for (cpu = *pos; cpu < nr_cpu_ids; cpu++) {  in synproxy_cpu_seq_next()
    265  if (!cpu_possible(cpu))  in synproxy_cpu_seq_next()
    267  *pos = cpu + 1;  in synproxy_cpu_seq_next()
    268  return per_cpu_ptr(snet->stats, cpu);  in synproxy_cpu_seq_next()
|
D | nf_conntrack_netlink.c |
    1404  int cpu;  in ctnetlink_dump_list()  local
    1413  for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {  in ctnetlink_dump_list()
    1416  if (!cpu_possible(cpu))  in ctnetlink_dump_list()
    1419  pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);  in ctnetlink_dump_list()
    1441  cb->args[0] = cpu;  in ctnetlink_dump_list()
    2200  __u16 cpu, const struct ip_conntrack_stat *st)  in ctnetlink_ct_stat_cpu_fill_info()  argument
    2215  nfmsg->res_id = htons(cpu);  in ctnetlink_ct_stat_cpu_fill_info()
    2242  int cpu;  in ctnetlink_ct_stat_cpu_dump()  local
    2248  for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {  in ctnetlink_ct_stat_cpu_dump()
    2251  if (!cpu_possible(cpu))  in ctnetlink_ct_stat_cpu_dump()
    [all …]
|
D | nf_conncount.c |
    45  int cpu;  member
    102  int cpu = raw_smp_processor_id();  in find_or_evict()  local
    117  if (conn->cpu == cpu || age >= 2) {  in find_or_evict()
    189  conn->cpu = raw_smp_processor_id();  in __nf_conncount_add()
|
D | xt_NFQUEUE.c |
    94  int cpu = smp_processor_id();  in nfqueue_tg_v3()  local
    96  queue = info->queuenum + cpu % info->queues_total;  in nfqueue_tg_v3()
|
D | nf_conntrack_ecache.c |
    88  int cpu, delay = -1;  in ecache_work()  local
    93  for_each_possible_cpu(cpu) {  in ecache_work()
    96  pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu);  in ecache_work()
|
D | x_tables.c |
    1188  int cpu;  in xt_free_table_info()  local
    1191  for_each_possible_cpu(cpu)  in xt_free_table_info()
    1192  kvfree(info->jumpstack[cpu]);  in xt_free_table_info()
    1297  int cpu;  in xt_jumpstack_alloc()  local
    1322  for_each_possible_cpu(cpu) {  in xt_jumpstack_alloc()
    1323  i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,  in xt_jumpstack_alloc()
    1324  cpu_to_node(cpu));  in xt_jumpstack_alloc()
    1325  if (i->jumpstack[cpu] == NULL)  in xt_jumpstack_alloc()
    1359  unsigned int cpu;  in xt_replace_table()  local
    1399  for_each_possible_cpu(cpu) {  in xt_replace_table()
    [all …]
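xt_jumpstack_alloc() and xt_free_table_info() show a NUMA-aware variant of per-CPU allocation: one buffer per possible CPU, each placed on that CPU's home node via kvmalloc_node(..., cpu_to_node(cpu)) and later released with kvfree(). A hedged sketch with placeholder names (demo_info, demo_jumpstack_alloc):

```c
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/errno.h>

struct demo_info {
	unsigned int stacksize;		/* entries per CPU */
	void ***jumpstack;		/* jumpstack[cpu] is that CPU's stack */
};

static int demo_jumpstack_alloc(struct demo_info *i)
{
	size_t size = i->stacksize * sizeof(void *);
	int cpu;

	i->jumpstack = kvzalloc(nr_cpu_ids * sizeof(void **), GFP_KERNEL);
	if (!i->jumpstack)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		/* place each CPU's stack on that CPU's NUMA node */
		i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
						  cpu_to_node(cpu));
		if (!i->jumpstack[cpu])
			return -ENOMEM;	/* caller unwinds via demo_free() */
	}
	return 0;
}

static void demo_free(struct demo_info *info)
{
	int cpu;

	for_each_possible_cpu(cpu)
		kvfree(info->jumpstack[cpu]);	/* kvfree(NULL) is a no-op */
	kvfree(info->jumpstack);
}
```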
|
/net/rds/ |
D | page.c |
    155  unsigned int cpu;  in rds_page_exit()  local
    157  for_each_possible_cpu(cpu) {  in rds_page_exit()
    160  rem = &per_cpu(rds_page_remainders, cpu);  in rds_page_exit()
    161  rdsdebug("cpu %u\n", cpu);  in rds_page_exit()
|
D | tcp_stats.c |
    58  int cpu;  in rds_tcp_stats_info_copy()  local
    63  for_each_online_cpu(cpu) {  in rds_tcp_stats_info_copy()
    64  src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu));  in rds_tcp_stats_info_copy()
|
D | ib_stats.c |
    91  int cpu;  in rds_ib_stats_info_copy()  local
    96  for_each_online_cpu(cpu) {  in rds_ib_stats_info_copy()
    97  src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu));  in rds_ib_stats_info_copy()
|
D | stats.c |
    119  int cpu;  in rds_stats_info()  local
    129  for_each_online_cpu(cpu) {  in rds_stats_info()
    130  src = (uint64_t *)&(per_cpu(rds_stats, cpu));  in rds_stats_info()
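The three RDS entries above (tcp_stats.c, ib_stats.c, stats.c) all copy statistics the same way: each CPU keeps a struct of u64 counters, and the reporting path treats the struct as a flat array and sums the online CPUs' copies. A sketch of that summation under invented names (demo_statistics, demo_stats):

```c
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_statistics {
	u64 s_recv;
	u64 s_send;
	u64 s_drop;
};

static DEFINE_PER_CPU(struct demo_statistics, demo_stats);

static void demo_stats_sum(struct demo_statistics *sum)
{
	size_t nr = sizeof(*sum) / sizeof(u64);
	u64 *dst = (u64 *)sum;
	const u64 *src;
	size_t i;
	int cpu;

	memset(sum, 0, sizeof(*sum));
	for_each_online_cpu(cpu) {
		/* treat the per-CPU struct as a flat array of u64 counters */
		src = (const u64 *)&per_cpu(demo_stats, cpu);
		for (i = 0; i < nr; i++)
			dst[i] += src[i];
	}
}
```

Only online CPUs are visited here, matching the RDS copy paths; walking possible CPUs instead would also pick up counters left behind by CPUs that have since gone offline.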
|
D | ib_recv.c |
    104  int cpu;  in rds_ib_recv_alloc_cache()  local
    110  for_each_possible_cpu(cpu) {  in rds_ib_recv_alloc_cache()
    111  head = per_cpu_ptr(cache->percpu, cpu);  in rds_ib_recv_alloc_cache()
    139  int cpu;  in rds_ib_cache_splice_all_lists()  local
    141  for_each_possible_cpu(cpu) {  in rds_ib_cache_splice_all_lists()
    142  head = per_cpu_ptr(cache->percpu, cpu);  in rds_ib_cache_splice_all_lists()
|
/net/ipv4/netfilter/ |
D | arp_tables.c |
    193  unsigned int cpu, stackidx = 0;  in arpt_do_table()  local
    207  cpu = smp_processor_id();  in arpt_do_table()
    209  jumpstack = (struct arpt_entry **)private->jumpstack[cpu];  in arpt_do_table()
    603  unsigned int cpu;  in get_counters()  local
    606  for_each_possible_cpu(cpu) {  in get_counters()
    607  seqcount_t *s = &per_cpu(xt_recseq, cpu);  in get_counters()
    615  tmp = xt_get_per_cpu_counter(&iter->counters, cpu);  in get_counters()
    633  unsigned int cpu, i;  in get_old_counters()  local
    635  for_each_possible_cpu(cpu) {  in get_old_counters()
    640  tmp = xt_get_per_cpu_counter(&iter->counters, cpu);  in get_old_counters()
|
/net/ipv4/ |
D | route.c |
    250  int cpu;  in rt_cpu_seq_start()  local
    255  for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {  in rt_cpu_seq_start()
    256  if (!cpu_possible(cpu))  in rt_cpu_seq_start()
    258  *pos = cpu+1;  in rt_cpu_seq_start()
    259  return &per_cpu(rt_cache_stat, cpu);  in rt_cpu_seq_start()
    266  int cpu;  in rt_cpu_seq_next()  local
    268  for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {  in rt_cpu_seq_next()
    269  if (!cpu_possible(cpu))  in rt_cpu_seq_next()
    271  *pos = cpu+1;  in rt_cpu_seq_next()
    272  return &per_cpu(rt_cache_stat, cpu);  in rt_cpu_seq_next()
    [all …]
|
/net/sched/ |
D | cls_basic.c |
    285  int cpu;  in basic_dump()  local
    300  for_each_possible_cpu(cpu) {  in basic_dump()
    301  struct tc_basic_pcnt *pf = per_cpu_ptr(f->pf, cpu);  in basic_dump()
|