/net/netfilter/ |
D | nf_flow_table_procfs.c |
      9  int cpu;  in nf_flow_table_cpu_seq_start() local
     14  for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {  in nf_flow_table_cpu_seq_start()
     15  if (!cpu_possible(cpu))  in nf_flow_table_cpu_seq_start()
     17  *pos = cpu + 1;  in nf_flow_table_cpu_seq_start()
     18  return per_cpu_ptr(net->ft.stat, cpu);  in nf_flow_table_cpu_seq_start()
     27  int cpu;  in nf_flow_table_cpu_seq_next() local
     29  for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {  in nf_flow_table_cpu_seq_next()
     30  if (!cpu_possible(cpu))  in nf_flow_table_cpu_seq_next()
     32  *pos = cpu + 1;  in nf_flow_table_cpu_seq_next()
     33  return per_cpu_ptr(net->ft.stat, cpu);  in nf_flow_table_cpu_seq_next()
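
These hits are the usual netfilter pattern for exporting per-CPU statistics through a seq_file: *pos encodes "CPU number + 1" so iteration can resume at any CPU, and holes in the possible-CPU map are skipped. A condensed sketch of the ->start/->next pair, reassembled from the fragments above; the SEQ_START_TOKEN header handling and the seq_file_net() plumbing follow the common seq_file convention and are assumptions here:

  static void *nf_flow_table_cpu_seq_start(struct seq_file *seq, loff_t *pos)
  {
          struct net *net = seq_file_net(seq);
          int cpu;

          if (*pos == 0)
                  return SEQ_START_TOKEN;         /* assumed: header line first */

          for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
                  if (!cpu_possible(cpu))
                          continue;               /* skip holes in the CPU map */
                  *pos = cpu + 1;                 /* remember where to resume */
                  return per_cpu_ptr(net->ft.stat, cpu);
          }
          return NULL;
  }

  static void *nf_flow_table_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  {
          struct net *net = seq_file_net(seq);
          int cpu;

          for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
                  if (!cpu_possible(cpu))
                          continue;
                  *pos = cpu + 1;
                  return per_cpu_ptr(net->ft.stat, cpu);
          }
          return NULL;
  }

The ct_cpu_seq_*() and synproxy_cpu_seq_*() entries below follow the same pattern over net->ct.stat and snet->stats.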
|
D | nft_counter.c |
    125  int cpu;  in nft_counter_fetch() local
    128  for_each_possible_cpu(cpu) {  in nft_counter_fetch()
    129  myseq = per_cpu_ptr(&nft_counter_seq, cpu);  in nft_counter_fetch()
    130  this_cpu = per_cpu_ptr(priv->counter, cpu);  in nft_counter_fetch()
    280  int cpu;  in nft_counter_init_seqcount() local
    282  for_each_possible_cpu(cpu)  in nft_counter_init_seqcount()
    283  seqcount_init(per_cpu_ptr(&nft_counter_seq, cpu));  in nft_counter_init_seqcount()
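
nft_counter keeps one counter pair per possible CPU plus one seqcount per CPU, so readers can take a consistent snapshot without slowing the packet path with locks. A sketch of the fetch side built around the lines above, assuming the counter struct carries plain bytes/packets fields:

  static DEFINE_PER_CPU(seqcount_t, nft_counter_seq);

  static void nft_counter_fetch(struct nft_counter_percpu_priv *priv,
                                struct nft_counter *total)
  {
          struct nft_counter *this_cpu;
          seqcount_t *myseq;
          u64 bytes, packets;
          unsigned int seq;
          int cpu;

          memset(total, 0, sizeof(*total));
          for_each_possible_cpu(cpu) {
                  myseq = per_cpu_ptr(&nft_counter_seq, cpu);
                  this_cpu = per_cpu_ptr(priv->counter, cpu);
                  do {
                          /* retry if the packet path updated this CPU's pair */
                          seq = read_seqcount_begin(myseq);
                          bytes = this_cpu->bytes;
                          packets = this_cpu->packets;
                  } while (read_seqcount_retry(myseq, seq));

                  total->bytes += bytes;
                  total->packets += packets;
          }
  }

The nft_counter_init_seqcount() hits show the matching one-time setup: each per-CPU seqcount is initialized before the counters are first used. The get_counters() entry under arp_tables.c further down relies on the same read-under-seqcount idea, there via the shared xt_recseq.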
|
D | nf_conntrack_standalone.c |
    397  int cpu;  in ct_cpu_seq_start() local
    402  for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {  in ct_cpu_seq_start()
    403  if (!cpu_possible(cpu))  in ct_cpu_seq_start()
    405  *pos = cpu + 1;  in ct_cpu_seq_start()
    406  return per_cpu_ptr(net->ct.stat, cpu);  in ct_cpu_seq_start()
    415  int cpu;  in ct_cpu_seq_next() local
    417  for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {  in ct_cpu_seq_next()
    418  if (!cpu_possible(cpu))  in ct_cpu_seq_next()
    420  *pos = cpu + 1;  in ct_cpu_seq_next()
    421  return per_cpu_ptr(net->ct.stat, cpu);  in ct_cpu_seq_next()
|
D | nf_synproxy_core.c |
    243  int cpu;  in synproxy_cpu_seq_start() local
    248  for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {  in synproxy_cpu_seq_start()
    249  if (!cpu_possible(cpu))  in synproxy_cpu_seq_start()
    251  *pos = cpu + 1;  in synproxy_cpu_seq_start()
    252  return per_cpu_ptr(snet->stats, cpu);  in synproxy_cpu_seq_start()
    261  int cpu;  in synproxy_cpu_seq_next() local
    263  for (cpu = *pos; cpu < nr_cpu_ids; cpu++) {  in synproxy_cpu_seq_next()
    264  if (!cpu_possible(cpu))  in synproxy_cpu_seq_next()
    266  *pos = cpu + 1;  in synproxy_cpu_seq_next()
    267  return per_cpu_ptr(snet->stats, cpu);  in synproxy_cpu_seq_next()
|
D | nf_conncount.c |
     45  int cpu;  member
    102  int cpu = raw_smp_processor_id();  in find_or_evict() local
    117  if (conn->cpu == cpu || age >= 2) {  in find_or_evict()
    193  conn->cpu = raw_smp_processor_id();  in __nf_conncount_add()
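
nf_conncount tags every tracked tuple with the CPU that inserted it (the int cpu member) and, in find_or_evict(), normally lets only that CPU garbage-collect the node; once the entry has aged past a small threshold, any CPU may evict it. A minimal sketch of that ownership test; the node type and helper name here are hypothetical stand-ins:

  struct conn_node {                       /* hypothetical stand-in for the tuple node */
          int cpu;                         /* CPU that added the entry, see line 193 */
          /* ... tuple, timestamp, list linkage ... */
  };

  static bool conn_may_evict(const struct conn_node *conn, u32 age)
  {
          /* owner CPU may always evict; others only once the entry is stale */
          return conn->cpu == raw_smp_processor_id() || age >= 2;
  }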
|
D | xt_NFQUEUE.c |
     94  int cpu = smp_processor_id();  in nfqueue_tg_v3() local
     96  queue = info->queuenum + cpu % info->queues_total;  in nfqueue_tg_v3()
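
In the v3 target's CPU fanout mode, each CPU is pinned to one of the queues_total queues starting at queuenum, so packets handled on one CPU stay in order while load spreads across queues. A sketch of that selection; the helper name is an assumption here, while the flag and fields come from the xt_NFQUEUE v3 ABI:

  #include <linux/netfilter/xt_NFQUEUE.h>

  static u32 nfqueue_pick_queue(const struct xt_NFQ_info_v3 *info)
  {
          if (info->queues_total > 1 && (info->flags & NFQ_FLAG_CPU_FANOUT)) {
                  int cpu = smp_processor_id();

                  /* same CPU always maps to the same queue in the range */
                  return info->queuenum + cpu % info->queues_total;
          }
          return info->queuenum;
  }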
|
D | nf_conntrack_netlink.c |
     63  unsigned int cpu;  member
   2491  __u16 cpu, const struct ip_conntrack_stat *st)  in ctnetlink_ct_stat_cpu_fill_info() argument
   2499  NFNETLINK_V0, htons(cpu));  in ctnetlink_ct_stat_cpu_fill_info()
   2531  int cpu;  in ctnetlink_ct_stat_cpu_dump() local
   2537  for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {  in ctnetlink_ct_stat_cpu_dump()
   2540  if (!cpu_possible(cpu))  in ctnetlink_ct_stat_cpu_dump()
   2543  st = per_cpu_ptr(net->ct.stat, cpu);  in ctnetlink_ct_stat_cpu_dump()
   2547  cpu, st) < 0)  in ctnetlink_ct_stat_cpu_dump()
   2550  cb->args[0] = cpu;  in ctnetlink_ct_stat_cpu_dump()
   3689  ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu,  in ctnetlink_exp_stat_fill_info() argument
   [all …]
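
The ctnetlink dump walks the per-CPU conntrack stats and uses cb->args[0] as a cursor, so a dump that fills one skb resumes at the next CPU on the following call. A condensed sketch of the dump loop, assembled from the hits above (the portid/seq plumbing is abbreviated and assumed from the usual netlink dump conventions):

  static int ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb,
                                        struct netlink_callback *cb)
  {
          struct net *net = sock_net(skb->sk);
          int cpu;

          if (cb->args[0] == nr_cpu_ids)
                  return 0;                       /* all CPUs already reported */

          for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
                  const struct ip_conntrack_stat *st;

                  if (!cpu_possible(cpu))
                          continue;

                  st = per_cpu_ptr(net->ct.stat, cpu);
                  if (ctnetlink_ct_stat_cpu_fill_info(skb,
                                                      NETLINK_CB(cb->skb).portid,
                                                      cb->nlh->nlmsg_seq,
                                                      cpu, st) < 0)
                          break;                  /* skb full, resume here later */
          }
          cb->args[0] = cpu;                      /* cursor for the next pass */

          return skb->len;
  }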
|
D | x_tables.c |
   1204  int cpu;  in xt_free_table_info() local
   1207  for_each_possible_cpu(cpu)  in xt_free_table_info()
   1208  kvfree(info->jumpstack[cpu]);  in xt_free_table_info()
   1328  int cpu;  in xt_jumpstack_alloc() local
   1353  for_each_possible_cpu(cpu) {  in xt_jumpstack_alloc()
   1354  i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,  in xt_jumpstack_alloc()
   1355  cpu_to_node(cpu));  in xt_jumpstack_alloc()
   1356  if (i->jumpstack[cpu] == NULL)  in xt_jumpstack_alloc()
   1390  unsigned int cpu;  in xt_replace_table() local
   1430  for_each_possible_cpu(cpu) {  in xt_replace_table()
   [all …]
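
x_tables gives every possible CPU its own rule-traversal jump stack, allocated on that CPU's NUMA node, and teardown frees each slot again. A simplified sketch of the allocate/free pair; stack sizing and error unwinding are reduced to the essentials, and the helper names are shortened here:

  static int jumpstack_alloc(struct xt_table_info *i, size_t size)
  {
          int cpu;

          i->jumpstack = kvzalloc(nr_cpu_ids * sizeof(void *), GFP_KERNEL);
          if (!i->jumpstack)
                  return -ENOMEM;

          for_each_possible_cpu(cpu) {
                  /* keep each CPU's stack on its own NUMA node */
                  i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
                                                    cpu_to_node(cpu));
                  if (!i->jumpstack[cpu])
                          return -ENOMEM;         /* caller runs the free loop */
          }
          return 0;
  }

  static void jumpstack_free(struct xt_table_info *info)
  {
          int cpu;

          for_each_possible_cpu(cpu)
                  kvfree(info->jumpstack[cpu]);
          kvfree(info->jumpstack);
  }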
|
/net/core/ |
D | gen_stats.c |
    127  struct gnet_stats_basic_sync __percpu *cpu)  in gnet_stats_add_basic_cpu() argument
    133  struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);  in gnet_stats_add_basic_cpu()
    150  struct gnet_stats_basic_sync __percpu *cpu,  in gnet_stats_add_basic() argument
    157  WARN_ON_ONCE((cpu || running) && in_hardirq());  in gnet_stats_add_basic()
    159  if (cpu) {  in gnet_stats_add_basic()
    160  gnet_stats_add_basic_cpu(bstats, cpu);  in gnet_stats_add_basic()
    175  struct gnet_stats_basic_sync __percpu *cpu,  in gnet_stats_read_basic() argument
    180  if (cpu) {  in gnet_stats_read_basic()
    185  struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);  in gnet_stats_read_basic()
    212  struct gnet_stats_basic_sync __percpu *cpu,  in ___gnet_stats_copy_basic() argument
    [all …]
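
gnet_stats_add_basic() accepts either a single counter block or a per-CPU array; in the per-CPU case, each CPU's bytes/packets pair is read under its u64_stats sequence and folded into a running total. A sketch of that read loop, assuming the caller accumulates into plain u64 totals (older trees spell the fetch helpers with an _irq suffix):

  static void add_basic_percpu(u64 *t_bytes, u64 *t_packets,
                               struct gnet_stats_basic_sync __percpu *cpu)
  {
          int i;

          for_each_possible_cpu(i) {
                  struct gnet_stats_basic_sync *bcpu = per_cpu_ptr(cpu, i);
                  unsigned int start;
                  u64 bytes, packets;

                  do {
                          /* reread if this CPU updated the pair meanwhile */
                          start = u64_stats_fetch_begin(&bcpu->syncp);
                          bytes = u64_stats_read(&bcpu->bytes);
                          packets = u64_stats_read(&bcpu->packets);
                  } while (u64_stats_fetch_retry(&bcpu->syncp, start));

                  *t_bytes += bytes;
                  *t_packets += packets;
          }
  }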
|
D | drop_monitor.c |
   1034  int cpu, rc;  in net_dm_hw_monitor_start() local
   1048  for_each_possible_cpu(cpu) {  in net_dm_hw_monitor_start()
   1049  struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);  in net_dm_hw_monitor_start()
   1069  for_each_possible_cpu(cpu) {  in net_dm_hw_monitor_start()
   1070  struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);  in net_dm_hw_monitor_start()
   1090  int cpu;  in net_dm_hw_monitor_stop() local
   1103  for_each_possible_cpu(cpu) {  in net_dm_hw_monitor_stop()
   1104  struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);  in net_dm_hw_monitor_stop()
   1124  int cpu, rc;  in net_dm_trace_on_set() local
   1133  for_each_possible_cpu(cpu) {  in net_dm_trace_on_set()
   [all …]
|
D | dst.c |
    326  int cpu;  in metadata_dst_alloc_percpu() local
    334  for_each_possible_cpu(cpu)  in metadata_dst_alloc_percpu()
    335  __metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen);  in metadata_dst_alloc_percpu()
    344  int cpu;  in metadata_dst_free_percpu() local
    346  for_each_possible_cpu(cpu) {  in metadata_dst_free_percpu()
    347  struct metadata_dst *one_md_dst = per_cpu_ptr(md_dst, cpu);  in metadata_dst_free_percpu()
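
metadata_dst_alloc_percpu() carves out one metadata_dst (plus option space) per possible CPU in a single per-CPU allocation and initializes each instance; the free path walks the same CPUs before releasing the allocation. A condensed sketch of the allocation side, assembled from these hits:

  struct metadata_dst __percpu *
  metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags)
  {
          struct metadata_dst __percpu *md_dst;
          int cpu;

          md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
                                      __alignof__(struct metadata_dst), flags);
          if (!md_dst)
                  return NULL;

          /* every possible CPU gets its own initialized instance */
          for_each_possible_cpu(cpu)
                  __metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen);

          return md_dst;
  }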
|
D | dev.c |
   2434  int cpu, u16 offset, u16 count)  in remove_xps_queue_cpu() argument
   2440  for (tci = cpu * num_tc; num_tc--; tci++) {  in remove_xps_queue_cpu()
   3086  int cpu, count = 0;  in netif_get_num_default_rss_queues() local
   3092  for_each_cpu(cpu, cpus) {  in netif_get_num_default_rss_queues()
   3094  cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));  in netif_get_num_default_rss_queues()
   4283  int cpu = smp_processor_id(); /* ok because BHs are off */  in __dev_queue_xmit() local
   4288  if (READ_ONCE(txq->xmit_lock_owner) != cpu) {  in __dev_queue_xmit()
   4296  HARD_TX_LOCK(dev, txq, cpu);  in __dev_queue_xmit()
   4474  rflow->cpu = next_cpu;  in set_rps_cpu()
   4490  int cpu = -1;  in get_rps_cpu() local
   [all …]
|
/net/iucv/ |
D | iucv.c |
    363  int cpu = smp_processor_id();  in iucv_allow_cpu() local
    375  parm = iucv_param_irq[cpu];  in iucv_allow_cpu()
    393  cpumask_set_cpu(cpu, &iucv_irq_cpumask);  in iucv_allow_cpu()
    404  int cpu = smp_processor_id();  in iucv_block_cpu() local
    408  parm = iucv_param_irq[cpu];  in iucv_block_cpu()
    413  cpumask_clear_cpu(cpu, &iucv_irq_cpumask);  in iucv_block_cpu()
    424  int cpu = smp_processor_id();  in iucv_declare_cpu() local
    428  if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask))  in iucv_declare_cpu()
    432  parm = iucv_param_irq[cpu];  in iucv_declare_cpu()
    434  parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);  in iucv_declare_cpu()
    [all …]
|
/net/ipv6/ |
D | seg6_hmac.c |
    356  int i, alg_count, cpu;  in seg6_hmac_init_algo() local
    369  for_each_possible_cpu(cpu) {  in seg6_hmac_init_algo()
    373  p_tfm = per_cpu_ptr(algo->tfms, cpu);  in seg6_hmac_init_algo()
    386  for_each_possible_cpu(cpu) {  in seg6_hmac_init_algo()
    388  cpu_to_node(cpu));  in seg6_hmac_init_algo()
    391  *per_cpu_ptr(algo->shashs, cpu) = shash;  in seg6_hmac_init_algo()
    413  int i, alg_count, cpu;  in seg6_hmac_exit() local
    418  for_each_possible_cpu(cpu) {  in seg6_hmac_exit()
    422  shash = *per_cpu_ptr(algo->shashs, cpu);  in seg6_hmac_exit()
    424  tfm = *per_cpu_ptr(algo->tfms, cpu);  in seg6_hmac_exit()
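
seg6_hmac keeps one crypto transform and one hash descriptor per possible CPU so the data path never shares crypto state across CPUs, and the descriptor is placed on that CPU's NUMA node. A simplified sketch of the setup loop; the helper name and the omitted error unwinding are abbreviations here:

  static int seg6_hmac_alloc_percpu(struct seg6_hmac_algo *algo)
  {
          struct crypto_shash *tfm;
          struct shash_desc *shash;
          int cpu;

          for_each_possible_cpu(cpu) {
                  tfm = crypto_alloc_shash(algo->name, 0, 0);
                  if (IS_ERR(tfm))
                          return PTR_ERR(tfm);
                  *per_cpu_ptr(algo->tfms, cpu) = tfm;

                  /* descriptor sized for this transform, on the CPU's node */
                  shash = kzalloc_node(sizeof(*shash) +
                                       crypto_shash_descsize(tfm),
                                       GFP_KERNEL, cpu_to_node(cpu));
                  if (!shash)
                          return -ENOMEM;
                  *per_cpu_ptr(algo->shashs, cpu) = shash;
          }
          return 0;
  }

The xfrm_ipcomp.c entry below uses the same idea for per-CPU compression transforms.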
|
/net/openvswitch/ |
D | flow.c |
     65  unsigned int cpu = smp_processor_id();  in ovs_flow_stats_update() local
     68  stats = rcu_dereference(flow->stats[cpu]);  in ovs_flow_stats_update()
     74  if (cpu == 0 && unlikely(flow->stats_last_writer != cpu))  in ovs_flow_stats_update()
     75  flow->stats_last_writer = cpu;  in ovs_flow_stats_update()
     83  if (unlikely(flow->stats_last_writer != cpu)) {  in ovs_flow_stats_update()
     90  likely(!rcu_access_pointer(flow->stats[cpu]))) {  in ovs_flow_stats_update()
    108  rcu_assign_pointer(flow->stats[cpu],  in ovs_flow_stats_update()
    110  cpumask_set_cpu(cpu, &flow->cpu_used_mask);  in ovs_flow_stats_update()
    114  flow->stats_last_writer = cpu;  in ovs_flow_stats_update()
    131  int cpu;  in ovs_flow_stats_get() local
    [all …]
|
D | flow_table.c |
    109  int cpu;  in flow_free() local
    117  for (cpu = 0; cpu < nr_cpu_ids;  in flow_free()
    118  cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {  in flow_free()
    119  if (flow->stats[cpu])  in flow_free()
    121  (struct sw_flow_stats __force *)flow->stats[cpu]);  in flow_free()
    191  int i, cpu;  in tbl_mask_array_reset_counters() local
    201  for_each_possible_cpu(cpu) {  in tbl_mask_array_reset_counters()
    206  stats = per_cpu_ptr(ma->masks_usage_stats, cpu);  in tbl_mask_array_reset_counters()
   1123  int cpu;  in ovs_flow_masks_rebalance() local
   1132  for_each_possible_cpu(cpu) {  in ovs_flow_masks_rebalance()
   [all …]
|
/net/rds/ |
D | page.c |
    155  unsigned int cpu;  in rds_page_exit() local
    157  for_each_possible_cpu(cpu) {  in rds_page_exit()
    160  rem = &per_cpu(rds_page_remainders, cpu);  in rds_page_exit()
    161  rdsdebug("cpu %u\n", cpu);  in rds_page_exit()
|
D | tcp_stats.c |
     58  int cpu;  in rds_tcp_stats_info_copy() local
     63  for_each_online_cpu(cpu) {  in rds_tcp_stats_info_copy()
     64  src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu));  in rds_tcp_stats_info_copy()
|
D | ib_stats.c |
     91  int cpu;  in rds_ib_stats_info_copy() local
     96  for_each_online_cpu(cpu) {  in rds_ib_stats_info_copy()
     97  src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu));  in rds_ib_stats_info_copy()
|
D | stats.c |
    119  int cpu;  in rds_stats_info() local
    129  for_each_online_cpu(cpu) {  in rds_stats_info()
    130  src = (uint64_t *)&(per_cpu(rds_stats, cpu));  in rds_stats_info()
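
All three RDS stats files (stats.c, tcp_stats.c, ib_stats.c) use the same trick: the per-CPU statistics struct is laid out as nothing but consecutive u64 counters, so each online CPU's copy can be added field by field into one aggregate by treating it as a flat array. A sketch of that summation over the generic rds_stats counters; the helper name is hypothetical, and note that only online CPUs are read, so counters accumulated on a CPU that later went offline are not reported:

  static void rds_stats_sum(struct rds_statistics *stats)
  {
          uint64_t *src, *sum;
          size_t i;
          int cpu;

          memset(stats, 0, sizeof(*stats));
          for_each_online_cpu(cpu) {
                  src = (uint64_t *)&per_cpu(rds_stats, cpu);
                  sum = (uint64_t *)stats;
                  /* the struct is laid out as consecutive u64 counters */
                  for (i = 0; i < sizeof(*stats) / sizeof(uint64_t); i++)
                          *sum++ += *src++;
          }
  }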
|
/net/xfrm/ |
D | xfrm_ipcomp.c |
    238  int cpu;  in ipcomp_free_tfms() local
    256  for_each_possible_cpu(cpu) {  in ipcomp_free_tfms()
    257  struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);  in ipcomp_free_tfms()
    267  int cpu;  in ipcomp_alloc_tfms() local
    294  for_each_possible_cpu(cpu) {  in ipcomp_alloc_tfms()
    299  *per_cpu_ptr(tfms, cpu) = tfm;  in ipcomp_alloc_tfms()
|
/net/ |
D | Kconfig.debug |
     10  This adds memory and cpu costs.
     19  This adds memory and cpu costs.
|
/net/bpf/ |
D | test_run.c |
    799  if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)  in bpf_prog_test_run_tracing()
    857  int cpu = kattr->test.cpu, err = 0;  in bpf_prog_test_run_raw_tp() local
    870  if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)  in bpf_prog_test_run_raw_tp()
    885  cpu == current_cpu) {  in bpf_prog_test_run_raw_tp()
    887  } else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {  in bpf_prog_test_run_raw_tp()
    895  err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,  in bpf_prog_test_run_raw_tp()
   1094  if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)  in bpf_prog_test_run_skb()
   1448  if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)  in bpf_prog_test_run_flow_dissector()
   1512  if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)  in bpf_prog_test_run_sk_lookup()
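
BPF_PROG_TEST_RUN for raw tracepoints can pin the test run to a caller-chosen CPU: without BPF_F_TEST_RUN_ON_CPU the program runs on the local CPU, otherwise it is dispatched to kattr->test.cpu through a synchronous cross-CPU call after checking that the CPU exists and is online (the other program types simply reject a nonzero cpu field, as the remaining hits show). A sketch of that dispatch, with a hypothetical wrapper name:

  static int run_on_requested_cpu(u32 flags, u32 cpu,
                                  void (*fn)(void *), void *arg)
  {
          int current_cpu, err = 0;

          current_cpu = get_cpu();                /* disable preemption */
          if (!(flags & BPF_F_TEST_RUN_ON_CPU) || cpu == current_cpu)
                  fn(arg);                        /* run right here */
          else if (cpu >= nr_cpu_ids || !cpu_online(cpu))
                  err = -ENXIO;                   /* no such (online) CPU */
          else
                  err = smp_call_function_single(cpu, fn, arg, 1 /* wait */);
          put_cpu();

          return err;
  }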
|
/net/ipv4/netfilter/ |
D | arp_tables.c |
    194  unsigned int cpu, stackidx = 0;  in arpt_do_table() local
    208  cpu = smp_processor_id();  in arpt_do_table()
    210  jumpstack = (struct arpt_entry **)private->jumpstack[cpu];  in arpt_do_table()
    604  unsigned int cpu;  in get_counters() local
    607  for_each_possible_cpu(cpu) {  in get_counters()
    608  seqcount_t *s = &per_cpu(xt_recseq, cpu);  in get_counters()
    616  tmp = xt_get_per_cpu_counter(&iter->counters, cpu);  in get_counters()
    634  unsigned int cpu, i;  in get_old_counters() local
    636  for_each_possible_cpu(cpu) {  in get_old_counters()
    641  tmp = xt_get_per_cpu_counter(&iter->counters, cpu);  in get_old_counters()
|
/net/sched/ |
D | cls_basic.c |
    273  int cpu;  in basic_dump() local
    288  for_each_possible_cpu(cpu) {  in basic_dump()
    289  struct tc_basic_pcnt *pf = per_cpu_ptr(f->pf, cpu);  in basic_dump()
|