Identifier 'cpu': references under net/ (source line, code snippet, containing function; '(local)' and '(argument)' mark lines where the identifier is declared).

/net/iucv/

  iucv.c
    397  int cpu = smp_processor_id();  in iucv_allow_cpu() (local)
    409  parm = iucv_param_irq[cpu];  in iucv_allow_cpu()
    427  cpumask_set_cpu(cpu, &iucv_irq_cpumask);  in iucv_allow_cpu()
    438  int cpu = smp_processor_id();  in iucv_block_cpu() (local)
    442  parm = iucv_param_irq[cpu];  in iucv_block_cpu()
    447  cpumask_clear_cpu(cpu, &iucv_irq_cpumask);  in iucv_block_cpu()
    458  int cpu = smp_processor_id();  in iucv_block_cpu_almost() (local)
    462  parm = iucv_param_irq[cpu];  in iucv_block_cpu_almost()
    472  cpumask_clear_cpu(cpu, &iucv_irq_cpumask);  in iucv_block_cpu_almost()
    483  int cpu = smp_processor_id();  in iucv_declare_cpu() (local)
    [all …]

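The iucv.c hits show one recurring shape: resolve the current CPU with smp_processor_id(), pick that CPU's slot out of a per-CPU parameter array, and track interrupt-enabled CPUs in a cpumask. A minimal sketch of that shape with illustrative names (example_param, example_irq_cpumask); the callbacks are assumed to run on the CPU being configured, e.g. via smp_call_function_single(), so smp_processor_id() is used with preemption disabled:

#include <linux/cpumask.h>
#include <linux/smp.h>

static void *example_param[NR_CPUS];	/* illustrative per-CPU parameter blocks */
static cpumask_t example_irq_cpumask;	/* CPUs that currently have interrupts enabled */

/* Runs on the CPU being enabled (e.g. via smp_call_function_single()). */
static void example_allow_cpu(void *data)
{
	int cpu = smp_processor_id();
	void *parm = example_param[cpu];

	/* ... program the hardware using this CPU's parameter block ... */
	(void)parm;

	cpumask_set_cpu(cpu, &example_irq_cpumask);
}

static void example_block_cpu(void *data)
{
	int cpu = smp_processor_id();

	/* ... disable interrupt delivery for this CPU ... */

	cpumask_clear_cpu(cpu, &example_irq_cpumask);
}
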
/net/netfilter/

  nf_synproxy_core.c
    250  int cpu;  in synproxy_cpu_seq_start() (local)
    255  for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {  in synproxy_cpu_seq_start()
    256  if (!cpu_possible(cpu))  in synproxy_cpu_seq_start()
    258  *pos = cpu + 1;  in synproxy_cpu_seq_start()
    259  return per_cpu_ptr(snet->stats, cpu);  in synproxy_cpu_seq_start()
    268  int cpu;  in synproxy_cpu_seq_next() (local)
    270  for (cpu = *pos; cpu < nr_cpu_ids; cpu++) {  in synproxy_cpu_seq_next()
    271  if (!cpu_possible(cpu))  in synproxy_cpu_seq_next()
    273  *pos = cpu + 1;  in synproxy_cpu_seq_next()
    274  return per_cpu_ptr(snet->stats, cpu);  in synproxy_cpu_seq_next()

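nf_synproxy_core.c, nf_conntrack_standalone.c (next entry) and ipv4/route.c (further down) implement the same seq_file idiom for per-CPU /proc statistics: ->start() and ->next() advance *pos over CPU ids, skip CPUs that are not possible, and hand back the per-CPU stats slot of the next possible CPU. A generic sketch of that walk, assuming a hypothetical example_stats per-CPU pointer:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/types.h>

struct example_stat {
	u64 hits;
};

static struct example_stat __percpu *example_stats;

static void *example_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;	/* ->show() prints a header line for this */

	for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(example_stats, cpu);
	}
	return NULL;
}

static void *example_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; cpu++) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(example_stats, cpu);
	}
	return NULL;
}
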
  nf_conntrack_standalone.c
    312  int cpu;  in ct_cpu_seq_start() (local)
    317  for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {  in ct_cpu_seq_start()
    318  if (!cpu_possible(cpu))  in ct_cpu_seq_start()
    320  *pos = cpu + 1;  in ct_cpu_seq_start()
    321  return per_cpu_ptr(net->ct.stat, cpu);  in ct_cpu_seq_start()
    330  int cpu;  in ct_cpu_seq_next() (local)
    332  for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {  in ct_cpu_seq_next()
    333  if (!cpu_possible(cpu))  in ct_cpu_seq_next()
    335  *pos = cpu + 1;  in ct_cpu_seq_next()
    336  return per_cpu_ptr(net->ct.stat, cpu);  in ct_cpu_seq_next()

  nf_conntrack_core.c
    321  ct->cpu = smp_processor_id();  in nf_ct_add_to_dying_list()
    322  pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);  in nf_ct_add_to_dying_list()
    336  ct->cpu = smp_processor_id();  in nf_ct_add_to_unconfirmed_list()
    337  pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);  in nf_ct_add_to_unconfirmed_list()
    351  pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);  in nf_ct_del_from_dying_or_unconfirmed_list()
    1577  int cpu;  in __nf_ct_unconfirmed_destroy() (local)
    1579  for_each_possible_cpu(cpu) {  in __nf_ct_unconfirmed_destroy()
    1584  pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);  in __nf_ct_unconfirmed_destroy()
    1645  int cnt = 0, cpu;  in untrack_refs() (local)
    1647  for_each_possible_cpu(cpu) {  in untrack_refs()
    [all …]

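nf_conntrack_core.c stores the owning CPU in the object itself (ct->cpu) when it puts a conntrack on a per-CPU dying/unconfirmed list, so removal can later lock the right per-CPU list even when it runs on a different CPU. A stripped-down sketch of that idiom with hypothetical names; callers are assumed to run with BHs or preemption disabled, as the conntrack paths do:

#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical per-CPU list bucket, one instance per possible CPU. */
struct example_pcpu_list {
	spinlock_t lock;
	struct hlist_head head;
};

struct example_obj {
	struct hlist_node node;
	u16 cpu;	/* remembers which CPU's list this object sits on */
};

static struct example_pcpu_list __percpu *example_lists;

static void example_add(struct example_obj *obj)
{
	struct example_pcpu_list *pcpu;

	obj->cpu = smp_processor_id();
	pcpu = per_cpu_ptr(example_lists, obj->cpu);

	spin_lock(&pcpu->lock);
	hlist_add_head(&obj->node, &pcpu->head);
	spin_unlock(&pcpu->lock);
}

static void example_del(struct example_obj *obj)
{
	/* Use the recorded CPU, not the current one. */
	struct example_pcpu_list *pcpu = per_cpu_ptr(example_lists, obj->cpu);

	spin_lock(&pcpu->lock);
	hlist_del(&obj->node);
	spin_unlock(&pcpu->lock);
}
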
  nft_counter.c
    56  int cpu;  in nft_counter_fetch() (local)
    59  for_each_possible_cpu(cpu) {  in nft_counter_fetch()
    60  cpu_stats = per_cpu_ptr(counter, cpu);  in nft_counter_fetch()

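nft_counter_fetch() shows the standard way to read a per-CPU counter: walk every possible CPU and fold that CPU's slot into a running total (batadv_sum_counter() in batman-adv, further down, does the same for a single counter index). A simplified sketch; the real nft_counter additionally uses a seqcount so that bytes and packets are read as a consistent pair, which is omitted here:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct example_counter {
	u64 bytes;
	u64 packets;
};

static void example_counter_fetch(struct example_counter __percpu *counter,
				  struct example_counter *total)
{
	int cpu;

	total->bytes = 0;
	total->packets = 0;

	for_each_possible_cpu(cpu) {
		const struct example_counter *cpu_stats = per_cpu_ptr(counter, cpu);

		total->bytes   += cpu_stats->bytes;
		total->packets += cpu_stats->packets;
	}
}
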
  nf_conntrack_netlink.c
    1280  int cpu;  in ctnetlink_dump_list() (local)
    1289  for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {  in ctnetlink_dump_list()
    1292  if (!cpu_possible(cpu))  in ctnetlink_dump_list()
    1295  pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);  in ctnetlink_dump_list()
    1317  cb->args[0] = cpu;  in ctnetlink_dump_list()
    1979  __u16 cpu, const struct ip_conntrack_stat *st)  in ctnetlink_ct_stat_cpu_fill_info() (argument)
    1993  nfmsg->res_id = htons(cpu);  in ctnetlink_ct_stat_cpu_fill_info()
    2020  int cpu;  in ctnetlink_ct_stat_cpu_dump() (local)
    2026  for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {  in ctnetlink_ct_stat_cpu_dump()
    2029  if (!cpu_possible(cpu))  in ctnetlink_ct_stat_cpu_dump()
    [all …]

  xt_NFQUEUE.c
    96  int cpu = smp_processor_id();  in nfqueue_tg_v3() (local)
    98  queue = info->queuenum + cpu % info->queues_total;  in nfqueue_tg_v3()

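xt_NFQUEUE.c and nft_queue.c (below) use the CPU id only as a cheap load-balancing key: the chosen NFQUEUE number is the configured base queue plus the CPU id modulo the number of queues. A sketch with hypothetical field names:

#include <linux/smp.h>
#include <linux/types.h>

struct example_queue_info {
	u16 queuenum;		/* first queue of the block */
	u16 queues_total;	/* number of queues to spread over */
};

static u32 example_pick_queue(const struct example_queue_info *info)
{
	u32 queue = info->queuenum;

	if (info->queues_total > 1) {
		/* The CPU id is only a spreading hint, so a stale value
		 * after migration is harmless. */
		queue += raw_smp_processor_id() % info->queues_total;
	}
	return queue;
}
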
  nf_conntrack_ecache.c
    88  int cpu, delay = -1;  in ecache_work() (local)
    93  for_each_possible_cpu(cpu) {  in ecache_work()
    96  pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu);  in ecache_work()

  nft_queue.c
    41  int cpu = raw_smp_processor_id();  in nft_queue_eval() (local)
    43  queue = priv->queuenum + cpu % priv->queues_total;  in nft_queue_eval()

  x_tables.c
    1009  int cpu;  in xt_free_table_info() (local)
    1012  for_each_possible_cpu(cpu)  in xt_free_table_info()
    1013  kvfree(info->jumpstack[cpu]);  in xt_free_table_info()
    1100  int cpu;  in xt_jumpstack_alloc() (local)
    1125  for_each_possible_cpu(cpu) {  in xt_jumpstack_alloc()
    1127  i->jumpstack[cpu] = vmalloc_node(size,  in xt_jumpstack_alloc()
    1128  cpu_to_node(cpu));  in xt_jumpstack_alloc()
    1130  i->jumpstack[cpu] = kmalloc_node(size,  in xt_jumpstack_alloc()
    1131  GFP_KERNEL, cpu_to_node(cpu));  in xt_jumpstack_alloc()
    1132  if (i->jumpstack[cpu] == NULL)  in xt_jumpstack_alloc()

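x_tables.c allocates one jumpstack per possible CPU on that CPU's NUMA node, choosing vmalloc_node() or kmalloc_node() by size, and tears everything down with kvfree() over the same CPU walk. A condensed sketch of that allocate/free pairing with hypothetical structure names:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/vmalloc.h>

struct example_table_info {
	void **jumpstack;	/* one stack per possible CPU */
};

static void example_free_table_info(struct example_table_info *info)
{
	int cpu;

	if (!info->jumpstack)
		return;
	for_each_possible_cpu(cpu)
		kvfree(info->jumpstack[cpu]);	/* kvfree(NULL) is a no-op */
	kvfree(info->jumpstack);
}

static int example_jumpstack_alloc(struct example_table_info *info, size_t size)
{
	int cpu;

	info->jumpstack = kvzalloc(nr_cpu_ids * sizeof(void *), GFP_KERNEL);
	if (!info->jumpstack)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		/* Place each stack on the CPU's own NUMA node. */
		if (size > PAGE_SIZE)
			info->jumpstack[cpu] = vmalloc_node(size, cpu_to_node(cpu));
		else
			info->jumpstack[cpu] = kmalloc_node(size, GFP_KERNEL,
							    cpu_to_node(cpu));
		if (!info->jumpstack[cpu])
			return -ENOMEM;	/* caller cleans up via example_free_table_info() */
	}
	return 0;
}
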
/net/openvswitch/

  flow.c
    76  int cpu = smp_processor_id();  in ovs_flow_stats_update() (local)
    79  stats = rcu_dereference(flow->stats[cpu]);  in ovs_flow_stats_update()
    85  if (cpu == 0 && unlikely(flow->stats_last_writer != cpu))  in ovs_flow_stats_update()
    86  flow->stats_last_writer = cpu;  in ovs_flow_stats_update()
    94  if (unlikely(flow->stats_last_writer != cpu)) {  in ovs_flow_stats_update()
    101  likely(!rcu_access_pointer(flow->stats[cpu]))) {  in ovs_flow_stats_update()
    119  rcu_assign_pointer(flow->stats[cpu],  in ovs_flow_stats_update()
    124  flow->stats_last_writer = cpu;  in ovs_flow_stats_update()
    141  int cpu;  in ovs_flow_stats_get() (local)
    148  for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask)) {  in ovs_flow_stats_get()
    [all …]

  flow_table.c
    137  int cpu;  in flow_free() (local)
    144  for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask))  in flow_free()
    145  if (flow->stats[cpu])  in flow_free()
    147  (struct flow_stats __force *)flow->stats[cpu]);  in flow_free()

/net/core/

  gen_stats.c
    111  struct gnet_stats_basic_cpu __percpu *cpu)  in __gnet_stats_copy_basic_cpu() (argument)
    116  struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);  in __gnet_stats_copy_basic_cpu()
    135  struct gnet_stats_basic_cpu __percpu *cpu,  in __gnet_stats_copy_basic() (argument)
    140  if (cpu) {  in __gnet_stats_copy_basic()
    141  __gnet_stats_copy_basic_cpu(bstats, cpu);  in __gnet_stats_copy_basic()
    169  struct gnet_stats_basic_cpu __percpu *cpu,  in gnet_stats_copy_basic() (argument)
    174  __gnet_stats_copy_basic(running, &bstats, cpu, b);  in gnet_stats_copy_basic()
    258  const struct gnet_stats_queue __percpu *cpu,  in __gnet_stats_copy_queue() (argument)
    262  if (cpu) {  in __gnet_stats_copy_queue()
    263  __gnet_stats_copy_queue_cpu(qstats, cpu);  in __gnet_stats_copy_queue()

  drop_monitor.c
    382  int cpu, rc;  in init_net_drop_monitor() (local)
    407  for_each_possible_cpu(cpu) {  in init_net_drop_monitor()
    408  data = &per_cpu(dm_cpu_data, cpu);  in init_net_drop_monitor()
    429  int cpu;  in exit_net_drop_monitor() (local)
    440  for_each_possible_cpu(cpu) {  in exit_net_drop_monitor()
    441  data = &per_cpu(dm_cpu_data, cpu);  in exit_net_drop_monitor()

  flow.c
    327  static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)  in flow_cache_percpu_empty() (argument)
    332  fcp = per_cpu_ptr(fc->percpu, cpu);  in flow_cache_percpu_empty()
    402  static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)  in flow_cache_cpu_prepare() (argument)
    404  struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);  in flow_cache_cpu_prepare()
    408  fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));  in flow_cache_cpu_prepare()
    426  int res, cpu = (unsigned long) hcpu;  in flow_cache_cpu() (local)
    427  struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);  in flow_cache_cpu()
    432  res = flow_cache_cpu_prepare(fc, cpu);  in flow_cache_cpu()

  dev.c
    1977  int cpu, u16 index)  in remove_xps_queue() (argument)
    1983  map = xmap_dereference(dev_maps->cpu_map[cpu]);  in remove_xps_queue()
    1990  RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);  in remove_xps_queue()
    2004  int cpu, i;  in netif_reset_xps_queues_gt() (local)
    2013  for_each_possible_cpu(cpu) {  in netif_reset_xps_queues_gt()
    2015  if (!remove_xps_queue(dev_maps, cpu, i))  in netif_reset_xps_queues_gt()
    2036  int cpu, u16 index)  in expand_xps_map() (argument)
    2058  cpu_to_node(cpu));  in expand_xps_map()
    2076  int cpu, numa_node_id = -2;  in netif_set_xps_queue() (local)
    2084  for_each_online_cpu(cpu) {  in netif_set_xps_queue()
    [all …]

/net/xfrm/

  xfrm_ipcomp.c
    48  const int cpu = get_cpu();  in ipcomp_decompress() (local)
    49  u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);  in ipcomp_decompress()
    50  struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);  in ipcomp_decompress()
    250  int cpu;  in ipcomp_free_tfms() (local)
    268  for_each_possible_cpu(cpu) {  in ipcomp_free_tfms()
    269  struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);  in ipcomp_free_tfms()
    279  int cpu;  in ipcomp_alloc_tfms() (local)
    306  for_each_possible_cpu(cpu) {  in ipcomp_alloc_tfms()
    311  *per_cpu_ptr(tfms, cpu) = tfm;  in ipcomp_alloc_tfms()

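xfrm_ipcomp.c pins the caller to a CPU with get_cpu() for as long as it uses that CPU's scratch buffer and crypto_comp transform, then releases the pin with put_cpu(); the alloc/free paths walk every possible CPU. A reduced sketch of the access pattern (names are illustrative and the actual decompression is elided):

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/types.h>

/* One scratch buffer per CPU, allocated elsewhere for every possible CPU. */
static void * __percpu *example_scratches;

static int example_use_scratch(void)
{
	const int cpu = get_cpu();	/* disables preemption */
	u8 *scratch = *per_cpu_ptr(example_scratches, cpu);
	int err = 0;

	/* ... work in 'scratch'; must not sleep while pinned to this CPU ... */
	(void)scratch;

	put_cpu();			/* re-enables preemption */
	return err;
}
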
/net/rds/

  tcp_stats.c
    58  int cpu;  in rds_tcp_stats_info_copy() (local)
    63  for_each_online_cpu(cpu) {  in rds_tcp_stats_info_copy()
    64  src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu));  in rds_tcp_stats_info_copy()

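The RDS statistics files (tcp_stats.c here, ib_stats.c and stats.c below) all fold per-CPU counters the same way: treat the per-CPU struct as a flat array of u64 counters and add each online CPU's copy element by element. A generic sketch of that folding with a hypothetical stats struct; the element count for this struct would be sizeof(struct example_statistics) / sizeof(u64):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_statistics {
	u64 s_recv;
	u64 s_send;
	u64 s_drop;
};

static DEFINE_PER_CPU(struct example_statistics, example_stats);

static void example_stats_sum(u64 *sums, size_t nr)
{
	size_t i;
	int cpu;

	memset(sums, 0, nr * sizeof(u64));

	for_each_online_cpu(cpu) {
		/* View this CPU's struct as a flat array of u64 counters. */
		u64 *src = (u64 *)&per_cpu(example_stats, cpu);

		for (i = 0; i < nr; i++)
			sums[i] += src[i];
	}
}
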
  ib_stats.c
    89  int cpu;  in rds_ib_stats_info_copy() (local)
    94  for_each_online_cpu(cpu) {  in rds_ib_stats_info_copy()
    95  src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu));  in rds_ib_stats_info_copy()

  page.c
    184  unsigned int cpu;  in rds_page_exit() (local)
    186  for_each_possible_cpu(cpu) {  in rds_page_exit()
    189  rem = &per_cpu(rds_page_remainders, cpu);  in rds_page_exit()
    190  rdsdebug("cpu %u\n", cpu);  in rds_page_exit()

  stats.c
    116  int cpu;  in rds_stats_info() (local)
    126  for_each_online_cpu(cpu) {  in rds_stats_info()
    127  src = (uint64_t *)&(per_cpu(rds_stats, cpu));  in rds_stats_info()

  ib_recv.c
    104  int cpu;  in rds_ib_recv_alloc_cache() (local)
    110  for_each_possible_cpu(cpu) {  in rds_ib_recv_alloc_cache()
    111  head = per_cpu_ptr(cache->percpu, cpu);  in rds_ib_recv_alloc_cache()
    139  int cpu;  in rds_ib_cache_splice_all_lists() (local)
    141  for_each_possible_cpu(cpu) {  in rds_ib_cache_splice_all_lists()
    142  head = per_cpu_ptr(cache->percpu, cpu);  in rds_ib_cache_splice_all_lists()

/net/batman-adv/

  main.h
    297  int cpu;  in batadv_sum_counter() (local)
    299  for_each_possible_cpu(cpu) {  in batadv_sum_counter()
    300  counters = per_cpu_ptr(bat_priv->bat_counters, cpu);  in batadv_sum_counter()

/net/ipv4/

  route.c
    252  int cpu;  in rt_cpu_seq_start() (local)
    257  for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {  in rt_cpu_seq_start()
    258  if (!cpu_possible(cpu))  in rt_cpu_seq_start()
    260  *pos = cpu+1;  in rt_cpu_seq_start()
    261  return &per_cpu(rt_cache_stat, cpu);  in rt_cpu_seq_start()
    268  int cpu;  in rt_cpu_seq_next() (local)
    270  for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {  in rt_cpu_seq_next()
    271  if (!cpu_possible(cpu))  in rt_cpu_seq_next()
    273  *pos = cpu+1;  in rt_cpu_seq_next()
    274  return &per_cpu(rt_cache_stat, cpu);  in rt_cpu_seq_next()
    [all …]

/net/bridge/

  br_device.c
    165  unsigned int cpu;  in br_get_stats64() (local)
    167  for_each_possible_cpu(cpu) {  in br_get_stats64()
    170  = per_cpu_ptr(br->stats, cpu);  in br_get_stats64()

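br_get_stats64() is the common ndo_get_stats64 shape: iterate all possible CPUs and add each CPU's packet and byte counters into the returned rtnl_link_stats64, snapshotting each CPU's values under a u64_stats_sync sequence so that 64-bit counters read consistently on 32-bit hosts. A sketch of that shape; the field names are hypothetical and the exact u64_stats helper variants have shifted across kernel versions:

#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-CPU counters guarded by a u64_stats_sync. */
struct example_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
};

static void example_get_stats64(struct example_pcpu_stats __percpu *pstats,
				struct rtnl_link_stats64 *stats)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct example_pcpu_stats *p = per_cpu_ptr(pstats, cpu);
		u64 rxp, rxb, txp, txb;
		unsigned int start;

		do {
			/* Retry if a writer updated the counters meanwhile. */
			start = u64_stats_fetch_begin(&p->syncp);
			rxp = p->rx_packets;
			rxb = p->rx_bytes;
			txp = p->tx_packets;
			txb = p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets += rxp;
		stats->rx_bytes   += rxb;
		stats->tx_packets += txp;
		stats->tx_bytes   += txb;
	}
}
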