Searched refs:cpu (Results 1 – 19 of 19) sorted by relevance

/block/
blk-mq-cpumap.c
24 static int get_first_sibling(unsigned int cpu) in get_first_sibling() argument
28 ret = cpumask_first(topology_sibling_cpumask(cpu)); in get_first_sibling()
32 return cpu; in get_first_sibling()
39 unsigned int cpu, first_sibling, q = 0; in blk_mq_map_queues() local
41 for_each_possible_cpu(cpu) in blk_mq_map_queues()
42 map[cpu] = -1; in blk_mq_map_queues()
48 for_each_present_cpu(cpu) { in blk_mq_map_queues()
51 map[cpu] = queue_index(qmap, nr_queues, q++); in blk_mq_map_queues()
54 for_each_possible_cpu(cpu) { in blk_mq_map_queues()
55 if (map[cpu] != -1) in blk_mq_map_queues()
[all …]
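The blk-mq-cpumap.c hits above show the default CPU-to-queue mapping: every possible CPU is first marked unmapped, the present CPUs are spread round-robin over the hardware queues, and any CPU left over inherits the queue of its first thread sibling (topology_sibling_cpumask()). Below is a simplified sketch of that pattern; struct demo_queue_map and the demo_* helpers are illustrative stand-ins, not the real blk-mq definitions.

#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/limits.h>

/* Illustrative stand-in for struct blk_mq_queue_map. */
struct demo_queue_map {
        unsigned int *mq_map;           /* queue index, indexed by CPU number */
        unsigned int nr_queues;
};

/* First sibling of @cpu according to the CPU topology, or @cpu itself. */
static unsigned int demo_first_sibling(unsigned int cpu)
{
        unsigned int ret = cpumask_first(topology_sibling_cpumask(cpu));

        return ret < nr_cpu_ids ? ret : cpu;
}

/* Spread present CPUs round-robin, then map leftovers like their sibling. */
static void demo_map_queues(struct demo_queue_map *qmap)
{
        unsigned int cpu, sib, q = 0;

        for_each_possible_cpu(cpu)
                qmap->mq_map[cpu] = UINT_MAX;           /* not mapped yet */

        for_each_present_cpu(cpu)
                qmap->mq_map[cpu] = q++ % qmap->nr_queues;

        for_each_possible_cpu(cpu) {
                if (qmap->mq_map[cpu] != UINT_MAX)
                        continue;
                sib = demo_first_sibling(cpu);
                qmap->mq_map[cpu] = qmap->mq_map[sib] != UINT_MAX ?
                                    qmap->mq_map[sib] : 0;
        }
}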
blk-stat.c
56 int bucket, cpu; in blk_stat_add() local
64 cpu = get_cpu(); in blk_stat_add()
73 stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket]; in blk_stat_add()
84 int cpu; in blk_stat_timer_fn() local
89 for_each_online_cpu(cpu) { in blk_stat_timer_fn()
92 cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu); in blk_stat_timer_fn()
141 int cpu; in blk_stat_add_callback() local
143 for_each_possible_cpu(cpu) { in blk_stat_add_callback()
146 cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu); in blk_stat_add_callback()
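blk-stat.c splits the work between the hot path and a timer: blk_stat_add() uses get_cpu()/put_cpu() to write into this CPU's bucket without locking, while the timer and the callback setup walk the CPUs with per_cpu_ptr() to aggregate or initialize the buckets. A rough sketch of that shape, with made-up demo_* types (the real buckets are struct blk_rq_stat):

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/smp.h>

#define DEMO_BUCKETS 8

struct demo_stat {
        u64 nr_samples;
        u64 total;
};

struct demo_cb {
        /* Allocated with __alloc_percpu(): DEMO_BUCKETS entries per CPU. */
        struct demo_stat __percpu *cpu_stat;
};

/* Hot path: account a sample into this CPU's bucket, no locks needed. */
static void demo_stat_add(struct demo_cb *cb, unsigned int bucket, u64 value)
{
        struct demo_stat *stat;
        int cpu = get_cpu();            /* pins us to this CPU */

        stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
        stat->nr_samples++;
        stat->total += value;
        put_cpu();
}

/* Slow path (timer): fold every online CPU's buckets into @out. */
static void demo_stat_sum(struct demo_cb *cb, struct demo_stat *out)
{
        int cpu, bucket;

        for_each_online_cpu(cpu) {
                struct demo_stat *cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);

                for (bucket = 0; bucket < DEMO_BUCKETS; bucket++) {
                        out[bucket].nr_samples += cpu_stat[bucket].nr_samples;
                        out[bucket].total += cpu_stat[bucket].total;
                }
        }
}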
blk-mq.h
24 unsigned int cpu; member
96 unsigned int cpu) in blk_mq_map_queue_type() argument
98 return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; in blk_mq_map_queue_type()
137 unsigned int cpu) in __blk_mq_get_ctx() argument
139 return per_cpu_ptr(q->queue_ctx, cpu); in __blk_mq_get_ctx()
249 int cpu; in blk_mq_clear_mq_map() local
251 for_each_possible_cpu(cpu) in blk_mq_clear_mq_map()
252 qmap->mq_map[cpu] = 0; in blk_mq_clear_mq_map()
blk-mq-rdma.c
28 unsigned int queue, cpu; in blk_mq_rdma_map_queues() local
35 for_each_cpu(cpu, mask) in blk_mq_rdma_map_queues()
36 map->mq_map[cpu] = map->queue_offset + queue; in blk_mq_rdma_map_queues()
blk-mq-pci.c
30 unsigned int queue, cpu; in blk_mq_pci_map_queues() local
37 for_each_cpu(cpu, mask) in blk_mq_pci_map_queues()
38 qmap->mq_map[cpu] = qmap->queue_offset + queue; in blk_mq_pci_map_queues()
blk-mq-virtio.c
28 unsigned int queue, cpu; in blk_mq_virtio_map_queues() local
38 for_each_cpu(cpu, mask) in blk_mq_virtio_map_queues()
39 qmap->mq_map[cpu] = qmap->queue_offset + queue; in blk_mq_virtio_map_queues()
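blk-mq-rdma.c, blk-mq-pci.c and blk-mq-virtio.c all follow the same shape: for each hardware queue, look up the device's interrupt affinity mask for that queue (ib_get_vector_affinity(), pci_irq_get_affinity(), or the virtio config op) and point every CPU in the mask at that queue. A generic sketch; demo_get_queue_affinity() is a placeholder for the bus-specific lookup, not a real kernel API:

#include <linux/cpumask.h>
#include <linux/errno.h>

struct demo_queue_map {
        unsigned int *mq_map;           /* queue index, indexed by CPU number */
        unsigned int nr_queues;
        unsigned int queue_offset;
};

/* Placeholder for the bus-specific affinity lookup (illustrative only). */
const struct cpumask *demo_get_queue_affinity(void *dev, unsigned int queue);

static int demo_map_queues_by_affinity(struct demo_queue_map *qmap, void *dev)
{
        const struct cpumask *mask;
        unsigned int queue, cpu;

        for (queue = 0; queue < qmap->nr_queues; queue++) {
                mask = demo_get_queue_affinity(dev, queue);
                if (!mask)
                        return -EINVAL; /* caller falls back to a default map */

                for_each_cpu(cpu, mask)
                        qmap->mq_map[cpu] = qmap->queue_offset + queue;
        }
        return 0;
}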
blk-mq-sysfs.c
255 ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); in blk_mq_register_hctx()
295 int cpu; in blk_mq_sysfs_deinit() local
297 for_each_possible_cpu(cpu) { in blk_mq_sysfs_deinit()
298 ctx = per_cpu_ptr(q->queue_ctx, cpu); in blk_mq_sysfs_deinit()
307 int cpu; in blk_mq_sysfs_init() local
311 for_each_possible_cpu(cpu) { in blk_mq_sysfs_init()
312 ctx = per_cpu_ptr(q->queue_ctx, cpu); in blk_mq_sysfs_init()
mq-deadline-cgroup.h
60 unsigned int cpu; \
65 for_each_present_cpu(cpu) \
66 sum += local_read(&per_cpu_ptr((ddcg)->stats, cpu)-> \
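mq-deadline-cgroup.h keeps its statistics as per-CPU local_t counters; the backslash-continued lines above belong to a macro that sums one field across the present CPUs with local_read() whenever the value is reported. A sketch of such a summing macro, with hypothetical field names:

#include <linux/percpu.h>
#include <asm/local.h>

/* Hypothetical per-CPU stats block (illustrative only). */
struct demo_stats {
        local_t dispatched;
        local_t completed;
};

/*
 * Sum one local_t field over all present CPUs. Readers tolerate the small
 * race against writers bumping their own CPU's counter concurrently.
 */
#define demo_sum_stat(stats, field)                                     \
({                                                                      \
        unsigned int cpu;                                               \
        long sum = 0;                                                   \
                                                                        \
        for_each_present_cpu(cpu)                                       \
                sum += local_read(&per_cpu_ptr((stats), cpu)->field);   \
        sum;                                                            \
})

A caller holding a struct demo_stats __percpu * pointer would evaluate demo_sum_stat(stats, dispatched) when generating the stat file; nothing is aggregated on the I/O path itself.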
blk-mq.c
443 unsigned int cpu; in blk_mq_alloc_request_hctx() local
476 cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask); in blk_mq_alloc_request_hctx()
477 if (cpu >= nr_cpu_ids) in blk_mq_alloc_request_hctx()
479 data.ctx = __blk_mq_get_ctx(q, cpu); in blk_mq_alloc_request_hctx()
624 static int blk_softirq_cpu_dead(unsigned int cpu) in blk_softirq_cpu_dead() argument
631 list_splice_init(&per_cpu(blk_cpu_done, cpu), in blk_softirq_cpu_dead()
661 int cpu = raw_smp_processor_id(); in blk_mq_complete_need_ipi() local
668 if (cpu == rq->mq_ctx->cpu || in blk_mq_complete_need_ipi()
670 cpus_share_cache(cpu, rq->mq_ctx->cpu))) in blk_mq_complete_need_ipi()
674 return cpu_online(rq->mq_ctx->cpu); in blk_mq_complete_need_ipi()
[all …]
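The blk-mq.c hits around line 661 are the completion path deciding whether a request can be completed on the current CPU or needs an IPI back to the CPU it was submitted on: no IPI if it is the same CPU, or if the two CPUs share a cache and strict same-CPU completion was not requested, and the target must still be online. A condensed sketch of that decision, using a hypothetical request struct that remembers its submission CPU:

#include <linux/types.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/sched/topology.h>

/* Hypothetical request; real blk-mq keeps the CPU in rq->mq_ctx->cpu. */
struct demo_rq {
        int submit_cpu;
        bool force_same_cpu;    /* cf. QUEUE_FLAG_SAME_FORCE in real blk-mq */
};

/* Return true if completing @rq requires an IPI to the submitting CPU. */
static bool demo_complete_need_ipi(struct demo_rq *rq)
{
        int cpu = raw_smp_processor_id();

        /* Same CPU, or cache-sharing CPUs without strict affinity: run here. */
        if (cpu == rq->submit_cpu ||
            (!rq->force_same_cpu && cpus_share_cache(cpu, rq->submit_cpu)))
                return false;

        /* Only send an IPI to a CPU that is still online. */
        return cpu_online(rq->submit_cpu);
}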
blk-cgroup.c
155 int i, cpu; in blkg_alloc() local
177 for_each_possible_cpu(cpu) in blkg_alloc()
178 u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync); in blkg_alloc()
446 int i, cpu; in blkcg_reset_stats() local
457 for_each_possible_cpu(cpu) { in blkcg_reset_stats()
459 per_cpu_ptr(blkg->iostat_cpu, cpu); in blkcg_reset_stats()
765 static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu) in blkcg_rstat_flush() argument
774 struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu); in blkcg_rstat_flush()
828 int cpu; in blkcg_fill_root_iostats() local
831 for_each_possible_cpu(cpu) { in blkcg_fill_root_iostats()
[all …]
blk-iolatency.c
532 int cpu; in iolatency_check_latencies() local
536 for_each_online_cpu(cpu) { in iolatency_check_latencies()
538 s = per_cpu_ptr(iolat->stats, cpu); in iolatency_check_latencies()
909 int cpu; in iolatency_ssd_stat() local
913 for_each_online_cpu(cpu) { in iolatency_ssd_stat()
915 s = per_cpu_ptr(iolat->stats, cpu); in iolatency_ssd_stat()
979 int cpu; in iolatency_pd_init() local
986 for_each_possible_cpu(cpu) { in iolatency_pd_init()
988 stat = per_cpu_ptr(iolat->stats, cpu); in iolatency_pd_init()
genhd.c
97 int cpu; in part_stat_read_all() local
100 for_each_possible_cpu(cpu) { in part_stat_read_all()
101 struct disk_stats *ptr = per_cpu_ptr(part->dkstats, cpu); in part_stat_read_all()
118 int cpu; in part_in_flight() local
120 for_each_possible_cpu(cpu) { in part_in_flight()
121 inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) + in part_in_flight()
122 part_stat_local_read_cpu(part, in_flight[1], cpu); in part_in_flight()
132 int cpu; in part_in_flight_rw() local
136 for_each_possible_cpu(cpu) { in part_in_flight_rw()
137 inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu); in part_in_flight_rw()
[all …]
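genhd.c handles the disk statistics the same way on the read side: nothing is summed at I/O time, and part_in_flight()/part_stat_read_all() simply walk every possible CPU and add the counters up when /proc/diskstats or sysfs is read. A reduced sketch of that read side, with a made-up per-CPU counter struct:

#include <linux/percpu.h>

/* Hypothetical per-CPU in-flight counters, one slot per direction (R/W). */
struct demo_disk_stats {
        unsigned int in_flight[2];
};

/* Total requests in flight, summed over all possible CPUs. */
static unsigned int demo_in_flight(struct demo_disk_stats __percpu *dkstats)
{
        unsigned int inflight = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct demo_disk_stats *ptr = per_cpu_ptr(dkstats, cpu);

                inflight += ptr->in_flight[0] + ptr->in_flight[1];
        }

        /* Increments and decrements race; clamp a transiently negative sum. */
        return (int)inflight < 0 ? 0 : inflight;
}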
kyber-iosched.c
280 int cpu; in kyber_timer_fn() local
284 for_each_online_cpu(cpu) { in kyber_timer_fn()
287 cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu); in kyber_timer_fn()
548 rq->mq_ctx->cpu); in rq_clear_domain_token()
blk-mq-tag.c
190 sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu); in blk_mq_put_tag()
193 sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu); in blk_mq_put_tag()
blk-iocost.c
1536 int cpu, rw; in ioc_lat_stat() local
1538 for_each_online_cpu(cpu) { in ioc_lat_stat()
1539 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu); in ioc_lat_stat()
1621 int cpu; in iocg_flush_stat_one() local
1626 for_each_possible_cpu(cpu) { in iocg_flush_stat_one()
1628 per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu)); in iocg_flush_stat_one()
2841 int i, cpu, ret; in blk_iocost_init() local
2853 for_each_possible_cpu(cpu) { in blk_iocost_init()
2854 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu); in blk_iocost_init()
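blk-iocost.c also shows the allocation side of the pattern: in blk_iocost_init() the per-CPU stat area is created and each CPU's copy is initialized with a for_each_possible_cpu() walk before the structure is published. A minimal sketch of that setup, assuming a simple one-field stats struct:

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/errno.h>

struct demo_pcpu_stat {
        u64 abs_usage;
};

struct demo_ioc {
        struct demo_pcpu_stat __percpu *pcpu_stat;
};

/* Allocate the per-CPU stats and initialize every possible CPU's copy. */
static int demo_ioc_init(struct demo_ioc *ioc)
{
        int cpu;

        ioc->pcpu_stat = alloc_percpu(struct demo_pcpu_stat);
        if (!ioc->pcpu_stat)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                struct demo_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);

                ccs->abs_usage = 0;
        }
        return 0;
}

Teardown would pair the allocation with free_percpu(ioc->pcpu_stat).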
mq-deadline-main.c
122 unsigned int cpu; \
127 for_each_present_cpu(cpu) \
128 sum += local_read(&per_cpu_ptr((dd)->stats, cpu)-> \
blk-core.c
1674 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, in kblockd_mod_delayed_work_on() argument
1677 return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay); in kblockd_mod_delayed_work_on()
blk-throttle.c
2107 int i, cpu, rw; in throtl_update_latency_buckets() local
2122 for_each_possible_cpu(cpu) { in throtl_update_latency_buckets()
2127 cpu); in throtl_update_latency_buckets()
blk-mq-debugfs.c
871 snprintf(name, sizeof(name), "cpu%u", ctx->cpu); in blk_mq_debugfs_register_ctx()