/block/ |
D | blk-mq-cpumap.c |
    24  static int get_first_sibling(unsigned int cpu)  in get_first_sibling() argument
    28  ret = cpumask_first(topology_sibling_cpumask(cpu));  in get_first_sibling()
    32  return cpu;  in get_first_sibling()
    39  unsigned int cpu, first_sibling, q = 0;  in blk_mq_map_queues() local
    41  for_each_possible_cpu(cpu)  in blk_mq_map_queues()
    42  map[cpu] = -1;  in blk_mq_map_queues()
    48  for_each_present_cpu(cpu) {  in blk_mq_map_queues()
    51  map[cpu] = queue_index(qmap, nr_queues, q++);  in blk_mq_map_queues()
    54  for_each_possible_cpu(cpu) {  in blk_mq_map_queues()
    55  if (map[cpu] != -1)  in blk_mq_map_queues()
  [all …]
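Read together, the blk_mq_map_queues() hits sketch the default CPU-to-hardware-queue assignment: every possible CPU starts out unmapped (-1), the CPUs that are actually present get queue indices round-robin, and whatever is left is filled in afterwards. A minimal user-space sketch of that shape, with made-up CPU and queue counts and a simplified fallback (the kernel reuses the first sibling's mapping rather than queue 0):

#include <stdio.h>

#define NR_CPUS    8    /* assumed toy topology */
#define NR_PRESENT 6
#define NR_QUEUES  3

int main(void)
{
        int map[NR_CPUS];
        int q = 0;

        /* every possible CPU starts unmapped, as map[cpu] = -1 above */
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                map[cpu] = -1;

        /* present CPUs are handed hardware queues round-robin */
        for (int cpu = 0; cpu < NR_PRESENT; cpu++)
                map[cpu] = q++ % NR_QUEUES;

        /* simplified fallback for possible-but-not-present CPUs */
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (map[cpu] == -1)
                        map[cpu] = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                printf("cpu%d -> hw queue %d\n", cpu, map[cpu]);
        return 0;
}

Covering all possible CPUs rather than only the online ones means a CPU that is hot-plugged in later already has a queue assignment waiting for it.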
|
D | blk-stat.c |
    56  int bucket, cpu;  in blk_stat_add() local
    64  cpu = get_cpu();  in blk_stat_add()
    73  stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];  in blk_stat_add()
    84  int cpu;  in blk_stat_timer_fn() local
    89  for_each_online_cpu(cpu) {  in blk_stat_timer_fn()
    92  cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);  in blk_stat_timer_fn()
   141  int cpu;  in blk_stat_add_callback() local
   143  for_each_possible_cpu(cpu) {  in blk_stat_add_callback()
   146  cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);  in blk_stat_add_callback()
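The shape here is the usual per-CPU statistics split: the I/O hot path only touches the local CPU's copy (get_cpu() pins the CPU while the bucket is updated), while the timer callback folds every online CPU's copy into one result. A kernel-style sketch of that split with hypothetical names; it only makes sense inside a kernel build and elides allocating the per-CPU data with alloc_percpu():

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/types.h>

struct demo_stat {
        u64 nr_samples;
        u64 total;
};

/* allocated with alloc_percpu(struct demo_stat) at init time (not shown) */
static struct demo_stat __percpu *demo_stats;

/* hot path: account on the local CPU only; get_cpu() disables preemption */
static void demo_stat_add(u64 value)
{
        struct demo_stat *s = per_cpu_ptr(demo_stats, get_cpu());

        s->nr_samples++;
        s->total += value;
        put_cpu();
}

/* slow path (a timer in blk-stat.c): fold every online CPU's copy together */
static void demo_stat_sum(struct demo_stat *sum)
{
        int cpu;

        sum->nr_samples = 0;
        sum->total = 0;
        for_each_online_cpu(cpu) {
                struct demo_stat *s = per_cpu_ptr(demo_stats, cpu);

                sum->nr_samples += s->nr_samples;
                sum->total += s->total;
        }
}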
|
D | blk-mq.h |
    24  unsigned int cpu;  member
    96  unsigned int cpu)  in blk_mq_map_queue_type() argument
    98  return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];  in blk_mq_map_queue_type()
   139  unsigned int cpu)  in __blk_mq_get_ctx() argument
   141  return per_cpu_ptr(q->queue_ctx, cpu);  in __blk_mq_get_ctx()
   271  int cpu;  in blk_mq_clear_mq_map() local
   273  for_each_possible_cpu(cpu)  in blk_mq_clear_mq_map()
   274  qmap->mq_map[cpu] = 0;  in blk_mq_clear_mq_map()
|
D | blk-mq-rdma.c |
    28  unsigned int queue, cpu;  in blk_mq_rdma_map_queues() local
    35  for_each_cpu(cpu, mask)  in blk_mq_rdma_map_queues()
    36  map->mq_map[cpu] = map->queue_offset + queue;  in blk_mq_rdma_map_queues()
|
D | blk-mq-pci.c |
    30  unsigned int queue, cpu;  in blk_mq_pci_map_queues() local
    37  for_each_cpu(cpu, mask)  in blk_mq_pci_map_queues()
    38  qmap->mq_map[cpu] = qmap->queue_offset + queue;  in blk_mq_pci_map_queues()
|
D | blk-mq-virtio.c |
    28  unsigned int queue, cpu;  in blk_mq_virtio_map_queues() local
    38  for_each_cpu(cpu, mask)  in blk_mq_virtio_map_queues()
    39  qmap->mq_map[cpu] = qmap->queue_offset + queue;  in blk_mq_virtio_map_queues()
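blk-mq-rdma.c, blk-mq-pci.c and blk-mq-virtio.c all repeat the same loop: for every hardware queue, ask the transport which CPUs its interrupt vector is affine to, then point each CPU in that mask at the queue. A condensed kernel-style sketch of that shared shape; get_queue_affinity() is a hypothetical stand-in for pci_irq_get_affinity() and its RDMA/virtio counterparts:

#include <linux/blk-mq.h>
#include <linux/cpumask.h>

static void demo_map_hw_queues(struct blk_mq_queue_map *qmap,
                const struct cpumask *(*get_queue_affinity)(unsigned int queue))
{
        unsigned int queue, cpu;

        for (queue = 0; queue < qmap->nr_queues; queue++) {
                const struct cpumask *mask = get_queue_affinity(queue);

                /* no affinity info: the real helpers fall back to blk_mq_map_queues() */
                if (!mask)
                        return;

                for_each_cpu(cpu, mask)
                        qmap->mq_map[cpu] = qmap->queue_offset + queue;
        }
}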
|
D | blk-mq-sysfs.c |
   200  ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);  in blk_mq_register_hctx()
   240  int cpu;  in blk_mq_sysfs_deinit() local
   242  for_each_possible_cpu(cpu) {  in blk_mq_sysfs_deinit()
   243  ctx = per_cpu_ptr(q->queue_ctx, cpu);  in blk_mq_sysfs_deinit()
   252  int cpu;  in blk_mq_sysfs_init() local
   256  for_each_possible_cpu(cpu) {  in blk_mq_sysfs_init()
   257  ctx = per_cpu_ptr(q->queue_ctx, cpu);  in blk_mq_sysfs_init()
|
D | blk-mq.c |
   447  unsigned int cpu;  in blk_mq_alloc_request_hctx() local
   480  cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);  in blk_mq_alloc_request_hctx()
   481  if (cpu >= nr_cpu_ids)  in blk_mq_alloc_request_hctx()
   483  data.ctx = __blk_mq_get_ctx(q, cpu);  in blk_mq_alloc_request_hctx()
   596  static int blk_softirq_cpu_dead(unsigned int cpu)  in blk_softirq_cpu_dead() argument
   598  blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));  in blk_softirq_cpu_dead()
   609  int cpu = raw_smp_processor_id();  in blk_mq_complete_need_ipi() local
   624  if (cpu == rq->mq_ctx->cpu ||  in blk_mq_complete_need_ipi()
   626  cpus_share_cache(cpu, rq->mq_ctx->cpu)))  in blk_mq_complete_need_ipi()
   630  return cpu_online(rq->mq_ctx->cpu);  in blk_mq_complete_need_ipi()
  [all …]
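The blk_mq_complete_need_ipi() hits outline the completion-side decision: if the completing CPU is the one that submitted the request, or shares a cache with it, the completion can run right here; otherwise it is only worth sending an IPI if the submitting CPU is still online (blk_softirq_cpu_dead() mops up the per-CPU completion list when a CPU goes away). A trimmed-down sketch of that check, ignoring the force-same-CPU and IRQ-context conditions the real function also considers:

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/sched/topology.h>
#include <linux/types.h>

/* submit_cpu plays the role of rq->mq_ctx->cpu */
static bool demo_complete_need_ipi(unsigned int submit_cpu)
{
        int cpu = raw_smp_processor_id();

        /* same CPU, or one sharing a cache with it: complete locally */
        if (cpu == submit_cpu || cpus_share_cache(cpu, submit_cpu))
                return false;

        /* only bounce the completion if the submitting CPU can still take it */
        return cpu_online(submit_cpu);
}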
|
D | blk-iolatency.c |
   532  int cpu;  in iolatency_check_latencies() local
   536  for_each_online_cpu(cpu) {  in iolatency_check_latencies()
   538  s = per_cpu_ptr(iolat->stats, cpu);  in iolatency_check_latencies()
   912  int cpu;  in iolatency_ssd_stat() local
   916  for_each_online_cpu(cpu) {  in iolatency_ssd_stat()
   918  s = per_cpu_ptr(iolat->stats, cpu);  in iolatency_ssd_stat()
   983  int cpu;  in iolatency_pd_init() local
   990  for_each_possible_cpu(cpu) {  in iolatency_pd_init()
   992  stat = per_cpu_ptr(iolat->stats, cpu);  in iolatency_pd_init()
|
D | blk-cgroup.c |
   155  int i, cpu;  in blkg_alloc() local
   177  for_each_possible_cpu(cpu)  in blkg_alloc()
   178  u64_stats_init(&per_cpu_ptr(blkg->iostat_cpu, cpu)->sync);  in blkg_alloc()
   472  int i, cpu;  in blkcg_reset_stats() local
   483  for_each_possible_cpu(cpu) {  in blkcg_reset_stats()
   485  per_cpu_ptr(blkg->iostat_cpu, cpu);  in blkcg_reset_stats()
   801  static void blkcg_rstat_flush(struct cgroup_subsys_state *css, int cpu)  in blkcg_rstat_flush() argument
   814  struct blkg_iostat_set *bisc = per_cpu_ptr(blkg->iostat_cpu, cpu);  in blkcg_rstat_flush()
   870  int cpu;  in blkcg_fill_root_iostats() local
   874  for_each_possible_cpu(cpu) {  in blkcg_fill_root_iostats()
  [all …]
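blkg_alloc() runs u64_stats_init() on every possible CPU's iostat copy so that 64-bit counters can be bumped locklessly on the hot path and still be read consistently (on 32-bit kernels) by the rstat flush. A kernel-style sketch of that writer/reader pairing around a hypothetical counter struct:

#include <linux/u64_stats_sync.h>
#include <linux/types.h>

struct demo_iostat {
        u64 bytes;
        u64 ios;
        struct u64_stats_sync sync;     /* u64_stats_init() once per possible CPU */
};

/* writer: runs against the local CPU's copy */
static void demo_iostat_add(struct demo_iostat *is, u64 bytes)
{
        u64_stats_update_begin(&is->sync);
        is->bytes += bytes;
        is->ios++;
        u64_stats_update_end(&is->sync);
}

/* reader (e.g. an rstat flush): retry until a consistent snapshot is seen */
static void demo_iostat_read(struct demo_iostat *is, u64 *bytes, u64 *ios)
{
        unsigned int seq;

        do {
                seq = u64_stats_fetch_begin(&is->sync);
                *bytes = is->bytes;
                *ios = is->ios;
        } while (u64_stats_fetch_retry(&is->sync, seq));
}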
|
D | genhd.c |
   126  int cpu;  in part_stat_read_all() local
   129  for_each_possible_cpu(cpu) {  in part_stat_read_all()
   130  struct disk_stats *ptr = per_cpu_ptr(part->bd_stats, cpu);  in part_stat_read_all()
   147  int cpu;  in part_in_flight() local
   149  for_each_possible_cpu(cpu) {  in part_in_flight()
   150  inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) +  in part_in_flight()
   151  part_stat_local_read_cpu(part, in_flight[1], cpu);  in part_in_flight()
   162  int cpu;  in part_in_flight_rw() local
   166  for_each_possible_cpu(cpu) {  in part_in_flight_rw()
   167  inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu);  in part_in_flight_rw()
  [all …]
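part_in_flight() sums over all possible CPUs rather than just the online ones, and with good reason: an in-flight counter may be incremented on the submitting CPU and decremented on a different completing CPU, so any single CPU's value (including that of a CPU which has since gone offline) can be negative, and only the total is meaningful. A small kernel-style sketch of that kind of counter, with made-up names:

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(long, demo_in_flight);

static void demo_start_io(void)
{
        this_cpu_inc(demo_in_flight);           /* submitting CPU */
}

static void demo_end_io(void)
{
        this_cpu_dec(demo_in_flight);           /* may be a different CPU */
}

static long demo_in_flight_read(void)
{
        long sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                sum += per_cpu(demo_in_flight, cpu);

        return sum > 0 ? sum : 0;               /* clamp transient negative readings */
}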
|
D | kyber-iosched.c |
   283  int cpu;  in kyber_timer_fn() local
   287  for_each_online_cpu(cpu) {  in kyber_timer_fn()
   290  cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);  in kyber_timer_fn()
   549  rq->mq_ctx->cpu);  in rq_clear_domain_token()
|
D | bio.c |
   643  static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)  in bio_cpu_dead() argument
   649  struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);  in bio_cpu_dead()
   658  int cpu;  in bio_alloc_cache_destroy() local
   664  for_each_possible_cpu(cpu) {  in bio_alloc_cache_destroy()
   667  cache = per_cpu_ptr(bs->cache, cpu);  in bio_alloc_cache_destroy()
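bio_cpu_dead() is a CPU-hotplug "dead" callback: once a CPU goes offline, the bios sitting in its per-CPU allocation cache would otherwise be stranded, so they are freed there. A generic sketch of registering such a teardown with the cpuhp API, using made-up names and the simple single-instance form (bio.c uses the multi-instance variant, one node per bio_set, as the hlist_node argument above suggests):

#include <linux/cpuhotplug.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/module.h>

struct demo_cache {
        void *obj;                      /* stand-in for the cached objects */
};

static struct demo_cache __percpu *demo_caches;

/* "dead" callback: @cpu has gone offline, drop whatever it cached locally */
static int demo_cpu_dead(unsigned int cpu)
{
        struct demo_cache *cache = per_cpu_ptr(demo_caches, cpu);

        kfree(cache->obj);
        cache->obj = NULL;
        return 0;
}

static int __init demo_init(void)
{
        int ret;

        demo_caches = alloc_percpu(struct demo_cache);
        if (!demo_caches)
                return -ENOMEM;

        /* dynamic state, no startup hook; teardown runs as each CPU dies */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:dead",
                                NULL, demo_cpu_dead);
        return ret < 0 ? ret : 0;       /* dynamic states return a slot number */
}
module_init(demo_init);
MODULE_LICENSE("GPL");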
|
D | blk-mq-tag.c |
   185  sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu);  in blk_mq_put_tag()
   188  sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu);  in blk_mq_put_tag()
|
D | blk-iocost.c |
  1582  int cpu, rw;  in ioc_lat_stat() local
  1584  for_each_online_cpu(cpu) {  in ioc_lat_stat()
  1585  struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);  in ioc_lat_stat()
  1667  int cpu;  in iocg_flush_stat_one() local
  1672  for_each_possible_cpu(cpu) {  in iocg_flush_stat_one()
  1674  per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));  in iocg_flush_stat_one()
  2863  int i, cpu, ret;  in blk_iocost_init() local
  2875  for_each_possible_cpu(cpu) {  in blk_iocost_init()
  2876  struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);  in blk_iocost_init()
|
D | blk-core.c |
  1639  int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,  in kblockd_mod_delayed_work_on() argument
  1642  return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);  in kblockd_mod_delayed_work_on()
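kblockd_mod_delayed_work_on() is a thin wrapper that (re)arms delayed work on a specific CPU's kblockd worker; the underlying mechanism is mod_delayed_work_on() from the workqueue API. A tiny sketch of targeting a CPU with delayed work, using system_wq and hypothetical names in place of kblockd:

#include <linux/workqueue.h>

static void demo_work_fn(struct work_struct *work)
{
        /* runs on the CPU the work was queued for (barring hotplug) */
}

static DECLARE_DELAYED_WORK(demo_work, demo_work_fn);

/* (re)arm demo_work to run on @cpu after @delay jiffies, replacing any
 * previously pending timer, just as the kblockd wrapper above does */
static void demo_kick(int cpu, unsigned long delay)
{
        mod_delayed_work_on(cpu, system_wq, &demo_work, delay);
}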
|
D | blk-throttle.c |
  2113  int i, cpu, rw;  in throtl_update_latency_buckets() local
  2128  for_each_possible_cpu(cpu) {  in throtl_update_latency_buckets()
  2133  cpu);  in throtl_update_latency_buckets()
|
D | blk-mq-debugfs.c |
   869  snprintf(name, sizeof(name), "cpu%u", ctx->cpu);  in blk_mq_debugfs_register_ctx()
|