Home
last modified time | relevance | path

Searched refs:set (Results 1 – 11 of 11) sorted by relevance

/block/
blk-mq.c:1761 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, in blk_mq_free_rqs() argument
1766 if (tags->rqs && set->ops->exit_request) { in blk_mq_free_rqs()
1774 set->ops->exit_request(set, rq, hctx_idx); in blk_mq_free_rqs()
1801 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, in blk_mq_alloc_rq_map() argument
1809 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx); in blk_mq_alloc_rq_map()
1811 node = set->numa_node; in blk_mq_alloc_rq_map()
1814 BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags)); in blk_mq_alloc_rq_map()
1843 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags, in blk_mq_alloc_rqs() argument
1850 node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx); in blk_mq_alloc_rqs()
1852 node = set->numa_node; in blk_mq_alloc_rqs()
[all …]
blk-mq-pci.c:31 int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev) in blk_mq_pci_map_queues() argument
36 for (queue = 0; queue < set->nr_hw_queues; queue++) { in blk_mq_pci_map_queues()
42 set->mq_map[cpu] = queue; in blk_mq_pci_map_queues()
48 WARN_ON_ONCE(set->nr_hw_queues > 1); in blk_mq_pci_map_queues()
50 set->mq_map[cpu] = 0; in blk_mq_pci_map_queues()
blk-mq-rdma.c:32 int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set, in blk_mq_rdma_map_queues() argument
38 for (queue = 0; queue < set->nr_hw_queues; queue++) { in blk_mq_rdma_map_queues()
44 set->mq_map[cpu] = queue; in blk_mq_rdma_map_queues()
50 return blk_mq_map_queues(set); in blk_mq_rdma_map_queues()
blk-mq-virtio.c:32 int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set, in blk_mq_virtio_map_queues() argument
41 for (queue = 0; queue < set->nr_hw_queues; queue++) { in blk_mq_virtio_map_queues()
47 set->mq_map[cpu] = queue; in blk_mq_virtio_map_queues()
52 return blk_mq_map_queues(set); in blk_mq_virtio_map_queues()
blk-mq-sched.c:306 struct blk_mq_tag_set *const set = hctx->queue->tag_set; in blk_mq_sched_restart() local
311 if (set->flags & BLK_MQ_F_TAG_SHARED) { in blk_mq_sched_restart()
320 list_for_each_entry_rcu_rr(q, queue, &set->tag_list, in blk_mq_sched_restart()
422 static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set, in blk_mq_sched_free_tags() argument
427 blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx); in blk_mq_sched_free_tags()
437 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_sched_alloc_tags() local
440 hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests, in blk_mq_sched_alloc_tags()
441 set->reserved_tags); in blk_mq_sched_alloc_tags()
445 ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests); in blk_mq_sched_alloc_tags()
447 blk_mq_sched_free_tags(set, hctx, hctx_idx); in blk_mq_sched_alloc_tags()
[all …]
blk-mq-cpumap.c:33 int blk_mq_map_queues(struct blk_mq_tag_set *set) in blk_mq_map_queues() argument
35 unsigned int *map = set->mq_map; in blk_mq_map_queues()
36 unsigned int nr_queues = set->nr_hw_queues; in blk_mq_map_queues()
blk-mq-tag.c:301 int blk_mq_reinit_tagset(struct blk_mq_tag_set *set, in blk_mq_reinit_tagset() argument
309 for (i = 0; i < set->nr_hw_queues; i++) { in blk_mq_reinit_tagset()
310 struct blk_mq_tags *tags = set->tags[i]; in blk_mq_reinit_tagset()
319 ret = reinit_request(set->driver_data, in blk_mq_reinit_tagset()
424 struct blk_mq_tag_set *set = hctx->queue->tag_set; in blk_mq_tag_update_depth() local
438 new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, in blk_mq_tag_update_depth()
442 ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth); in blk_mq_tag_update_depth()
448 blk_mq_free_rqs(set, *tagsptr, hctx->queue_num); in blk_mq_tag_update_depth()
blk-mq.h:43 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
46 struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
50 int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
blk-sysfs.c:334 bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); in queue_rq_affinity_show() local
337 return queue_var_show(set << force, page); in queue_rq_affinity_show()
482 int set = -1; in queue_wc_store() local
485 set = 1; in queue_wc_store()
488 set = 0; in queue_wc_store()
490 if (set == -1) in queue_wc_store()
494 if (set) in queue_wc_store()
blk-timeout.c:44 int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags); in part_timeout_show() local
46 return sprintf(buf, "%d\n", set != 0); in part_timeout_show()
genhd.c:1892 .set = disk_events_set_dfl_poll_msecs,