Lines matching "md" in drivers/md/dm-rq.c (the Linux device-mapper request-based core). Each entry gives the source line number, the matched code, and the enclosing function as reported by the cross-reference tool.

19 struct mapped_device *md;  member
60 int dm_request_based(struct mapped_device *md) in dm_request_based() argument
62 return queue_is_mq(md->queue); in dm_request_based()
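Reassembled from the two matches above, the helper is a one-liner: a mapped device is request-based exactly when its queue is managed by blk-mq.

int dm_request_based(struct mapped_device *md)
{
	return queue_is_mq(md->queue);
}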
128 static void rq_end_stats(struct mapped_device *md, struct request *orig) in rq_end_stats() argument
130 if (unlikely(dm_stats_used(&md->stats))) { in rq_end_stats()
133 dm_stats_account_io(&md->stats, rq_data_dir(orig), in rq_end_stats()
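The matches show only the stats-accounting core of rq_end_stats(). A plausible reconstruction of the whole helper, assuming the upstream dm-rq.c layout in which tio->duration_jiffies holds the start stamp (set in dm_start_request(), below) until completion; treat the details as a sketch:

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);

		/* duration_jiffies held the start time until now */
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors,
				    true /* end of I/O */,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}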
144 static void rq_completed(struct mapped_device *md) in rq_completed() argument
149 dm_put(md); in rq_completed()
160 struct mapped_device *md = tio->md; in dm_end_request() local
166 rq_end_stats(md, rq); in dm_end_request()
168 rq_completed(md); in dm_end_request()
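dm_end_request() ties the completion pieces together: unprep the clone, hand it back to the target, account the original request, complete it toward blk-mq, and drop the md reference last. A hedged reconstruction (the second argument of release_clone_rq() and the error plumbing vary by kernel version):

static void dm_end_request(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone, NULL);

	rq_end_stats(md, rq);
	blk_mq_end_request(rq, tio->error);
	rq_completed(md);	/* drops the reference taken in dm_start_request() */
}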
176 void dm_mq_kick_requeue_list(struct mapped_device *md) in dm_mq_kick_requeue_list() argument
178 __dm_mq_kick_requeue_list(md->queue, 0); in dm_mq_kick_requeue_list()
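The kick is a thin wrapper over blk-mq's requeue machinery. Sketching the internal helper alongside the exported one (the internal name and the zero delay follow the match at line 178):

static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(md->queue, 0);
}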
190 struct mapped_device *md = tio->md; in dm_requeue_original_request() local
194 rq_end_stats(md, rq); in dm_requeue_original_request()
201 rq_completed(md); in dm_requeue_original_request()
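Requeueing mirrors completion: finalize stats, release any clone, put the original request back on the blk-mq requeue list (optionally after a delay), and drop the md reference. A sketch; the 100 ms delay constant is hedged from upstream and may differ:

static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone, NULL);
	}

	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, delay_ms);
	rq_completed(md);
}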
220 disable_discard(tio->md); in dm_done()
223 disable_write_same(tio->md); in dm_done()
226 disable_write_zeroes(tio->md); in dm_done()
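These three disable_* calls sit in dm_done()'s error handling: when the lower device fails an operation with BLK_STS_TARGET and its queue limits confirm the operation is no longer supported, the capability is switched off on the mapped device so the operation is not retried pointlessly. Roughly, the relevant branch (REQ_OP_WRITE_SAME was removed from later kernels, so this matches the era of the listing):

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_DISCARD &&
		    !clone->q->limits.max_discard_sectors)
			disable_discard(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_SAME &&
			 !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
			 !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}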
261 struct mapped_device *md = tio->md; in dm_softirq_done() local
263 rq_end_stats(md, rq); in dm_softirq_done()
265 rq_completed(md); in dm_softirq_done()
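dm_softirq_done() is the completion entry point: if the original request never received a clone (the target kept it, or it was killed unmapped), it is finished directly; otherwise the clone's result is funneled through dm_done() above. A hedged sketch:

static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		blk_mq_end_request(rq, tio->error);
		rq_completed(md);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}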
341 r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask, in setup_clone()
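Completing the truncated match: setup_clone() wraps blk_rq_prep_clone(), cloning bios out of the md's bioset, pointing the clone's end_io hooks back at the tio, and recording the clone for later requeue or teardown. A sketch:

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}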
355 struct mapped_device *md) in init_tio() argument
357 tio->md = md; in init_tio()
368 if (!md->init_tio_pdu) in init_tio()
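init_tio() resets per-dispatch state; the match at line 368 is its tail. A hedged reconstruction, in which tio->info is deliberately left untouched when the target's per-I/O data pointer was already wired up in .init_request (see dm_mq_init_request() below):

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Skip initializing info for blk-mq: target-specific data is
	 * passed through info.ptr (see dm_mq_init_request below).
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
}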
382 struct mapped_device *md = tio->md; in map_request() local
400 trace_block_rq_remap(clone, disk_devt(dm_disk(md)), in map_request()
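map_request() asks the target to clone-and-map the original request and then acts on the DM_MAPIO_* verdict; the trace_block_rq_remap() match above belongs to the REMAPPED branch, just before the clone is dispatched to the underlying queue. A condensed sketch that elides dispatch-failure handling (note also that blk_insert_cloned_request()'s signature changed in later kernels):

static int map_request(struct dm_rq_target_io *tio)
{
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	int r;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* the target will submit the I/O itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM: let the caller requeue the original */
			ti->type->release_clone_rq(clone, &tio->info);
			return DM_MAPIO_REQUEUE;
		}
		/* the target remapped the I/O: trace and dispatch the clone */
		trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		blk_insert_cloned_request(clone->q, clone);
		break;
	case DM_MAPIO_REQUEUE:
		/* the caller requeues the original request */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* the target wants the I/O failed outright */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	}

	return r;
}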
431 ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf) in dm_attr_rq_based_seq_io_merge_deadline_show() argument
436 ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md, in dm_attr_rq_based_seq_io_merge_deadline_store() argument
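The rq_based_seq_io_merge_deadline attribute only made sense for the old non-blk-mq request path; after that path's removal the sysfs handlers survive for compatibility as stubs, roughly:

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", 0);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	return count;
}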
442 static void dm_start_request(struct mapped_device *md, struct request *orig) in dm_start_request() argument
446 if (unlikely(dm_stats_used(&md->stats))) { in dm_start_request()
450 dm_stats_account_io(&md->stats, rq_data_dir(orig), in dm_start_request()
462 dm_get(md); in dm_start_request()
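dm_start_request() is the mirror image of the completion helpers: mark the request started for blk-mq, stamp the stats fields that rq_end_stats() later consumes, and take the md reference that rq_completed() drops. A hedged reconstruction:

static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	blk_mq_start_request(orig);

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);

		tio->duration_jiffies = jiffies;	/* start stamp */
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors,
				    false /* start of I/O */, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold a reference for the in-flight I/O: the opener's reference
	 * is not enough, because the device may be closed while requests
	 * are still completing.
	 */
	dm_get(md);
}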
468 struct mapped_device *md = set->driver_data; in dm_mq_init_request() local
475 tio->md = md; in dm_mq_init_request()
477 if (md->init_tio_pdu) { in dm_mq_init_request()
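.init_request runs once per preallocated request, so the md pointer and the target's per-I/O data pointer can be wired up ahead of time rather than on every dispatch. Reconstructed from the matches (the target's pdu area sits immediately after the tio, as sized in dm_mq_init_request_queue() below):

static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mapped_device *md = set->driver_data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data follows the tio in the pdu */
		tio->info.ptr = tio + 1;
	}

	return 0;
}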
490 struct mapped_device *md = tio->md; in dm_mq_queue_rq() local
491 struct dm_target *ti = md->immutable_target; in dm_mq_queue_rq()
498 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) in dm_mq_queue_rq()
505 map = dm_get_live_table(md, &srcu_idx); in dm_mq_queue_rq()
507 dm_put_live_table(md, srcu_idx); in dm_mq_queue_rq()
511 dm_put_live_table(md, srcu_idx); in dm_mq_queue_rq()
517 dm_start_request(md, rq); in dm_mq_queue_rq()
520 init_tio(tio, rq, md); in dm_mq_queue_rq()
530 rq_end_stats(md, rq); in dm_mq_queue_rq()
531 rq_completed(md); in dm_mq_queue_rq()
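Putting the dispatch path together: .queue_rq returns BLK_STS_RESOURCE while I/O is blocked for suspend, resolves the target (the cached immutable target when there is one, otherwise a short SRCU-protected table lookup), then runs dm_start_request(), init_tio(), and map_request(), undoing the start on DM_MAPIO_REQUEUE. A condensed, hedged sketch:

static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)))
		return BLK_STS_RESOURCE;	/* blk-mq will requeue */

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		if (unlikely(!map)) {
			dm_put_live_table(md, srcu_idx);
			return BLK_STS_RESOURCE;
		}
		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);
	init_tio(tio, rq, md);
	tio->ti = ti;	/* must be set before map_request() */

	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* undo dm_start_request() before requeueing */
		rq_end_stats(md, rq);
		rq_completed(md);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}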
544 int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t) in dm_mq_init_request_queue() argument
549 md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id); in dm_mq_init_request_queue()
550 if (!md->tag_set) in dm_mq_init_request_queue()
553 md->tag_set->ops = &dm_mq_ops; in dm_mq_init_request_queue()
554 md->tag_set->queue_depth = dm_get_blk_mq_queue_depth(); in dm_mq_init_request_queue()
555 md->tag_set->numa_node = md->numa_node_id; in dm_mq_init_request_queue()
556 md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING; in dm_mq_init_request_queue()
557 md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues(); in dm_mq_init_request_queue()
558 md->tag_set->driver_data = md; in dm_mq_init_request_queue()
560 md->tag_set->cmd_size = sizeof(struct dm_rq_target_io); in dm_mq_init_request_queue()
564 md->tag_set->cmd_size += immutable_tgt->per_io_data_size; in dm_mq_init_request_queue()
565 md->init_tio_pdu = true; in dm_mq_init_request_queue()
568 err = blk_mq_alloc_tag_set(md->tag_set); in dm_mq_init_request_queue()
572 err = blk_mq_init_allocated_queue(md->tag_set, md->queue); in dm_mq_init_request_queue()
578 blk_mq_free_tag_set(md->tag_set); in dm_mq_init_request_queue()
580 kfree(md->tag_set); in dm_mq_init_request_queue()
581 md->tag_set = NULL; in dm_mq_init_request_queue()
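The matches already contain most of dm_mq_init_request_queue(); reassembled (with the goto unwinding hedged from upstream), it shows the canonical pattern for a stacking driver bringing up blk-mq: allocate the tag set on the device's NUMA node, size cmd_size so the per-request pdu carries the tio plus the immutable target's per-I/O data, allocate the tag set, and attach it to the preallocated queue:

int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct dm_target *immutable_tgt;
	int err;

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL,
				   md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* target-specific per-io data follows the tio in the pdu */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (err)
		goto out_tag_set;
	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);
	md->tag_set = NULL;

	return err;
}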
586 void dm_mq_cleanup_mapped_device(struct mapped_device *md) in dm_mq_cleanup_mapped_device() argument
588 if (md->tag_set) { in dm_mq_cleanup_mapped_device()
589 blk_mq_free_tag_set(md->tag_set); in dm_mq_cleanup_mapped_device()
590 kfree(md->tag_set); in dm_mq_cleanup_mapped_device()
591 md->tag_set = NULL; in dm_mq_cleanup_mapped_device()
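Cleanup reverses the allocation above; the matches cover the entire body, and the md->tag_set guard makes it safe whether or not the request-based queue was ever set up:

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
		md->tag_set = NULL;
	}
}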