/block/ |
D | elevator.c |
    63   struct elevator_queue *e = q->elevator;  in elv_iosched_allow_bio_merge()  local
    65   if (e->type->ops.allow_merge)  in elv_iosched_allow_bio_merge()
    66   return e->type->ops.allow_merge(q, rq, bio);  in elv_iosched_allow_bio_merge()
    101  static bool elevator_match(const struct elevator_type *e, const char *name,  in elevator_match()  argument
    104  if (!elv_support_features(e->elevator_features, required_features))  in elevator_match()
    106  if (!strcmp(e->elevator_name, name))  in elevator_match()
    108  if (e->elevator_alias && !strcmp(e->elevator_alias, name))  in elevator_match()
    125  struct elevator_type *e;  in elevator_find()  local
    127  list_for_each_entry(e, &elv_list, list) {  in elevator_find()
    128  if (elevator_match(e, name, required_features))  in elevator_find()
    [all …]
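The elevator.c hits trace how a scheduler is looked up by name: elevator_match() accepts either the elevator's name or its alias, but only if the queue's required features are supported, and elevator_find() walks the registered list applying that test. Below is a minimal standalone sketch of that matching logic; the types, the list linkage and the feature check are simplified stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* simplified stand-in for struct elevator_type (see include/linux/elevator.h) */
struct elevator_type {
	const char *elevator_name;
	const char *elevator_alias;	/* optional second name, may be NULL */
	unsigned int elevator_features;
	struct elevator_type *next;	/* stand-in for the kernel's elv_list linkage */
};

/* a scheduler qualifies only if it offers every feature the queue requires */
static bool elv_support_features(unsigned int elv_features,
				 unsigned int required_features)
{
	return (elv_features & required_features) == required_features;
}

static bool elevator_match(const struct elevator_type *e, const char *name,
			   unsigned int required_features)
{
	if (!elv_support_features(e->elevator_features, required_features))
		return false;
	if (!strcmp(e->elevator_name, name))
		return true;
	return e->elevator_alias && !strcmp(e->elevator_alias, name);
}

static struct elevator_type *elevator_find(struct elevator_type *list,
					   const char *name,
					   unsigned int required_features)
{
	for (struct elevator_type *e = list; e; e = e->next)
		if (elevator_match(e, name, required_features))
			return e;
	return NULL;
}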
|
D | blk-mq-sched.h |
    29   int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
    30   void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
    47   struct elevator_queue *e = q->elevator;  in blk_mq_sched_allow_merge()  local
    49   if (e && e->type->ops.allow_merge)  in blk_mq_sched_allow_merge()
    50   return e->type->ops.allow_merge(q, rq, bio);  in blk_mq_sched_allow_merge()
    57   struct elevator_queue *e = rq->q->elevator;  in blk_mq_sched_completed_request()  local
    59   if (e && e->type->ops.completed_request)  in blk_mq_sched_completed_request()
    60   e->type->ops.completed_request(rq, now);  in blk_mq_sched_completed_request()
    66   struct elevator_queue *e = q->elevator;  in blk_mq_sched_requeue_request()  local
    68   if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request)  in blk_mq_sched_requeue_request()
    [all …]
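The blk-mq-sched.h helpers (and the similar hits in elevator.c, blk-mq-sched.c and blk-mq.c) all follow one guarded pattern: the queue may have no elevator attached at all, and each elevator_mq_ops hook is optional, so both are checked before the indirect call. A minimal sketch of that pattern follows; the helper name and the cut-down types are stand-ins for the real definitions in include/linux/elevator.h.

#include <stdbool.h>
#include <stddef.h>

struct request_queue;
struct request;
struct bio;

struct elevator_mq_ops {
	/* only the hook used below; the real ops table has many more */
	bool (*allow_merge)(struct request_queue *q, struct request *rq,
			    struct bio *bio);
};

struct elevator_type {
	struct elevator_mq_ops ops;
};

struct elevator_queue {
	struct elevator_type *type;
};

struct request_queue {
	struct elevator_queue *elevator;	/* NULL when no scheduler is attached */
};

/*
 * The shared pattern: fetch q->elevator once, then call an op only if both
 * the elevator and that particular hook exist.
 */
static inline bool sched_allow_merge(struct request_queue *q,
				     struct request *rq, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.allow_merge)
		return e->type->ops.allow_merge(q, rq, bio);
	return true;	/* no scheduler or no hook: do not veto the merge */
}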
|
D | blk-mq-sched.c |
    120  struct elevator_queue *e = q->elevator;  in __blk_mq_do_dispatch_sched()  local
    136  if (e->type->ops.has_work && !e->type->ops.has_work(hctx))  in __blk_mq_do_dispatch_sched()
    148  rq = e->type->ops.dispatch_request(hctx);  in __blk_mq_do_dispatch_sched()
    375  struct elevator_queue *e = q->elevator;  in __blk_mq_sched_bio_merge()  local
    381  if (e && e->type->ops.bio_merge)  in __blk_mq_sched_bio_merge()
    382  return e->type->ops.bio_merge(q, bio, nr_segs);  in __blk_mq_sched_bio_merge()
    435  struct elevator_queue *e = q->elevator;  in blk_mq_sched_insert_request()  local
    439  WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));  in blk_mq_sched_insert_request()
    468  if (e) {  in blk_mq_sched_insert_request()
    472  e->type->ops.insert_requests(hctx, &list, at_head);  in blk_mq_sched_insert_request()
    [all …]
|
D | badblocks.c |
    208  sector_t e = a + BB_LEN(p[lo]);  in badblocks_set()  local
    211  if (e >= s) {  in badblocks_set()
    213  if (s == a && s + sectors >= e)  in badblocks_set()
    219  if (e < s + sectors)  in badblocks_set()
    220  e = s + sectors;  in badblocks_set()
    221  if (e - a <= BB_MAX_LEN) {  in badblocks_set()
    222  p[lo] = BB_MAKE(a, e-a, ack);  in badblocks_set()
    223  s = e;  in badblocks_set()
    232  sectors = e - s;  in badblocks_set()
    240  sector_t e = a + BB_LEN(p[hi]);  in badblocks_set()  local
    [all …]
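The badblocks_set() hits compute the end of an existing table entry (e = a + BB_LEN(p[lo])), decide whether the new range [s, s + sectors) touches it, and rewrite the entry with BB_MAKE() as long as the merged range still fits in BB_MAX_LEN sectors. The sketch below mimics that arithmetic with a hypothetical, simplified entry encoding and a made-up helper name; the real packed format and BB_* macros live in include/linux/badblocks.h.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;
typedef uint64_t sector_t;

/* hypothetical simplified encoding: 9-bit (length - 1), start sector, ack bit */
#define BB_MAX_LEN	512
#define BB_ACK_BIT	(1ULL << 63)
#define BB_LEN(x)	(((x) & 0x1ff) + 1)
#define BB_OFFSET(x)	(((x) & ~BB_ACK_BIT) >> 9)
#define BB_MAKE(a, l, ack) \
	(((u64)(a) << 9) | ((u64)(l) - 1) | ((ack) ? BB_ACK_BIT : 0))

/*
 * Merge the new bad range [s, s + sectors) into an existing entry that is
 * known to start at or before s.  Returns how many sectors of the new range
 * could not be absorbed (the caller would record those in further entries).
 */
static sector_t merge_into_entry(u64 *entry, sector_t s, sector_t sectors, bool ack)
{
	sector_t a = BB_OFFSET(*entry);
	sector_t e = a + BB_LEN(*entry);	/* one past the entry's last sector */

	if (e < s)
		return sectors;			/* gap before the new range: no merge */

	if (e < s + sectors)
		e = s + sectors;		/* grow the end to cover the new range */
	if (e - a > BB_MAX_LEN)
		e = a + BB_MAX_LEN;		/* one entry covers at most BB_MAX_LEN sectors */

	*entry = BB_MAKE(a, e - a, ack);
	return s + sectors > e ? s + sectors - e : 0;
}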
|
D | kyber-iosched.c |
    407  static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)  in kyber_init_sched()  argument
    412  eq = elevator_alloc(q, e);  in kyber_init_sched()
    430  static void kyber_exit_sched(struct elevator_queue *e)  in kyber_exit_sched()  argument
    432  struct kyber_queue_data *kqd = e->elevator_data;  in kyber_exit_sched()
    862  static ssize_t kyber_##name##_lat_show(struct elevator_queue *e, \
    865  struct kyber_queue_data *kqd = e->elevator_data; \
    870  static ssize_t kyber_##name##_lat_store(struct elevator_queue *e, \
    873  struct kyber_queue_data *kqd = e->elevator_data; \
|
D | mq-deadline.c |
    676  static void dd_exit_sched(struct elevator_queue *e)  in dd_exit_sched()  argument
    678  struct deadline_data *dd = e->elevator_data;  in dd_exit_sched()
    705  static int dd_init_sched(struct request_queue *q, struct elevator_type *e)  in dd_init_sched()  argument
    712  eq = elevator_alloc(q, e);  in dd_init_sched()
    981  static ssize_t __FUNC(struct elevator_queue *e, char *page) \
    983  struct deadline_data *dd = e->elevator_data; \
    999  static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
    1001 struct deadline_data *dd = e->elevator_data; \
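Here, as in kyber-iosched.c above and bfq-iosched.c below, the pairing is the same: ->init_sched() gets an elevator_queue from elevator_alloc() and parks the scheduler's private state in ->elevator_data, and ->exit_sched() reads that same field back to tear the state down. The following is a standalone sketch of that lifecycle using stand-in types and userspace allocators, not the kernel's helpers or error codes.

#include <stdlib.h>

struct elevator_type;

struct elevator_queue {
	struct elevator_type *type;
	void *elevator_data;			/* scheduler-private state */
};

struct request_queue {
	struct elevator_queue *elevator;
};

/* stand-in for the kernel's elevator_alloc() */
static struct elevator_queue *elevator_alloc(struct request_queue *q,
					     struct elevator_type *e)
{
	struct elevator_queue *eq = calloc(1, sizeof(*eq));

	if (eq)
		eq->type = e;
	return eq;
}

/* stand-in for struct deadline_data: whatever the scheduler needs per queue */
struct deadline_data {
	int fifo_expire[2];
};

static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct elevator_queue *eq = elevator_alloc(q, e);
	struct deadline_data *dd;

	if (!eq)
		return -1;			/* the kernel would return -ENOMEM */

	dd = calloc(1, sizeof(*dd));
	if (!dd) {
		free(eq);
		return -1;
	}
	eq->elevator_data = dd;			/* private data travels with the elevator */
	q->elevator = eq;
	return 0;
}

static void dd_exit_sched(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;

	free(dd);				/* eq itself is released by the elevator core */
}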
|
D | blk-mq-debugfs.c |
    921  struct elevator_type *e = q->elevator->type;  in blk_mq_debugfs_register_sched()  local
    930  if (!e->queue_debugfs_attrs)  in blk_mq_debugfs_register_sched()
    935  debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);  in blk_mq_debugfs_register_sched()
    992  struct elevator_type *e = q->elevator->type;  in blk_mq_debugfs_register_sched_hctx()  local
    1002 if (!e->hctx_debugfs_attrs)  in blk_mq_debugfs_register_sched_hctx()
    1008 e->hctx_debugfs_attrs);  in blk_mq_debugfs_register_sched_hctx()
|
D | blk-mq.c |
    340  struct elevator_queue *e = data->q->elevator;  in blk_mq_rq_ctx_init()  local
    343  if (e && e->type->ops.prepare_request) {  in blk_mq_rq_ctx_init()
    344  if (e->type->icq_cache)  in blk_mq_rq_ctx_init()
    347  e->type->ops.prepare_request(rq);  in blk_mq_rq_ctx_init()
    360  struct elevator_queue *e = q->elevator;  in __blk_mq_alloc_request()  local
    371  if (e) {  in __blk_mq_alloc_request()
    378  e->type->ops.limit_depth &&  in __blk_mq_alloc_request()
    380  e->type->ops.limit_depth(data->cmd_flags, data);  in __blk_mq_alloc_request()
    386  if (!e)  in __blk_mq_alloc_request()
    521  struct elevator_queue *e = q->elevator;  in blk_mq_free_request()  local
    [all …]
|
D | bfq-iosched.c |
    6891  static void bfq_exit_queue(struct elevator_queue *e)  in bfq_exit_queue()  argument
    6893  struct bfq_data *bfqd = e->elevator_data;  in bfq_exit_queue()
    6940  static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)  in bfq_init_queue()  argument
    6945  eq = elevator_alloc(q, e);  in bfq_init_queue()
    7102  static ssize_t __FUNC(struct elevator_queue *e, char *page) \
    7104  struct bfq_data *bfqd = e->elevator_data; \
    7124  static ssize_t __FUNC(struct elevator_queue *e, char *page) \
    7126  struct bfq_data *bfqd = e->elevator_data; \
    7136  __FUNC(struct elevator_queue *e, const char *page, size_t count) \
    7138  struct bfq_data *bfqd = e->elevator_data; \
    [all …]
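The macro-generated show/store hits in bfq-iosched.c (and the equivalents in mq-deadline.c and kyber-iosched.c above) all begin by pulling the scheduler's private data back out of e->elevator_data before formatting or parsing a single tunable. Below is a rough standalone illustration of that macro technique; the macro name, the bfq_data layout and the use of snprintf/strtoul instead of the kernel's sysfs helpers are all assumptions made for the sketch.

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>		/* ssize_t */

struct elevator_queue {
	void *elevator_data;
};

/* stand-in private data with one example tunable */
struct bfq_data {
	unsigned int max_budget;
};

/*
 * One macro expands into a show and a store function per tunable; each
 * generated function first recovers the private data from e->elevator_data.
 */
#define BFQ_ATTR_FNS(name)						\
static ssize_t bfq_##name##_show(struct elevator_queue *e, char *page)	\
{									\
	struct bfq_data *bfqd = e->elevator_data;			\
	return snprintf(page, 32, "%u\n", bfqd->name);			\
}									\
static ssize_t bfq_##name##_store(struct elevator_queue *e,		\
				  const char *page, size_t count)	\
{									\
	struct bfq_data *bfqd = e->elevator_data;			\
	bfqd->name = (unsigned int)strtoul(page, NULL, 10);		\
	return (ssize_t)count;						\
}

BFQ_ATTR_FNS(max_budget)	/* expands to bfq_max_budget_show()/_store() */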
|
D | blk.h |
    246  struct elevator_queue *e)  in elevator_exit()  argument
    251  __elevator_exit(q, e);  in elevator_exit()
|
/block/partitions/ |
D | Kconfig |
    185  label, i.e. DOS partition table. It does not support GPT labelled
|