/block/ |
D | elevator.c |
    63   struct elevator_queue *e = q->elevator;  in elv_iosched_allow_bio_merge() local
    65   if (e->type->ops.allow_merge)  in elv_iosched_allow_bio_merge()
    66   return e->type->ops.allow_merge(q, rq, bio);  in elv_iosched_allow_bio_merge()
    101  static bool elevator_match(const struct elevator_type *e, const char *name,  in elevator_match() argument
    104  if (!elv_support_features(e->elevator_features, required_features))  in elevator_match()
    106  if (!strcmp(e->elevator_name, name))  in elevator_match()
    108  if (e->elevator_alias && !strcmp(e->elevator_alias, name))  in elevator_match()
    125  struct elevator_type *e;  in elevator_find() local
    127  list_for_each_entry(e, &elv_list, list) {  in elevator_find()
    128  if (elevator_match(e, name, required_features))  in elevator_find()
    [all …]
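These elevator.c hits revolve around scheduler lookup: elevator_find() walks the global elv_list and hands each registered type to elevator_match(), which first verifies that the type supports the required features and then accepts a match on either elevator_name or elevator_alias. A minimal userspace sketch of that pattern follows; struct elv_type_model, model_match() and model_find() are illustrative stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct elv_type_model {
        const char *name;                /* canonical scheduler name */
        const char *alias;               /* optional second name, may be NULL */
        unsigned int features;
        struct elv_type_model *next;
};

static struct elv_type_model *elv_list_model;   /* head of the registered list */

static bool model_match(const struct elv_type_model *e, const char *name,
                        unsigned int required)
{
        if ((e->features & required) != required)
                return false;            /* missing a feature the queue needs */
        if (!strcmp(e->name, name))
                return true;
        return e->alias && !strcmp(e->alias, name);
}

static struct elv_type_model *model_find(const char *name, unsigned int required)
{
        for (struct elv_type_model *e = elv_list_model; e; e = e->next)
                if (model_match(e, name, required))
                        return e;
        return NULL;
}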
|
D | blk-mq-sched.h |
    27   int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
    28   void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
    45   struct elevator_queue *e = q->elevator;  in blk_mq_sched_allow_merge() local
    47   if (e && e->type->ops.allow_merge)  in blk_mq_sched_allow_merge()
    48   return e->type->ops.allow_merge(q, rq, bio);  in blk_mq_sched_allow_merge()
    55   struct elevator_queue *e = rq->q->elevator;  in blk_mq_sched_completed_request() local
    57   if (e && e->type->ops.completed_request)  in blk_mq_sched_completed_request()
    58   e->type->ops.completed_request(rq, now);  in blk_mq_sched_completed_request()
    64   struct elevator_queue *e = q->elevator;  in blk_mq_sched_requeue_request() local
    66   if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request)  in blk_mq_sched_requeue_request()
    [all …]
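A recurring idiom in these inline helpers is that every scheduler hook is optional: the caller checks both that an elevator is attached (e is non-NULL) and that the specific op is implemented before calling through the ops table. The sketch below mirrors that guard with hypothetical stand-in types; it is not the kernel's struct elevator_queue.

#include <stdbool.h>
#include <stddef.h>

struct req;                      /* opaque stand-ins for request and bio */
struct bio;

struct sched_ops_model {
        /* any hook a scheduler does not need is simply left NULL */
        bool (*allow_merge)(struct req *rq, struct bio *bio);
        void (*completed_request)(struct req *rq, unsigned long long now);
};

struct elevator_model {
        const struct sched_ops_model *ops;
};

/* Mirrors the guard used by blk_mq_sched_allow_merge() and friends. */
static bool model_allow_merge(const struct elevator_model *e, struct req *rq,
                              struct bio *bio)
{
        if (e && e->ops->allow_merge)
                return e->ops->allow_merge(rq, bio);
        return true;             /* no scheduler veto: merging is allowed */
}

static void model_completed_request(const struct elevator_model *e,
                                    struct req *rq, unsigned long long now)
{
        if (e && e->ops->completed_request)
                e->ops->completed_request(rq, now);
}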
|
D | blk-mq-sched.c |
    119  struct elevator_queue *e = q->elevator;  in __blk_mq_do_dispatch_sched() local
    134  if (e->type->ops.has_work && !e->type->ops.has_work(hctx))  in __blk_mq_do_dispatch_sched()
    145  rq = e->type->ops.dispatch_request(hctx);  in __blk_mq_do_dispatch_sched()
    283  struct elevator_queue *e = q->elevator;  in __blk_mq_sched_dispatch_requests() local
    284  const bool has_sched_dispatch = e && e->type->ops.dispatch_request;  in __blk_mq_sched_dispatch_requests()
    356  struct elevator_queue *e = q->elevator;  in __blk_mq_sched_bio_merge() local
    362  if (e && e->type->ops.bio_merge)  in __blk_mq_sched_bio_merge()
    363  return e->type->ops.bio_merge(q, bio, nr_segs);  in __blk_mq_sched_bio_merge()
    430  struct elevator_queue *e = q->elevator;  in blk_mq_sched_insert_request() local
    435  WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));  in blk_mq_sched_insert_request()
    [all …]
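The blk-mq-sched.c hits outline the dispatch path: if the scheduler implements has_work() and reports idle, dispatching stops early; otherwise dispatch_request() is called repeatedly to drain requests from the scheduler. A simplified version of that loop, under assumed stand-in types (model_do_dispatch and friends are hypothetical), looks roughly like this:

#include <stdbool.h>
#include <stddef.h>

struct req;

struct dispatch_ops_model {
        bool (*has_work)(void *hctx);                 /* optional idle check */
        struct req *(*dispatch_request)(void *hctx);  /* hand back one request */
};

/* Keep pulling requests out of the scheduler until it is idle or runs dry. */
static int model_do_dispatch(const struct dispatch_ops_model *ops, void *hctx,
                             void (*issue)(struct req *rq))
{
        int dispatched = 0;

        for (;;) {
                struct req *rq;

                if (ops->has_work && !ops->has_work(hctx))
                        break;                        /* scheduler says idle */
                rq = ops->dispatch_request(hctx);
                if (!rq)
                        break;
                issue(rq);                            /* would go to the driver */
                dispatched++;
        }
        return dispatched;
}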
|
D | badblocks.c |
    208  sector_t e = a + BB_LEN(p[lo]);  in badblocks_set() local
    211  if (e >= s) {  in badblocks_set()
    213  if (s == a && s + sectors >= e)  in badblocks_set()
    219  if (e < s + sectors)  in badblocks_set()
    220  e = s + sectors;  in badblocks_set()
    221  if (e - a <= BB_MAX_LEN) {  in badblocks_set()
    222  p[lo] = BB_MAKE(a, e-a, ack);  in badblocks_set()
    223  s = e;  in badblocks_set()
    232  sectors = e - s;  in badblocks_set()
    240  sector_t e = a + BB_LEN(p[hi]);  in badblocks_set() local
    [all …]
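badblocks_set() merges a new bad range [s, s + sectors) into an existing table entry that starts at a and ends at e = a + BB_LEN(p[lo]): the entry is extended up to BB_MAX_LEN sectors and any overflow is carried forward as the range still to be recorded. A rough userspace model of that arithmetic follows, using an unpacked entry struct rather than the kernel's packed BB_MAKE() encoding; the names and the MODEL_BB_MAX_LEN value are assumptions for illustration only.

#include <stdbool.h>

typedef unsigned long long sector_t;

#define MODEL_BB_MAX_LEN 512u            /* stand-in for the kernel's BB_MAX_LEN */

struct bb_entry_model {                  /* unpacked; the kernel packs this into a u64 */
        sector_t start;
        sector_t len;
        bool ack;
};

/*
 * Merge the new bad range [*s, *s + *sectors) into an entry known to start
 * at or before *s.  Returns true if the range was fully absorbed; otherwise
 * *s and *sectors are updated to the part still left to record.
 */
static bool model_extend_entry(struct bb_entry_model *p, sector_t *s,
                               sector_t *sectors, bool ack)
{
        sector_t a = p->start;
        sector_t e = a + p->len;         /* end of the existing entry */

        if (e < *s)
                return false;            /* no overlap or adjacency */

        if (e < *s + *sectors)
                e = *s + *sectors;       /* new range sticks out past the entry */

        if (e - a <= MODEL_BB_MAX_LEN) {
                p->len = e - a;          /* everything fits in one entry */
                p->ack = p->ack && ack;  /* stay unacknowledged if either side is */
                *sectors = 0;
                return true;
        }

        /* Grow the entry to its maximum; the caller records the remainder. */
        p->len = MODEL_BB_MAX_LEN;
        *s = a + MODEL_BB_MAX_LEN;
        *sectors = e - *s;
        return false;
}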
|
D | kyber-iosched.c |
    416  static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)  in kyber_init_sched() argument
    421  eq = elevator_alloc(q, e);  in kyber_init_sched()
    439  static void kyber_exit_sched(struct elevator_queue *e)  in kyber_exit_sched() argument
    441  struct kyber_queue_data *kqd = e->elevator_data;  in kyber_exit_sched()
    861  static ssize_t kyber_##name##_lat_show(struct elevator_queue *e, \
    864  struct kyber_queue_data *kqd = e->elevator_data; \
    869  static ssize_t kyber_##name##_lat_store(struct elevator_queue *e, \
    872  struct kyber_queue_data *kqd = e->elevator_data; \
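kyber_init_sched() and kyber_exit_sched() show the usual scheduler lifecycle: init allocates the elevator_queue with elevator_alloc(), hangs the scheduler's private kyber_queue_data off elevator_data, and exit tears that state down again through the same pointer. A stripped-down model of that pairing, with invented stand-in types in place of the kernel API:

#include <stdlib.h>

struct eq_model {
        void *elevator_data;             /* scheduler-private state */
};

struct kqd_model {
        unsigned int async_depth;        /* placeholder for kyber's per-queue state */
};

/* init: allocate the queue wrapper, then the scheduler's own data. */
static struct eq_model *model_init_sched(void)
{
        struct eq_model *eq = calloc(1, sizeof(*eq));
        struct kqd_model *kqd;

        if (!eq)
                return NULL;

        kqd = calloc(1, sizeof(*kqd));
        if (!kqd) {
                free(eq);                /* undo on failure, as the kernel does */
                return NULL;
        }

        eq->elevator_data = kqd;         /* what e->elevator_data reaches later */
        return eq;
}

/* exit: release the private data reached through elevator_data. */
static void model_exit_sched(struct eq_model *e)
{
        free(e->elevator_data);
        free(e);
}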
|
D | mq-deadline-main.c |
    566  static void dd_exit_sched(struct elevator_queue *e)  in dd_exit_sched() argument
    568  struct deadline_data *dd = e->elevator_data;  in dd_exit_sched()
    588  static int dd_init_sched(struct request_queue *q, struct elevator_type *e)  in dd_init_sched() argument
    601  eq = elevator_alloc(q, e);  in dd_init_sched()
    871  static ssize_t __FUNC(struct elevator_queue *e, char *page) \
    873  struct deadline_data *dd = e->elevator_data; \
    889  static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
    891  struct deadline_data *dd = e->elevator_data; \
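The mq-deadline hits at 871 and 889 come from macro-generated sysfs handlers: one macro expands into a show function that formats a deadline_data field into the sysfs page, the other into a store function that parses the written value back. A compact userspace imitation of that pattern is sketched below; MODEL_SHOW/MODEL_STORE and struct dd_model are hypothetical names, not the kernel macros.

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

struct dd_model {
        int read_expire;
        int write_expire;
};

/* Each macro expands to one sysfs-style handler for the named field. */
#define MODEL_SHOW(field) \
static ssize_t model_##field##_show(struct dd_model *dd, char *page) \
{ \
        return sprintf(page, "%d\n", dd->field); \
}

#define MODEL_STORE(field) \
static ssize_t model_##field##_store(struct dd_model *dd, \
                                     const char *page, size_t count) \
{ \
        dd->field = atoi(page); \
        return (ssize_t)count; \
}

MODEL_SHOW(read_expire)
MODEL_STORE(read_expire)
MODEL_SHOW(write_expire)
MODEL_STORE(write_expire)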
|
D | bfq-iosched.c |
    6400  static void bfq_exit_queue(struct elevator_queue *e)  in bfq_exit_queue() argument
    6402  struct bfq_data *bfqd = e->elevator_data;  in bfq_exit_queue()
    6447  static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)  in bfq_init_queue() argument
    6452  eq = elevator_alloc(q, e);  in bfq_init_queue()
    6609  static ssize_t __FUNC(struct elevator_queue *e, char *page) \
    6611  struct bfq_data *bfqd = e->elevator_data; \
    6631  static ssize_t __FUNC(struct elevator_queue *e, char *page) \
    6633  struct bfq_data *bfqd = e->elevator_data; \
    6643  __FUNC(struct elevator_queue *e, const char *page, size_t count) \
    6645  struct bfq_data *bfqd = e->elevator_data; \
    [all …]
|
D | blk-mq.c |
    333  struct elevator_queue *e = data->q->elevator;  in blk_mq_rq_ctx_init() local
    336  if (e && e->type->ops.prepare_request) {  in blk_mq_rq_ctx_init()
    337  if (e->type->icq_cache)  in blk_mq_rq_ctx_init()
    340  e->type->ops.prepare_request(rq);  in blk_mq_rq_ctx_init()
    353  struct elevator_queue *e = q->elevator;  in __blk_mq_alloc_request() local
    365  if (e) {  in __blk_mq_alloc_request()
    372  e->type->ops.limit_depth &&  in __blk_mq_alloc_request()
    374  e->type->ops.limit_depth(data->cmd_flags, data);  in __blk_mq_alloc_request()
    380  if (!e)  in __blk_mq_alloc_request()
    522  struct elevator_queue *e = q->elevator;  in blk_mq_free_request() local
    [all …]
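In blk-mq.c the elevator is consulted while a request is being set up: prepare_request() runs once per new request (preceded by an io_context step when the type has an icq_cache), and limit_depth() lets the scheduler cap how many requests the allocating context may have in flight. The sketch below models both optional hooks with assumed stand-in types; note that the real limit_depth() takes the allocation data structure, not a plain depth pointer.

#include <stdbool.h>
#include <stddef.h>

struct rq_model;                 /* opaque stand-in for struct request */

struct alloc_ops_model {
        void (*prepare_request)(struct rq_model *rq);                     /* optional */
        void (*limit_depth)(unsigned int cmd_flags, unsigned int *depth); /* optional */
};

struct elev_model {
        const struct alloc_ops_model *ops;
        bool has_icq_cache;
};

/* Per-request setup: let the scheduler attach its private state. */
static void model_rq_ctx_init(const struct elev_model *e, struct rq_model *rq)
{
        if (e && e->ops->prepare_request) {
                if (e->has_icq_cache) {
                        /* the kernel would associate an io_context here */
                }
                e->ops->prepare_request(rq);
        }
}

/* Allocation-time depth limiting, e.g. to keep async requests shallow. */
static unsigned int model_alloc_depth(const struct elev_model *e,
                                      unsigned int cmd_flags,
                                      unsigned int depth)
{
        if (e && e->ops->limit_depth)
                e->ops->limit_depth(cmd_flags, &depth);
        return depth;
}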
|
D | blk-mq-debugfs.c |
    923   struct elevator_type *e = q->elevator->type;  in blk_mq_debugfs_register_sched() local
    932   if (!e->queue_debugfs_attrs)  in blk_mq_debugfs_register_sched()
    937   debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);  in blk_mq_debugfs_register_sched()
    994   struct elevator_type *e = q->elevator->type;  in blk_mq_debugfs_register_sched_hctx() local
    996   if (!e->hctx_debugfs_attrs)  in blk_mq_debugfs_register_sched_hctx()
    1002  e->hctx_debugfs_attrs);  in blk_mq_debugfs_register_sched_hctx()
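blk_mq_debugfs_register_sched() bails out unless the elevator type actually exports queue_debugfs_attrs, and the per-hctx variant does the same for hctx_debugfs_attrs, so schedulers without debugfs files cost nothing. A tiny model of that conditional registration, using invented types and a NULL-name sentinel to end the attribute list:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct dbg_attr_model {
        const char *name;                           /* NULL name terminates the array */
};

struct elv_type_dbg_model {
        const struct dbg_attr_model *queue_attrs;   /* may be NULL entirely */
        const struct dbg_attr_model *hctx_attrs;    /* may be NULL entirely */
};

/* Register queue-level attributes only when the scheduler provides some. */
static bool model_register_sched(const struct elv_type_dbg_model *e)
{
        if (!e->queue_attrs)
                return false;                       /* nothing to expose */

        for (const struct dbg_attr_model *a = e->queue_attrs; a->name; a++)
                printf("would create debugfs file: %s\n", a->name);
        return true;
}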
|
D | blk.h |
    212  struct elevator_queue *e)  in elevator_exit() argument
    217  __elevator_exit(q, e);  in elevator_exit()
|
D | Kconfig | 49 protocols (e.g. Task Management Functions and SMP in Serial
|
/block/partitions/ |
D | Kconfig | 185 label, i.e. DOS partition table. It does not support GPT labelled
|