/block/ |
D | cmdline-parser.c |
      43  char *next = strchr(++partdef, ')');    in parse_subpart() local
      45  if (!next) {    in parse_subpart()
      51  length = min_t(int, next - partdef,    in parse_subpart()
      56  partdef = ++next;    in parse_subpart()
      93  char *next;    in parse_parts() local
     105  next = strchr(bdevdef, ':');    in parse_parts()
     106  if (!next) {    in parse_parts()
     111  length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1);    in parse_parts()
     118  while (next && *(++next)) {    in parse_parts()
     119  bdevdef = next;    in parse_parts()
    [all …]
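The cmdline-parser.c hits above all follow one parsing idiom: strchr() finds the next delimiter, the token length is clamped to the destination buffer, and the cursor is advanced past the delimiter. As a rough illustration only (the parse_names() helper, the buffer size, and the ':' separator below are made up, not the kernel's code):

#include <stdio.h>
#include <string.h>

/* Walk a "name1:name2:name3" string, printing each token.
 * Mirrors the strchr()/length-clamp idiom from the hits above. */
static void parse_names(const char *defs)
{
	char name[32];

	while (*defs) {
		const char *next = strchr(defs, ':');
		size_t length;

		if (!next)                      /* last token: rest of string */
			next = defs + strlen(defs);

		length = (size_t)(next - defs);
		if (length > sizeof(name) - 1)  /* clamp, like min_t() above */
			length = sizeof(name) - 1;

		memcpy(name, defs, length);
		name[length] = '\0';
		printf("token: %s\n", name);

		defs = *next ? next + 1 : next; /* skip the ':' if present */
	}
}

int main(void)
{
	parse_names("sda1:sda2:averyveryverylongpartitionlabelthatgetsclamped");
	return 0;
}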
|
D | blk-merge.c |
      20  struct request *prev_rq, struct bio *prev, struct bio *next)    in bio_will_gap() argument
      49  bio_get_first_bvec(next, &nb);    in bio_will_gap()
     623  struct request *next)    in req_attempt_discard_merge() argument
     629  if (blk_rq_sectors(req) + bio_sectors(next->bio) >    in req_attempt_discard_merge()
     633  req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);    in req_attempt_discard_merge()
     641  struct request *next)    in ll_merge_requests_fn() argument
     645  if (req_gap_back_merge(req, next->bio))    in ll_merge_requests_fn()
     651  if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >    in ll_merge_requests_fn()
     655  total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;    in ll_merge_requests_fn()
     659  if (!blk_cgroup_mergeable(req, next->bio))    in ll_merge_requests_fn()
    [all …]
|
D | blk-rq-qos.c |
      37  rqos = rqos->next;    in __rq_qos_cleanup()
      46  rqos = rqos->next;    in __rq_qos_done()
      55  rqos = rqos->next;    in __rq_qos_issue()
      64  rqos = rqos->next;    in __rq_qos_requeue()
      73  rqos = rqos->next;    in __rq_qos_throttle()
      82  rqos = rqos->next;    in __rq_qos_track()
      91  rqos = rqos->next;    in __rq_qos_merge()
     100  rqos = rqos->next;    in __rq_qos_done_bio()
     109  rqos = rqos->next;    in __rq_qos_queue_depth_changed()
     301  q->rq_qos = rqos->next;    in rq_qos_exit()
|
D | blk-integrity.c |
     165  struct request *next)    in blk_integrity_merge_rq() argument
     167  if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)    in blk_integrity_merge_rq()
     170  if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)    in blk_integrity_merge_rq()
     174  bio_integrity(next->bio)->bip_flags)    in blk_integrity_merge_rq()
     177  if (req->nr_integrity_segments + next->nr_integrity_segments >    in blk_integrity_merge_rq()
     181  if (integrity_req_gap_back_merge(req, next->bio))    in blk_integrity_merge_rq()
     191  struct bio *next = bio->bi_next;    in blk_integrity_merge_bio() local
     204  bio->bi_next = next;    in blk_integrity_merge_bio()
|
D | blk-rq-qos.h |
      32  struct rq_qos *next;    member
      66  for (rqos = q->rq_qos; rqos; rqos = rqos->next) {    in rq_qos_id()
     101  rqos->next = q->rq_qos;    in rq_qos_add()
     122  for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {    in rq_qos_del()
     124  *cur = rqos->next;    in rq_qos_del()
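Read together with the blk-rq-qos.c hits, these lines show a queue's QoS policies kept on a plain singly linked chain: each hook walks rqos = rqos->next, and rq_qos_add() links the new policy in at the head. A compilable userspace sketch of that chain shape, with invented struct and function names (qos_policy, qos_add, qos_issue):

#include <stdio.h>

struct qos_policy {
	const char *name;
	struct qos_policy *next;   /* same role as rq_qos->next */
};

struct queue {
	struct qos_policy *qos;    /* head of the chain, like q->rq_qos */
};

/* Head insertion, as in the rq_qos_add() hit: the new policy is linked first. */
static void qos_add(struct queue *q, struct qos_policy *p)
{
	p->next = q->qos;
	q->qos = p;
}

/* Every hook walks the whole chain, as in the __rq_qos_*() hits. */
static void qos_issue(struct queue *q)
{
	for (struct qos_policy *p = q->qos; p; p = p->next)
		printf("issue hook: %s\n", p->name);
}

int main(void)
{
	struct queue q = { .qos = NULL };
	struct qos_policy wbt = { .name = "wbt" }, iolat = { .name = "iolatency" };

	qos_add(&q, &wbt);
	qos_add(&q, &iolat);   /* added last, walked first */
	qos_issue(&q);
	return 0;
}

Head insertion keeps qos_add() O(1) and means the most recently attached policy sees each request first, which matches the order the hooks above walk the chain.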
|
D | mq-deadline-main.c |
     230  struct request *next)    in dd_merged_requests() argument
     233  const u8 ioprio_class = dd_rq_ioclass(next);    in dd_merged_requests()
     235  struct dd_blkcg *blkcg = next->elv.priv[0];    in dd_merged_requests()
     244  if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {    in dd_merged_requests()
     245  if (time_before((unsigned long)next->fifo_time,    in dd_merged_requests()
     247  list_move(&req->queuelist, &next->queuelist);    in dd_merged_requests()
     248  req->fifo_time = next->fifo_time;    in dd_merged_requests()
     255  deadline_remove_request(q, &dd->per_prio[prio], next);    in dd_merged_requests()
     288  struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);    in deadline_check_fifo()
     313  rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);    in deadline_fifo_request()
    [all …]
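The dd_merged_requests() hits suggest that when one queued request absorbs another, the survivor inherits whichever FIFO expiry is earlier, so the merge cannot push the work past the victim's deadline. A much-simplified sketch of that rule, with plain integer timestamps standing in for jiffies and time_before():

#include <stdio.h>

struct req {
	unsigned long fifo_time;   /* absolute expiry; smaller = expires sooner */
};

/* When 'victim' is merged into 'survivor', the survivor must not wait
 * longer than the victim would have: adopt the earlier expiry, as in the
 * dd_merged_requests() hits (time_before() reduced to '<' here). */
static void merged_requests(struct req *survivor, struct req *victim)
{
	if (victim->fifo_time < survivor->fifo_time)
		survivor->fifo_time = victim->fifo_time;
}

int main(void)
{
	struct req a = { .fifo_time = 500 };
	struct req b = { .fifo_time = 200 };   /* would have expired first */

	merged_requests(&a, &b);               /* b's data now lives in a */
	printf("surviving fifo_time = %lu\n", a.fifo_time);  /* prints 200 */
	return 0;
}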
|
D | blk-crypto-internal.h |
      46  struct request *next)    in bio_crypt_ctx_merge_rq() argument
      49  next->crypt_ctx);    in bio_crypt_ctx_merge_rq()
      89  struct request *next)    in bio_crypt_ctx_merge_rq() argument
|
D | t10-pi.c |
      68  goto next;    in t10_pi_verify()
      81  goto next;    in t10_pi_verify()
      94  next:    in t10_pi_verify()
|
D | blk.h |
     119  struct bio *next)    in integrity_req_gap_back_merge() argument
     122  struct bio_integrity_payload *bip_next = bio_integrity(next);    in integrity_req_gap_back_merge()
     152  struct bio *next)    in integrity_req_gap_back_merge() argument
     240  struct request *next);
|
D | blk-mq.c |
     599  rq = list_entry(local_list.next, struct request, ipi_list);    in blk_done_softirq()
     619  if (list->next == &rq->ipi_list)    in blk_mq_trigger_softirq()
     806  struct request *rq, *next;    in blk_mq_requeue_work() local
     812  list_for_each_entry_safe(rq, next, &rq_list, queuelist) {    in blk_mq_requeue_work()
     830  rq = list_entry(rq_list.next, struct request, queuelist);    in blk_mq_requeue_work()
     934  static bool blk_mq_req_expired(struct request *rq, unsigned long *next)    in blk_mq_req_expired() argument
     947  if (*next == 0)    in blk_mq_req_expired()
     948  *next = deadline;    in blk_mq_req_expired()
     949  else if (time_after(*next, deadline))    in blk_mq_req_expired()
     950  *next = deadline;    in blk_mq_req_expired()
    [all …]
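In the blk_mq_req_expired() hits, the unsigned long *next out-parameter accumulates the soonest deadline seen while requests are scanned: the first value initialises it, later values only ever lower it. A small userspace sketch of the same accumulation, with time_after() reduced to a plain comparison and the request walk replaced by an array:

#include <stdio.h>

/* Track the earliest deadline across a scan, as in blk_mq_req_expired():
 * 0 means "nothing recorded yet". */
static void note_deadline(unsigned long deadline, unsigned long *next)
{
	if (*next == 0)
		*next = deadline;
	else if (deadline < *next)      /* time_after(*next, deadline) simplified */
		*next = deadline;
}

int main(void)
{
	unsigned long deadlines[] = { 900, 350, 700 };
	unsigned long next = 0;

	for (int i = 0; i < 3; i++)
		note_deadline(deadlines[i], &next);

	printf("earliest deadline: %lu\n", next);   /* prints 350 */
	return 0;
}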
|
D | badblocks.c |
     177  sector_t next = s + sectors;    in badblocks_set() local
     180  next += (1<<bb->shift) - 1;    in badblocks_set()
     181  next >>= bb->shift;    in badblocks_set()
     182  sectors = next - s;    in badblocks_set()
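The badblocks_set() hits round the end of the affected range up to the table's granularity (1 << bb->shift sectors) before converting to block units. The sketch below reproduces that arithmetic with made-up numbers; the rounding-down of the start is not among the hits above, but is added here so the units stay consistent:

#include <stdio.h>

int main(void)
{
	unsigned long long s = 10, sectors = 5;   /* bad sectors 10..14 (made up) */
	int shift = 3;                            /* 8-sector granularity */

	unsigned long long next = s + sectors;    /* one past the end: 15 */

	next += (1ULL << shift) - 1;              /* 22: round the end up first */
	next >>= shift;                           /* 2: exclusive end, in blocks */
	s >>= shift;                              /* 1: start block (sector 8) */
	sectors = next - s;                       /* 1 block, i.e. sectors 8..15 */

	printf("store block %llu, length %llu block(s)\n", s, sectors);
	return 0;
}

Rounding the end up and the start down guarantees the stored range fully covers the bad sectors even when they do not fall on a block boundary.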
|
D | elevator.c |
     232  struct hlist_node *next;    in elv_rqhash_find() local
     235  hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {    in elv_rqhash_find()
     407  struct request *next)    in elv_merge_requests() argument
     412  e->type->ops.requests_merged(q, rq, next);    in elv_merge_requests()
|
D | bfq-iosched.c |
     889  rq = rq_entry_fifo(bfqq->fifo.next);    in bfq_check_fifo()
     904  struct request *next, *prev = NULL;    in bfq_find_next_rq() local
     907  next = bfq_check_fifo(bfqq, last);    in bfq_find_next_rq()
     908  if (next)    in bfq_find_next_rq()
     909  return next;    in bfq_find_next_rq()
     915  next = rb_entry_rq(rbnext);    in bfq_find_next_rq()
     919  next = rb_entry_rq(rbnext);    in bfq_find_next_rq()
     922  return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));    in bfq_find_next_rq()
    2336  struct request *next)    in bfq_requests_merged() argument
    2339  *next_bfqq = RQ_BFQQ(next);    in bfq_requests_merged()
    [all …]
|
D | bfq-wf2q.c |
     345  struct rb_node *next;    in bfq_idle_extract() local
     348  next = rb_next(&entity->rb_node);    in bfq_idle_extract()
     349  st->first_idle = bfq_entity_of(next);    in bfq_idle_extract()
     353  next = rb_prev(&entity->rb_node);    in bfq_idle_extract()
     354  st->last_idle = bfq_entity_of(next);    in bfq_idle_extract()
|
D | genhd.c |
     396  struct blk_major_name *next;    member
     413  for (dp = major_names[major_to_index(offset)]; dp; dp = dp->next)    in blkdev_show()
     480  p->next = NULL;    in register_blkdev()
     483  for (n = &major_names[index]; *n; n = &(*n)->next) {    in register_blkdev()
     511  for (n = &major_names[index]; *n; n = &(*n)->next)    in unregister_blkdev()
     518  *n = p->next;    in unregister_blkdev()
    1201  .next = disk_seqf_next,
    1667  .next = disk_seqf_next,
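register_blkdev()/unregister_blkdev() chain blk_major_name entries through ->next inside hash buckets and remove them with the pointer-to-pointer walk for (n = &head; *n; n = &(*n)->next), which needs no separate 'previous' pointer. A self-contained sketch of just that unlink idiom (the entry struct and unlink_major() are invented for the example):

#include <stdio.h>

struct entry {
	int major;
	struct entry *next;
};

/* Unlink the entry with the given key without tracking a 'prev' node:
 * 'n' always points at the pointer that currently references *n,
 * mirroring the unregister_blkdev() hits above. */
static void unlink_major(struct entry **head, int major)
{
	struct entry **n;

	for (n = head; *n; n = &(*n)->next) {
		if ((*n)->major == major) {
			*n = (*n)->next;   /* splice the entry out */
			break;
		}
	}
}

int main(void)
{
	struct entry c = { .major = 9,  .next = NULL };
	struct entry b = { .major = 8,  .next = &c };
	struct entry a = { .major = 11, .next = &b };
	struct entry *head = &a;

	unlink_major(&head, 8);
	for (struct entry *e = head; e; e = e->next)
		printf("major %d\n", e->major);   /* prints 11, then 9 */
	return 0;
}

The same pointer-to-pointer idiom appears in the rq_qos_del() hits in blk-rq-qos.h above.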
|
D | blk-mq-debugfs.c |
      70  .next = queue_requeue_list_next,
     390  .next = hctx_dispatch_next,
     659  .next = ctx_##name##_rq_list_next, \
     855  rqos = rqos->next;    in blk_mq_debugfs_register()
|
D | blk-iolatency.c |
     685  goto next;    in blkiolatency_timer_fn()
     691  goto next;    in blkiolatency_timer_fn()
     716  next:    in blkiolatency_timer_fn()
|
D | bio.c |
    1357  struct bio *bio, *next;    in bio_dirty_fn() local
    1360  next = bio_dirty_list;    in bio_dirty_fn()
    1364  while ((bio = next) != NULL) {    in bio_dirty_fn()
    1365  next = bio->bi_private;    in bio_dirty_fn()
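bio_dirty_fn() walks a privately chained list of bios, reading next from bi_private before each bio is handled, since handling it may give the bio back to the allocator. A userspace sketch of that save-the-successor-first walk, using an invented node type in place of struct bio:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *link;   /* plays the role of bio->bi_private */
};

/* Process and free every node: the successor must be read *before*
 * the node is handled and freed, as in the bio_dirty_fn() hits above. */
static void drain(struct node *list)
{
	struct node *node, *next;

	next = list;                       /* snapshot of the detached list */
	while ((node = next) != NULL) {
		next = node->link;         /* save before the node goes away */
		printf("handling node %d\n", node->id);
		free(node);
	}
}

int main(void)
{
	struct node *list = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->id = i;
		n->link = list;            /* push at head */
		list = n;
	}
	drain(list);
	return 0;
}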
|
D | kyber-iosched.c |
     592  struct request *rq, *next;    in kyber_insert_requests() local
     594  list_for_each_entry_safe(rq, next, rq_list, queuelist) {    in kyber_insert_requests()
     937  .next = kyber_##name##_rqs_next, \
|
D | blk-ioc.c |
     218  struct io_cq *icq = list_entry(icq_list->next,    in __ioc_clear_queue()
|
D | sed-opal.c |
    2121  struct opal_suspend_data *suspend, *next;    in clean_opal_dev() local
    2124  list_for_each_entry_safe(suspend, next, &dev->unlk_lst, node) {    in clean_opal_dev()
|
/block/partitions/ |
D | msdos.c |
     149  if (state->next == state->limit)    in parse_extended()
     173  sector_t offs, size, next;    in parse_extended() local
     182  next = this_sector + offs;    in parse_extended()
     186  if (next < first_sector)    in parse_extended()
     188  if (next + size > first_sector + first_size)    in parse_extended()
     192  put_partition(state, state->next, next, size);    in parse_extended()
     193  set_info(state, state->next, disksig);    in parse_extended()
     195  state->parts[state->next].flags = ADDPART_FLAG_RAID;    in parse_extended()
     197  if (++state->next == state->limit)    in parse_extended()
     282  for (i = 0; i < max_nparts && state->next < state->limit; i++) {    in parse_solaris_x86()
    [all …]
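Throughout the partition parsers, state->next is just the next free slot in the result table, bumped after each put_partition() and compared against state->limit before anything else is added. A minimal sketch of that slot-counter pattern; the parsed_state layout and the slot numbering below are stand-ins, not the kernel's struct parsed_partitions:

#include <stdio.h>

#define MAX_SLOTS 4

struct parsed_state {
	struct { unsigned long long from, size; } parts[MAX_SLOTS];
	int next;    /* next free slot, like state->next in the hits above */
	int limit;   /* one past the last usable slot */
};

/* Fill one slot; the caller owns the slot counter, as in parse_extended(). */
static void put_partition(struct parsed_state *state, int slot,
			  unsigned long long from, unsigned long long size)
{
	state->parts[slot].from = from;
	state->parts[slot].size = size;
}

int main(void)
{
	struct parsed_state state = { .next = 1, .limit = MAX_SLOTS };  /* slot 0 left unused here */
	unsigned long long starts[] = { 2048, 411648, 616448, 821248 };
	unsigned long long sizes[]  = { 409600, 204800, 102400, 51200 };

	for (int i = 0; i < 4; i++) {
		if (state.next == state.limit)   /* table full: stop parsing */
			break;
		put_partition(&state, state.next, starts[i], sizes[i]);
		state.next++;
	}

	for (int i = 1; i < state.next; i++)
		printf("slot %d: start %llu, size %llu\n",
		       i, state.parts[i].from, state.parts[i].size);
	return 0;
}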
|
D | acorn.c |
     529  sector_t next;    in adfspart_check_EESOX() local
     534  next = le32_to_cpu(p->start);    in adfspart_check_EESOX()
     536  put_partition(state, slot++, start, next - start);    in adfspart_check_EESOX()
     537  start = next;    in adfspart_check_EESOX()
|
D | check.h |
      21  int next;    member
|