/block/

blk-mq-sched.h
    50  if (e->type->ops.allow_merge)  in blk_mq_sched_allow_merge()
    51  return e->type->ops.allow_merge(q, rq, bio);  in blk_mq_sched_allow_merge()
    61  if (e->type->ops.completed_request)  in blk_mq_sched_completed_request()
    62  e->type->ops.completed_request(rq, now);  in blk_mq_sched_completed_request()
    72  if ((rq->rq_flags & RQF_ELVPRIV) && e->type->ops.requeue_request)  in blk_mq_sched_requeue_request()
    73  e->type->ops.requeue_request(rq);  in blk_mq_sched_requeue_request()
    81  if (e && e->type->ops.has_work)  in blk_mq_sched_has_work()
    82  return e->type->ops.has_work(hctx);  in blk_mq_sched_has_work()
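Every hit above follows the same guard-then-call shape: the queue's elevator_queue holds a pointer to its elevator_type, and each hook in ops is optional, so callers test the function pointer before invoking it. The same pattern recurs in blk-mq-sched.c, elevator.c and blk-mq.c below. A minimal standalone sketch of that dispatch, with deliberately shortened struct and hook names (not the kernel's real definitions):

#include <stdbool.h>

struct request;                 /* opaque for this sketch */
struct bio;

/* Cut-down stand-ins for elevator_mq_ops / elevator_type /
 * elevator_queue; the real structures carry many more hooks. */
struct sched_ops {
    bool (*allow_merge)(struct request *rq, struct bio *bio);
    bool (*has_work)(void *hctx);
};

struct sched_type {
    struct sched_ops ops;
};

struct sched_queue {
    struct sched_type *type;
};

/* Shape of blk_mq_sched_allow_merge(): only call the hook if the
 * scheduler implements it; default to "merge allowed" otherwise. */
static bool sched_allow_merge(struct sched_queue *e,
                              struct request *rq, struct bio *bio)
{
    if (e->type->ops.allow_merge)
        return e->type->ops.allow_merge(rq, bio);
    return true;
}

/* Shape of blk_mq_sched_has_work(): no elevator, or an elevator
 * without the hook, means no scheduler-internal work to dispatch. */
static bool sched_has_work(struct sched_queue *e, void *hctx)
{
    if (e && e->type->ops.has_work)
        return e->type->ops.has_work(hctx);
    return false;
}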
blk-mq-sched.c
    108  if (e->type->ops.has_work && !e->type->ops.has_work(hctx))  in __blk_mq_do_dispatch_sched()
    120  rq = e->type->ops.dispatch_request(hctx);  in __blk_mq_do_dispatch_sched()
    201  unsigned short idx = ctx->index_hw[hctx->type];  in blk_mq_next_ctx()
    351  enum hctx_type type;  in blk_mq_sched_bio_merge()  (local)
    353  if (e && e->type->ops.bio_merge) {  in blk_mq_sched_bio_merge()
    354  ret = e->type->ops.bio_merge(q, bio, nr_segs);  in blk_mq_sched_bio_merge()
    360  type = hctx->type;  in blk_mq_sched_bio_merge()
    362  list_empty_careful(&ctx->rq_lists[type]))  in blk_mq_sched_bio_merge()
    372  if (blk_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs))  in blk_mq_sched_bio_merge()
    448  e->type->ops.insert_requests(hctx, &list, at_head);  in blk_mq_sched_insert_request()
    [all …]
elevator.c
    65  if (e->type->ops.allow_merge)  in elv_iosched_allow_bio_merge()
    66  return e->type->ops.allow_merge(q, rq, bio);  in elv_iosched_allow_bio_merge()
    173  eq->type = e;  in elevator_alloc()
    187  elevator_put(e->type);  in elevator_release()
    350  if (e->type->ops.request_merge)  in elv_merge()
    351  return e->type->ops.request_merge(q, req, bio);  in elv_merge()
    403  enum elv_merge type)  in elv_merged_request()  (argument)
    407  if (e->type->ops.request_merged)  in elv_merged_request()
    408  e->type->ops.request_merged(q, rq, type);  in elv_merged_request()
    410  if (type == ELEVATOR_BACK_MERGE)  in elv_merged_request()
    [all …]
t10-pi.c
    33  csum_fn *fn, enum t10_dif_type type)  in t10_pi_generate()  (argument)
    43  if (type == T10_PI_TYPE1_PROTECTION)  in t10_pi_generate()
    57  csum_fn *fn, enum t10_dif_type type)  in t10_pi_verify()  (argument)
    61  BUG_ON(type == T10_PI_TYPE0_PROTECTION);  in t10_pi_verify()
    67  if (type == T10_PI_TYPE1_PROTECTION ||  in t10_pi_verify()
    68  type == T10_PI_TYPE2_PROTECTION) {  in t10_pi_verify()
    80  } else if (type == T10_PI_TYPE3_PROTECTION) {  in t10_pi_verify()
    289  enum t10_dif_type type)  in ext_pi_crc64_generate()  (argument)
    299  if (type == T10_PI_TYPE1_PROTECTION)  in ext_pi_crc64_generate()
    320  enum t10_dif_type type)  in ext_pi_crc64_verify()  (argument)
    [all …]
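The branches above are where the DIF type changes behaviour: Type 1 (and, per ext_pi_crc64_generate(), the 64-bit extended format) seeds the reference tag from the sector number and Type 1/2 verify it, while Type 3 carries no meaningful reference tag and only the guard checksum is checked. A simplified standalone sketch of that split; the tuple layout and names are shortened stand-ins for the kernel's struct t10_pi_tuple, and details such as the app-tag escape are omitted:

#include <stdint.h>
#include <stdbool.h>

/* Simplified 8-byte protection-information tuple (the kernel stores
 * these fields big-endian on the wire). */
struct pi_tuple {
    uint16_t guard_tag;         /* checksum over the data interval */
    uint16_t app_tag;
    uint32_t ref_tag;           /* low 32 bits of the LBA for Type 1 */
};

enum dif_type { DIF_TYPE0, DIF_TYPE1, DIF_TYPE2, DIF_TYPE3 };

typedef uint16_t (*csum_fn)(const void *data, unsigned int len);

/* Shape of t10_pi_generate(): the guard is always computed, but only
 * Type 1 seeds the reference tag from the sector number. */
static void pi_generate(struct pi_tuple *pi, const void *data,
                        unsigned int len, uint64_t lba,
                        csum_fn fn, enum dif_type type)
{
    pi->guard_tag = fn(data, len);
    pi->app_tag = 0;
    pi->ref_tag = (type == DIF_TYPE1) ? (uint32_t)lba : 0;
}

/* Shape of t10_pi_verify(): Type 1/2 must match the expected reference
 * tag, Type 3 ignores it; Type 0 means "no protection" and is treated
 * as a caller bug by the kernel. */
static bool pi_verify(const struct pi_tuple *pi, const void *data,
                      unsigned int len, uint64_t lba,
                      csum_fn fn, enum dif_type type)
{
    if ((type == DIF_TYPE1 || type == DIF_TYPE2) &&
        pi->ref_tag != (uint32_t)lba)
        return false;
    return pi->guard_tag == fn(data, len);
}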
blk-mq.h
    83  enum hctx_type type,  in blk_mq_map_queue_type()  (argument)
    86  return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);  in blk_mq_map_queue_type()
    91  enum hctx_type type = HCTX_TYPE_DEFAULT;  in blk_mq_get_hctx_type()  (local)
    97  type = HCTX_TYPE_POLL;  in blk_mq_get_hctx_type()
    99  type = HCTX_TYPE_READ;  in blk_mq_get_hctx_type()
    100  return type;  in blk_mq_get_hctx_type()
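Read together, these two helpers are the routing step from an I/O operation to a hardware queue: blk_mq_get_hctx_type() classifies the operation as poll, read or default, and blk_mq_map_queue_type() turns that class plus the submitting CPU into a hardware-context index via the per-type map. A rough standalone sketch of the idea; the conditions are reduced to booleans and the CPU count is an arbitrary assumption of this sketch:

#include <stdbool.h>

enum hctx_type { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

/* Stand-in for the per-type CPU -> hardware-queue map consulted through
 * q->tag_set->map[type].mq_map[cpu] in blk_mq_map_queue_type(). */
struct queue_map {
    unsigned int mq_map[64];            /* indexed by CPU; 64 is arbitrary here */
};

struct tag_set {
    struct queue_map map[HCTX_MAX_TYPES];
};

/* Shape of blk_mq_get_hctx_type(): polled I/O goes to the poll queues,
 * reads go to the read queues when the driver configured them, and
 * everything else falls back to the default map. */
static enum hctx_type get_hctx_type(bool polled, bool is_read,
                                    bool have_poll_queues, bool have_read_queues)
{
    if (polled && have_poll_queues)
        return HCTX_TYPE_POLL;
    if (is_read && have_read_queues)
        return HCTX_TYPE_READ;
    return HCTX_TYPE_DEFAULT;
}

/* The chosen type then selects which per-CPU map yields the hardware
 * queue index (the kernel looks that index up in an xarray of hctxs). */
static unsigned int map_queue(const struct tag_set *set,
                              enum hctx_type type, unsigned int cpu)
{
    return set->map[type].mq_map[cpu];
}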
blk-mq.c
    113  const int bit = ctx->index_hw[hctx->type];  in blk_mq_hctx_mark_pending()
    122  const int bit = ctx->index_hw[hctx->type];  in blk_mq_hctx_clear_pending()
    399  e->type->ops.prepare_request) {  in blk_mq_rq_ctx_init()
    400  e->type->ops.prepare_request(rq);  in blk_mq_rq_ctx_init()
    466  e->type->ops.limit_depth &&  in __blk_mq_alloc_requests()
    468  e->type->ops.limit_depth(data->cmd_flags, data);  in __blk_mq_alloc_requests()
    559  if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)  in blk_mq_alloc_cached_request()
    705  q->elevator->type->ops.finish_request)  in blk_mq_free_request()
    706  q->elevator->type->ops.finish_request(rq);  in blk_mq_free_request()
    1347  if (rq->mq_hctx->type != HCTX_TYPE_POLL)  in blk_rq_is_poll()
    [all …]
kyber-iosched.c
    214  unsigned int sched_domain, unsigned int type)  in flush_latency_buckets()  (argument)
    216  unsigned int *buckets = kqd->latency_buckets[sched_domain][type];  in flush_latency_buckets()
    217  atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];  in flush_latency_buckets()
    229  unsigned int sched_domain, unsigned int type,  in calculate_percentile()  (argument)
    232  unsigned int *buckets = kqd->latency_buckets[sched_domain][type];  in calculate_percentile()
    259  memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));  in calculate_percentile()
    262  kyber_latency_type_names[type], percentile,  in calculate_percentile()
    575  struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];  in kyber_bio_merge()
    600  struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];  in kyber_insert_requests()
    610  rq->mq_ctx->index_hw[hctx->type]);  in kyber_insert_requests()
    [all …]
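Here `type` indexes Kyber's per-domain latency histograms: flush_latency_buckets() drains each CPU's counters into the global buckets, and calculate_percentile() walks the summed buckets to find the one containing the requested percentile before zeroing them for the next window. A simplified standalone sketch of those two steps; the bucket count is arbitrary and Kyber's minimum-sample requirement is left out:

#include <stdatomic.h>
#include <string.h>

#define NR_BUCKETS 8

/* Per-CPU histogram, as Kyber keeps one per scheduling domain and
 * latency type: completions are binned by how they compare to the
 * domain's latency target. */
struct cpu_latency {
    atomic_uint buckets[NR_BUCKETS];
};

/* Shape of flush_latency_buckets(): drain one CPU's counters into the
 * global histogram, zeroing the per-CPU side with an atomic exchange. */
static void flush_buckets(unsigned int global[NR_BUCKETS],
                          struct cpu_latency *cpu)
{
    for (int b = 0; b < NR_BUCKETS; b++)
        global[b] += atomic_exchange(&cpu->buckets[b], 0);
}

/* Core of calculate_percentile(): walk the buckets until the running
 * count crosses the requested percentile, then reset the histogram. */
static int percentile_bucket(unsigned int buckets[NR_BUCKETS],
                             unsigned int percentile)
{
    unsigned int total = 0, acc = 0;
    int b;

    for (b = 0; b < NR_BUCKETS; b++)
        total += buckets[b];
    if (!total)
        return -1;                      /* nothing sampled this window */

    for (b = 0; b < NR_BUCKETS - 1; b++) {
        acc += buckets[b];
        if (acc * 100 >= total * percentile)
            break;
    }
    memset(buckets, 0, NR_BUCKETS * sizeof(buckets[0]));
    return b;
}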
sed-opal.c
    55  enum opal_response_token type;  (member)
    804  tok->type = OPAL_DTA_TOKENID_SINT;  in response_parse_tiny()
    806  tok->type = OPAL_DTA_TOKENID_UINT;  in response_parse_tiny()
    821  tok->type = OPAL_DTA_TOKENID_BYTESTRING;  in response_parse_short()
    823  tok->type = OPAL_DTA_TOKENID_SINT;  in response_parse_short()
    828  tok->type = OPAL_DTA_TOKENID_UINT;  in response_parse_short()
    851  tok->type = OPAL_DTA_TOKENID_BYTESTRING;  in response_parse_medium()
    853  tok->type = OPAL_DTA_TOKENID_SINT;  in response_parse_medium()
    855  tok->type = OPAL_DTA_TOKENID_UINT;  in response_parse_medium()
    868  tok->type = OPAL_DTA_TOKENID_BYTESTRING;  in response_parse_long()
    [all …]
blk-mq-debugfs.c
    412  seq_printf(m, "%s\n", hctx_types[hctx->type]);  in hctx_type_show()
    542  #define CTX_RQ_SEQ_OPS(name, type) \  (argument)
    549  return seq_list_start(&ctx->rq_lists[type], *pos); \
    557  return seq_list_next(v, &ctx->rq_lists[type], pos); \
    774  struct elevator_type *e = q->elevator->type;  in blk_mq_debugfs_register_sched()
    847  struct elevator_type *e = q->elevator->type;  in blk_mq_debugfs_register_sched_hctx()
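CTX_RQ_SEQ_OPS(name, type) is a stamp-out macro: a single definition generates the debugfs seq_file iterator functions for each per-type software-queue request list, pasting the name into the generated identifiers with ##. A toy standalone example of the same token-pasting technique (none of these names are the kernel's, only the pattern is):

#include <stdio.h>

enum hctx_type { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

struct ctx {
    int rq_count[HCTX_MAX_TYPES];
};

/* One macro stamps out an identical accessor per hardware-context type;
 * ## pastes the short name into each generated function's identifier. */
#define CTX_RQ_COUNT_FN(name, type)                               \
static int ctx_##name##_rq_count(const struct ctx *ctx)          \
{                                                                 \
    return ctx->rq_count[type];                                   \
}

CTX_RQ_COUNT_FN(default, HCTX_TYPE_DEFAULT)
CTX_RQ_COUNT_FN(read, HCTX_TYPE_READ)
CTX_RQ_COUNT_FN(poll, HCTX_TYPE_POLL)

int main(void)
{
    struct ctx c = { .rq_count = { 3, 1, 0 } };

    printf("default=%d read=%d poll=%d\n",
           ctx_default_rq_count(&c), ctx_read_rq_count(&c),
           ctx_poll_rq_count(&c));
    return 0;
}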
blk-ioc.c
    48  struct elevator_type *et = icq->q->elevator->type;  in ioc_exit_icq()
    77  struct elevator_type *et = q->elevator->type;  in ioc_destroy_icq()
    366  struct elevator_type *et = q->elevator->type;  in ioc_create_icq()
bfq-iosched.h
    1087  char type = bfq_bfqq_sync(bfqq) ? 'S' : 'A';  in bfq_bfqq_name()  (local)
    1090  snprintf(str, len, "bfq%d%c", bfqq->pid, type);  in bfq_bfqq_name()
    1092  snprintf(str, len, "bfqSHARED-%c", type);  in bfq_bfqq_name()
ioctl.c
    293  return ops->pr_reserve(bdev, rsv.key, rsv.type, rsv.flags);  in blkdev_pr_reserve()
    311  return ops->pr_release(bdev, rsv.key, rsv.type);  in blkdev_pr_release()
    329  return ops->pr_preempt(bdev, p.old_key, p.new_key, p.type, abort);  in blkdev_pr_preempt()
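In the persistent-reservation ioctls, `type` is the reservation type copied in from user space and passed straight through to the driver's pr_ops hook. A bare-bones standalone sketch of that dispatch; the argument lists mirror the call sites above, while the missing-hook fallback and the struct names are assumptions of this sketch:

#include <stdint.h>
#include <errno.h>

struct block_dev;                       /* opaque here */

struct pr_reservation {
    uint64_t key;
    uint32_t type;                      /* reservation type from user space */
    uint32_t flags;
};

/* Cut-down stand-in for the driver-provided pr_ops table. */
struct pr_ops {
    int (*pr_reserve)(struct block_dev *bdev, uint64_t key,
                      uint32_t type, uint32_t flags);
    int (*pr_release)(struct block_dev *bdev, uint64_t key,
                      uint32_t type);
};

static int do_pr_reserve(struct block_dev *bdev, const struct pr_ops *ops,
                         const struct pr_reservation *rsv)
{
    if (!ops || !ops->pr_reserve)
        return -EOPNOTSUPP;             /* assumed fallback when the driver has no hook */
    return ops->pr_reserve(bdev, rsv->key, rsv->type, rsv->flags);
}

static int do_pr_release(struct block_dev *bdev, const struct pr_ops *ops,
                         const struct pr_reservation *rsv)
{
    if (!ops || !ops->pr_release)
        return -EOPNOTSUPP;
    return ops->pr_release(bdev, rsv->key, rsv->type);
}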
elevator.h
    108  struct elevator_type *type;  (member)
blk-zoned.c
    511  switch (zone->type) {  in blk_revalidate_zone_cb()
    532  disk->disk_name, (int)zone->type, zone->start);  in blk_revalidate_zone_cb()
mq-deadline.c
    201  enum elv_merge type)  in dd_request_merged()  (argument)
    211  if (type == ELEVATOR_FRONT_MERGE) {  in dd_request_merged()
blk-iocost.c
    195  #define TRACE_IOCG_PATH(type, iocg, ...) \  (argument)
    198  if (trace_iocost_##type##_enabled()) { \
    202  trace_iocost_##type(iocg, trace_iocg_path, \
    209  #define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0)  (argument)
genhd.c
    1423  disk_to_dev(disk)->type = &disk_type;  in __alloc_disk_node()
/block/partitions/

ibm.c
    70  char type[],  in find_label()  (argument)
    114  strncpy(type, label->vol.vollbl, 4);  in find_label()
    117  strncpy(type, label->lnx.vollbl, 4);  in find_label()
    120  EBCASC(type, 4);  in find_label()
    229  if (!strcmp(info->type, "ECKD"))  in find_lnx1_partitions()
    300  char type[5] = {0,};  in ibm_partition()  (local)
    333  if (find_label(state, info, geo, blocksize, &labelsect, name, type,  in ibm_partition()
    335  if (!strncmp(type, "VOL1", 4)) {  in ibm_partition()
    338  } else if (!strncmp(type, "LNX1", 4)) {  in ibm_partition()
    342  } else if (!strncmp(type, "CMS1", 4)) {  in ibm_partition()
mac.c
    89  if (!strncasecmp(part->type, "Linux_RAID", 10))  in mac_partition()
    101  mac_fix_string(part->type, 32);  in mac_partition()
    107  if (strcasecmp(part->type, "Apple_UNIX_SVR2") == 0  in mac_partition()
    108  || (strncasecmp(part->type, "Linux", 5) == 0  in mac_partition()
    109  && strcasecmp(part->type, "Linux_swap") != 0)) {  in mac_partition()
sgi.c
    30  __be32 type; /* Type of this partition */  (member)
    79  if (be32_to_cpu(p->type) == LINUX_RAID_PARTITION)  in sgi_partition()
ldm.h
    127  u8 type;  (member)
    170  u8 type;  (member)
mac.h
    18  char type[32]; /* string type description */  (member)
ldm.c
    751  comp->type = buffer[0x18 + r_vstate];  in ldm_parse_cmp3()
    1118  vb->type = buf[0x13];  in ldm_parse_vblk()
    1122  switch (vb->type) {  in ldm_parse_vblk()
    1134  (unsigned long long) vb->obj_id, vb->type);  in ldm_parse_vblk()
    1137  (unsigned long long) vb->obj_id, vb->type);  in ldm_parse_vblk()
    1175  switch (vb->type) {  in ldm_ldmdb_add()
acorn.c
    415  char type[8];  (member)
core.c
    362  pdev->type = &part_type;  in add_partition()