/block/partitions/ |
D | acorn.c |
     25  adfs_partition(struct parsed_partitions *state, char *name, char *data,  in adfs_partition() argument
     31  if (adfs_checkbblk(data))  in adfs_partition()
     34  dr = (struct adfs_discrecord *)(data + 0x1c0);  in adfs_partition()
    164  unsigned char *data;  in adfspart_check_CUMANA() local
    186  data = read_part_sector(state, start_blk * 2 + 6, &sect);  in adfspart_check_CUMANA()
    187  if (!data)  in adfspart_check_CUMANA()
    193  dr = adfs_partition(state, name, data, first_sector, slot++);  in adfspart_check_CUMANA()
    199  nr_sects = (data[0x1fd] + (data[0x1fe] << 8)) *  in adfspart_check_CUMANA()
    211  switch (data[0x1fc] & 15) {  in adfspart_check_CUMANA()
    254  unsigned char *data;  in adfspart_check_ADFS() local
[all …]
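Every parser in this directory (including sysv68.c, msdos.c, mac.c, ibm.c, ultrix.c, karma.c, and osf.c below) handles `data` the same way: map a sector with read_part_sector(), treat the returned bytes as a read-only on-disk structure, and release the mapping with put_dev_sector(). A minimal sketch of that pattern, assuming the helpers declared in block/partitions/check.h; example_partition and EXAMPLE_MAGIC are hypothetical:

    #include "check.h"              /* parsed_partitions, read_part_sector() */

    #define EXAMPLE_MAGIC 0xAA55    /* hypothetical on-disk magic */

    int example_partition(struct parsed_partitions *state)
    {
            Sector sect;
            unsigned char *data;

            /* Map the first sector; data stays valid until put_dev_sector(). */
            data = read_part_sector(state, 0, &sect);
            if (!data)
                    return -1;

            /* Validate before trusting anything in the sector. */
            if (le16_to_cpu(*(__le16 *)(data + 510)) != EXAMPLE_MAGIC) {
                    put_dev_sector(sect);
                    return 0;               /* not our format */
            }

            /* Record slot 1: start sector 2, length 1024 sectors. */
            put_partition(state, 1, 2, 1024);
            put_dev_sector(sect);
            return 1;                       /* found */
    }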
|
D | ldm.c |
     67  static bool ldm_parse_privhead(const u8 *data, struct privhead *ph)  in ldm_parse_privhead() argument
     71  BUG_ON(!data || !ph);  in ldm_parse_privhead()
     72  if (MAGIC_PRIVHEAD != get_unaligned_be64(data)) {  in ldm_parse_privhead()
     77  ph->ver_major = get_unaligned_be16(data + 0x000C);  in ldm_parse_privhead()
     78  ph->ver_minor = get_unaligned_be16(data + 0x000E);  in ldm_parse_privhead()
     79  ph->logical_disk_start = get_unaligned_be64(data + 0x011B);  in ldm_parse_privhead()
     80  ph->logical_disk_size = get_unaligned_be64(data + 0x0123);  in ldm_parse_privhead()
     81  ph->config_start = get_unaligned_be64(data + 0x012B);  in ldm_parse_privhead()
     82  ph->config_size = get_unaligned_be64(data + 0x0133);  in ldm_parse_privhead()
    104  if (uuid_parse(data + 0x0030, &ph->disk_id)) {  in ldm_parse_privhead()
[all …]
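ldm_parse_privhead() decodes fixed-offset, big-endian on-disk fields straight out of the raw buffer, which is what the get_unaligned_be*() calls above do. A sketch of that decoding style, with a hypothetical header layout rather than LDM's real offsets:

    #include <linux/types.h>
    #include <asm/unaligned.h>

    struct example_header {
            u16 ver_major;
            u64 disk_start;
    };

    static bool parse_example_header(const u8 *data, struct example_header *h)
    {
            /* get_unaligned_be*() reads big-endian fields at any alignment. */
            if (get_unaligned_be16(data) != 0x4D5A)   /* hypothetical magic */
                    return false;

            h->ver_major  = get_unaligned_be16(data + 0x0C);
            h->disk_start = get_unaligned_be64(data + 0x1B);
            return true;
    }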
|
D | amiga.c |
     39  unsigned char *data;  in amiga_partition() local
     53  data = read_part_sector(state, blk, &sect);  in amiga_partition()
     54  if (!data) {  in amiga_partition()
     60  if (*(__be32 *)data != cpu_to_be32(IDNAME_RIGIDDISK))  in amiga_partition()
     63  rdb = (struct RigidDiskBlock *)data;  in amiga_partition()
     64  if (checksum_block((__be32 *)data, be32_to_cpu(rdb->rdb_SummedLongs) & 0x7F) == 0)  in amiga_partition()
     69  *(__be32 *)(data+0xdc) = 0;  in amiga_partition()
     70  if (checksum_block((__be32 *)data,  in amiga_partition()
    100  data = read_part_sector(state, blk, &sect);  in amiga_partition()
    101  if (!data) {  in amiga_partition()
[all …]
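checksum_block() validates the RigidDiskBlock by summing its big-endian longwords; a valid block sums to zero (line 69 zeroes one word and re-sums to tolerate a known corruption). A sketch equivalent to the static helper amiga.c uses:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Sum size big-endian longwords; a valid RDB block sums to 0. */
    static u32 example_checksum_block(__be32 *m, int size)
    {
            u32 sum = 0;

            while (size--)
                    sum += be32_to_cpu(*m++);
            return sum;
    }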
|
D | sysv68.c |
     54  unsigned char *data;  in sysv68_partition() local
     59  data = read_part_sector(state, 0, &sect);  in sysv68_partition()
     60  if (!data)  in sysv68_partition()
     63  b = (struct dkblk0 *)data;  in sysv68_partition()
     72  data = read_part_sector(state, i, &sect);  in sysv68_partition()
     73  if (!data)  in sysv68_partition()
     79  slice = (struct slice *)data;  in sysv68_partition()
|
D | msdos.c |
    136  unsigned char *data;  in parse_extended() local
    152  data = read_part_sector(state, this_sector, &sect);  in parse_extended()
    153  if (!data)  in parse_extended()
    156  if (!msdos_magic_present(data + 510))  in parse_extended()
    159  p = (struct msdos_partition *) (data + 0x1be);  in parse_extended()
    534  unsigned char *data;  in parse_minix() local
    538  data = read_part_sector(state, offset, &sect);  in parse_minix()
    539  if (!data)  in parse_minix()
    542  p = (struct msdos_partition *)(data + 0x1be);  in parse_minix()
    547  if (msdos_magic_present(data + 510) &&  in parse_minix()
[all …]
|
D | mac.c |
     34  unsigned char *data;  in mac_partition() local
     55  data = read_part_sector(state, datasize / 512, &sect);  in mac_partition()
     56  if (!data)  in mac_partition()
     61  part = (struct mac_partition *) (data + partoffset);  in mac_partition()
     79  data = read_part_sector(state, pos/512, &sect);  in mac_partition()
     80  if (!data)  in mac_partition()
     82  part = (struct mac_partition *) (data + pos%512);  in mac_partition()
|
D | ibm.c |
     74  unsigned char *data;  in find_label() local
    102  data = read_part_sector(state, testsect[i], &sect);  in find_label()
    103  if (data == NULL)  in find_label()
    105  memcpy(label, data, sizeof(*label));  in find_label()
    106  memcpy(temp, data, 4);  in find_label()
    143  unsigned char *data;  in find_vol1_partitions() local
    157  data = read_part_sector(state, blk * secperblk, &sect);  in find_vol1_partitions()
    158  while (data != NULL) {  in find_vol1_partitions()
    159  memcpy(&f1, data, sizeof(struct vtoc_format1_label));  in find_vol1_partitions()
    167  data = read_part_sector(state, blk * secperblk, &sect);  in find_vol1_partitions()
[all …]
|
D | ultrix.c |
     16  unsigned char *data;  in ultrix_partition() local
     29  data = read_part_sector(state, (16384 - sizeof(*label))/512, &sect);  in ultrix_partition()
     30  if (!data)  in ultrix_partition()
     33  label = (struct ultrix_disklabel *)(data + 512 - sizeof(*label));  in ultrix_partition()
|
D | karma.c |
     20  unsigned char *data;  in karma_partition() local
     35  data = read_part_sector(state, 0, &sect);  in karma_partition()
     36  if (!data)  in karma_partition()
     39  label = (struct disklabel *)data;  in karma_partition()
|
D | osf.c |
     22  unsigned char *data;  in osf_partition() local
     56  data = read_part_sector(state, 0, &sect);  in osf_partition()
     57  if (!data)  in osf_partition()
     60  label = (struct disklabel *) (data+64);  in osf_partition()
|
/block/ |
D | blk-mq-tag.c |
    102  static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,  in __blk_mq_get_tag() argument
    105  if (data->shallow_depth)  in __blk_mq_get_tag()
    106  return sbitmap_queue_get_shallow(bt, data->shallow_depth);  in __blk_mq_get_tag()
    111  unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,  in blk_mq_get_tags() argument
    114  struct blk_mq_tags *tags = blk_mq_tags_from_data(data);  in blk_mq_get_tags()
    118  if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||  in blk_mq_get_tags()
    119  data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)  in blk_mq_get_tags()
    126  unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)  in blk_mq_get_tag() argument
    128  struct blk_mq_tags *tags = blk_mq_tags_from_data(data);  in blk_mq_get_tag()
    135  if (data->flags & BLK_MQ_REQ_RESERVED) {  in blk_mq_get_tag()
[all …]
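data->shallow_depth is how an I/O scheduler throttles async tag allocation: when set (see kyber_limit_depth() and dd_limit_depth() further down), the tag comes from sbitmap_queue_get_shallow(), which restricts how deep into the bitmap the allocation may reach. A simplified sketch of the dispatch __blk_mq_get_tag() performs; example_get_tag is hypothetical:

    #include <linux/sbitmap.h>

    static int example_get_tag(struct sbitmap_queue *bt,
                               unsigned int shallow_depth)
    {
            /* Both calls return a free tag index, or -1 if none. */
            if (shallow_depth)
                    return sbitmap_queue_get_shallow(bt, shallow_depth);
            return __sbitmap_queue_get(bt);
    }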
|
D | blk-mq-debugfs.c |
     28  static int queue_poll_stat_show(void *data, struct seq_file *m)  in queue_poll_stat_show() argument
     30  struct request_queue *q = data;  in queue_poll_stat_show()
     99  static int queue_pm_only_show(void *data, struct seq_file *m)  in queue_pm_only_show() argument
    101  struct request_queue *q = data;  in queue_pm_only_show()
    136  static int queue_state_show(void *data, struct seq_file *m)  in queue_state_show() argument
    138  struct request_queue *q = data;  in queue_state_show()
    146  static ssize_t queue_state_write(void *data, const char __user *buf,  in queue_state_write() argument
    149  struct request_queue *q = data;  in queue_state_write()
    200  static int hctx_state_show(void *data, struct seq_file *m)  in hctx_state_show() argument
    202  struct blk_mq_hw_ctx *hctx = data;  in hctx_state_show()
[all …]
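All of these show/write methods share one convention: each debugfs attribute is registered with a void *data cookie (a request_queue, hctx, ...) and the handler casts it back to the object it was registered with. A sketch of that shape, with the attr struct modeled loosely on blk_mq_debugfs_attr:

    #include <linux/blkdev.h>
    #include <linux/seq_file.h>

    struct example_debugfs_attr {
            const char *name;
            int (*show)(void *data, struct seq_file *m);
    };

    static int example_pm_only_show(void *data, struct seq_file *m)
    {
            struct request_queue *q = data; /* cookie registered for this file */

            seq_printf(m, "%d\n", blk_queue_pm_only(q));
            return 0;
    }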
|
D | blk-rq-qos.c |
    209  struct rq_qos_wait_data *data = container_of(curr,  in rq_qos_wake_function() local
    217  if (!data->cb(data->rqw, data->private_data))  in rq_qos_wake_function()
    220  data->got_token = true;  in rq_qos_wake_function()
    223  wake_up_process(data->task);  in rq_qos_wake_function()
    247  struct rq_qos_wait_data data = {  in rq_qos_wait() local
    250  .entry = LIST_HEAD_INIT(data.wq.entry),  in rq_qos_wait()
    263  has_sleeper = !prepare_to_wait_exclusive(&rqw->wait, &data.wq,  in rq_qos_wait()
    267  if (data.got_token)  in rq_qos_wait()
    270  finish_wait(&rqw->wait, &data.wq);  in rq_qos_wait()
    278  if (data.got_token)  in rq_qos_wait()
[all …]
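rq_qos_wait() embeds a wait_queue_entry inside a larger on-stack struct, and its custom wake function recovers the outer struct with container_of(). A sketch of that embedded-wait-entry pattern; names mirror blk-rq-qos.c but the bodies are simplified:

    #include <linux/wait.h>
    #include <linux/sched.h>

    struct example_wait_data {
            struct wait_queue_entry wq;
            struct task_struct *task;
            bool got_token;
    };

    static int example_wake_function(struct wait_queue_entry *curr,
                                     unsigned int mode, int wake_flags,
                                     void *key)
    {
            struct example_wait_data *data =
                    container_of(curr, struct example_wait_data, wq);

            data->got_token = true;
            /* Dequeue before waking so a second wakeup can't pick us again. */
            list_del_init(&curr->entry);
            wake_up_process(data->task);
            return 1;
    }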
|
D | blk-wbt.c |
    355  struct rq_wb *rwb = cb->data;  in wb_timer_fn()
    498  struct wbt_wait_data *data = private_data;  in wbt_inflight_cb() local
    499  return rq_wait_inc_below(rqw, get_limit(data->rwb, data->opf));  in wbt_inflight_cb()
    504  struct wbt_wait_data *data = private_data;  in wbt_cleanup_cb() local
    505  wbt_rqw_done(data->rwb, rqw, data->wb_acct);  in wbt_cleanup_cb()
    516  struct wbt_wait_data data = {  in __wbt_wait() local
    522  rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);  in __wbt_wait()
    718  static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)  in wbt_curr_win_nsec_show() argument
    720  struct rq_qos *rqos = data;  in wbt_curr_win_nsec_show()
    727  static int wbt_enabled_show(void *data, struct seq_file *m)  in wbt_enabled_show() argument
[all …]
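This is the caller side of the rq_qos_wait() machinery shown above: wbt bundles per-wait state on the stack, passes it as private_data, and the two callbacks cast it back. A sketch under the callback typedefs from blk-rq-qos.h; the fixed limit is a placeholder for what wbt derives from rwb state:

    #include "blk-rq-qos.h"

    struct example_wait_private {
            unsigned int inflight_limit;
    };

    static bool example_inflight_cb(struct rq_wait *rqw, void *private_data)
    {
            struct example_wait_private *data = private_data;

            /* Try to take an inflight slot below the current limit. */
            return rq_wait_inc_below(rqw, data->inflight_limit);
    }

    static void example_cleanup_cb(struct rq_wait *rqw, void *private_data)
    {
            /* wbt_cleanup_cb() undoes accounting here via wbt_rqw_done(). */
    }

    static void example_wait(struct rq_wait *rqw)
    {
            struct example_wait_private data = { .inflight_limit = 32 };

            rq_qos_wait(rqw, &data, example_inflight_cb, example_cleanup_cb);
    }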
|
D | sed-opal.c |
     33  int (*fn)(struct opal_dev *dev, void *data);
     34  void *data;  member
     79  void *data;  member
    268  static bool check_tper(const void *data)  in check_tper() argument
    270  const struct d0_tper_features *tper = data;  in check_tper()
    282  static bool check_lcksuppt(const void *data)  in check_lcksuppt() argument
    284  const struct d0_locking_features *lfeat = data;  in check_lcksuppt()
    290  static bool check_lckenabled(const void *data)  in check_lckenabled() argument
    292  const struct d0_locking_features *lfeat = data;  in check_lckenabled()
    298  static bool check_locked(const void *data)  in check_locked() argument
[all …]
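Each check_*() above casts the raw level-0 discovery payload to its feature struct and tests one flag. A sketch of that shape; the struct layout is illustrative, not the real TCG discovery format:

    #include <linux/types.h>

    struct example_locking_features {
            u8 supported_features;  /* bit 0: locking supported, bit 2: locked */
    };

    static bool example_check_lcksuppt(const void *data)
    {
            const struct example_locking_features *lfeat = data;

            return lfeat->supported_features & 0x01;
    }

    static bool example_check_locked(const void *data)
    {
            const struct example_locking_features *lfeat = data;

            return lfeat->supported_features & 0x04;
    }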
|
D | blk-map.c |
     21  static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,  in bio_alloc_map_data() argument
     26  if (data->nr_segs > UIO_MAXIOV)  in bio_alloc_map_data()
     29  bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);  in bio_alloc_map_data()
     32  memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);  in bio_alloc_map_data()
     33  bmd->iter = *data;  in bio_alloc_map_data()
    386  static struct bio *bio_map_kern(struct request_queue *q, void *data,  in bio_map_kern() argument
    389  unsigned long kaddr = (unsigned long)data;  in bio_map_kern()
    393  bool is_vmalloc = is_vmalloc_addr(data);  in bio_map_kern()
    404  flush_kernel_vmap_range(data, len);  in bio_map_kern()
    405  bio->bi_private = data;  in bio_map_kern()
[all …]
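bio_alloc_map_data() is a textbook flexible-array allocation: struct_size() computes sizeof(*bmd) plus nr_segs trailing iovecs with overflow checking, so header and array land in one kmalloc(). A simplified sketch modeled on bio_map_data:

    #include <linux/slab.h>
    #include <linux/uio.h>
    #include <linux/overflow.h>

    struct example_map_data {
            struct iov_iter iter;
            struct iovec iov[];     /* flexible array, sized at allocation */
    };

    static struct example_map_data *
    example_alloc_map_data(struct iov_iter *data, gfp_t gfp_mask)
    {
            struct example_map_data *bmd;

            if (data->nr_segs > UIO_MAXIOV)
                    return NULL;

            /* One allocation covers the header and all nr_segs iovecs. */
            bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
            if (!bmd)
                    return NULL;

            bmd->iter = *data;
            return bmd;
    }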
|
D | blk-mq.c |
    224  q->q_usage_counter.data->force_atomic = true;  in __blk_mq_unfreeze_queue()
    341  static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,  in blk_mq_rq_ctx_init() argument
    344  struct blk_mq_ctx *ctx = data->ctx;  in blk_mq_rq_ctx_init()
    345  struct blk_mq_hw_ctx *hctx = data->hctx;  in blk_mq_rq_ctx_init()
    346  struct request_queue *q = data->q;  in blk_mq_rq_ctx_init()
    352  rq->cmd_flags = data->cmd_flags;  in blk_mq_rq_ctx_init()
    354  if (data->flags & BLK_MQ_REQ_PM)  in blk_mq_rq_ctx_init()
    355  data->rq_flags |= RQF_PM;  in blk_mq_rq_ctx_init()
    357  data->rq_flags |= RQF_IO_STAT;  in blk_mq_rq_ctx_init()
    358  rq->rq_flags = data->rq_flags;  in blk_mq_rq_ctx_init()
[all …]
|
D | kyber-iosched.c |
    556  static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)  in kyber_limit_depth() argument
    563  struct kyber_queue_data *kqd = data->q->elevator->elevator_data;  in kyber_limit_depth()
    565  data->shallow_depth = kqd->async_depth;  in kyber_limit_depth()
    668  static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data)  in flush_busy_kcq() argument
    670  struct flush_kcq_data *flush_data = data;  in flush_busy_kcq()
    686  struct flush_kcq_data data = {  in kyber_flush_busy_kcqs() local
    693  flush_busy_kcq, &data);  in kyber_flush_busy_kcqs()
    902  static int kyber_##name##_tokens_show(void *data, struct seq_file *m) \
    904  struct request_queue *q = data; \
    946  static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
[all …]
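kyber_flush_busy_kcqs() shows the closure idiom for sbitmap walks: sbitmap_for_each_set() only hands the callback an opaque void *data, so extra arguments travel in a small stack struct. A trimmed sketch; names mirror kyber-iosched.c but the per-bit work is elided:

    #include <linux/sbitmap.h>
    #include <linux/list.h>

    struct example_flush_data {
            struct list_head *list;
    };

    static bool example_flush_one(struct sbitmap *sb, unsigned int bitnr,
                                  void *data)
    {
            struct example_flush_data *flush_data = data;

            /* Kyber splices the bitnr'th ctx queue onto flush_data->list. */
            (void)flush_data;
            return true;    /* keep iterating over set bits */
    }

    static void example_flush_all(struct sbitmap *sb, struct list_head *list)
    {
            struct example_flush_data data = { .list = list };

            sbitmap_for_each_set(sb, example_flush_one, &data);
    }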
|
D | ioprio.c |
     36  int data = IOPRIO_PRIO_DATA(ioprio);  in ioprio_check_cap() local
     52  if (data >= IOPRIO_NR_LEVELS || data < 0)  in ioprio_check_cap()
     58  if (data)  in ioprio_check_cap()
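Here `data` is the priority level packed into the low bits of an ioprio value, alongside the scheduling class in the high bits. A sketch using the IOPRIO_PRIO_* macros from include/uapi/linux/ioprio.h that ioprio_check_cap() relies on:

    #include <linux/ioprio.h>

    static void example_decode_ioprio(void)
    {
            /* Pack class BE, level 4 into one ioprio value. */
            int ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);
            int class = IOPRIO_PRIO_CLASS(ioprio);  /* IOPRIO_CLASS_BE */
            int data  = IOPRIO_PRIO_DATA(ioprio);   /* 4 */

            /* ioprio_check_cap() rejects data outside 0..IOPRIO_NR_LEVELS-1. */
            (void)class;
            (void)data;
    }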
|
D | blk-zoned.c |
    145  unsigned int nr_zones, report_zones_cb cb, void *data)  in blkdev_report_zones() argument
    156  return disk->fops->report_zones(disk, sector, nr_zones, cb, data);  in blkdev_report_zones()
    168  void *data)  in blk_zone_need_reset_cb() argument
    181  set_bit(idx, (unsigned long *)data);  in blk_zone_need_reset_cb()
    319  void *data)  in blkdev_copy_zone_to_user() argument
    321  struct zone_report_args *args = data;  in blkdev_copy_zone_to_user()
    469  void *data)  in blk_revalidate_zone_cb() argument
    471  struct blk_revalidate_zone_args *args = data;  in blk_revalidate_zone_cb()
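blkdev_report_zones() invokes its report_zones_cb once per zone with the caller's void *data, so per-walk state is threaded through that cookie. A sketch of a consumer that just counts zones; count_zone_cb and example_count_zones are hypothetical, and the disk->part0 bdev access assumes a recent kernel:

    #include <linux/blkdev.h>

    static int count_zone_cb(struct blk_zone *zone, unsigned int idx,
                             void *data)
    {
            unsigned int *count = data;

            (*count)++;
            return 0;       /* nonzero would abort the walk */
    }

    static int example_count_zones(struct gendisk *disk, unsigned int nr_zones)
    {
            unsigned int count = 0;
            int ret;

            ret = blkdev_report_zones(disk->part0, 0, nr_zones,
                                      count_zone_cb, &count);
            return ret < 0 ? ret : count;
    }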
|
D | mq-deadline.c |
    604  static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)  in dd_limit_depth() argument
    606  struct deadline_data *dd = data->q->elevator->elevator_data;  in dd_limit_depth()
    616  data->shallow_depth = dd->async_depth;  in dd_limit_depth()
   1034  static int deadline_##name##_next_rq_show(void *data, \
   1037  struct request_queue *q = data; \
   1055  static int deadline_batching_show(void *data, struct seq_file *m)  in deadline_batching_show() argument
   1057  struct request_queue *q = data;  in deadline_batching_show()
   1064  static int deadline_starved_show(void *data, struct seq_file *m)  in deadline_starved_show() argument
   1066  struct request_queue *q = data;  in deadline_starved_show()
   1073  static int dd_async_depth_show(void *data, struct seq_file *m)  in dd_async_depth_show() argument
[all …]
|
D | blk-mq-debugfs-zoned.c |
      9  int queue_zone_wlock_show(void *data, struct seq_file *m)  in queue_zone_wlock_show() argument
     11  struct request_queue *q = data;  in queue_zone_wlock_show()
|
D | t10-pi.c |
     17  static __be16 t10_pi_crc_fn(void *data, unsigned int len)  in t10_pi_crc_fn() argument
     19  return cpu_to_be16(crc_t10dif(data, len));  in t10_pi_crc_fn()
     22  static __be16 t10_pi_ip_fn(void *data, unsigned int len)  in t10_pi_ip_fn() argument
     24  return (__force __be16)ip_compute_csum(data, len);  in t10_pi_ip_fn()
    283  static __be64 ext_pi_crc64(void *data, unsigned int len)  in ext_pi_crc64() argument
    285  return cpu_to_be64(crc64_rocksoft(data, len));  in ext_pi_crc64()
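These helpers all fit one checksum-function hook: compute a 16-bit guard over a buffer and return it big-endian, as the T10 PI guard field requires. A sketch of that hook shape; example_crc_fn and example_guard are hypothetical, while crc_t10dif() is the real kernel CRC16-T10DIF helper:

    #include <linux/types.h>
    #include <linux/crc-t10dif.h>
    #include <asm/byteorder.h>

    typedef __be16 (csum_fn)(void *data, unsigned int len);

    static __be16 example_crc_fn(void *data, unsigned int len)
    {
            return cpu_to_be16(crc_t10dif(data, len));
    }

    static __be16 example_guard(csum_fn *fn, void *sector_buf,
                                unsigned int len)
    {
            return fn(sector_buf, len);     /* stored in the PI guard field */
    }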
|
D | blk-mq.h |
    172  static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)  in blk_mq_tags_from_data() argument
    174  if (!(data->rq_flags & RQF_ELV))  in blk_mq_tags_from_data()
    175  return data->hctx->tags;  in blk_mq_tags_from_data()
    176  return data->hctx->sched_tags;  in blk_mq_tags_from_data()
|
D | blk-stat.h |
     60  void *data;  member
     89  unsigned int buckets, void *data);
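The void *data member at line 60 is the caller cookie: it is supplied to blk_stat_alloc_callback() (the declaration line 89 belongs to) and read back in the timer callback, exactly as wb_timer_fn() does with cb->data in blk-wbt.c above. A sketch under that assumption; example_timer_fn and example_alloc are hypothetical:

    #include "blk-stat.h"

    static void example_timer_fn(struct blk_stat_callback *cb)
    {
            struct request_queue *q = cb->data;     /* cookie from allocation */

            /* ... evaluate the stat buckets gathered for q ... */
            (void)q;
    }

    static struct blk_stat_callback *
    example_alloc(struct request_queue *q,
                  int (*bucket_fn)(const struct request *))
    {
            /* One bucket; q travels through as the opaque data cookie. */
            return blk_stat_alloc_callback(example_timer_fn, bucket_fn, 1, q);
    }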
|