
Searched refs:data (Results 1 – 25 of 41), sorted by relevance


/block/partitions/
acorn.c
25 adfs_partition(struct parsed_partitions *state, char *name, char *data, in adfs_partition() argument
31 if (adfs_checkbblk(data)) in adfs_partition()
34 dr = (struct adfs_discrecord *)(data + 0x1c0); in adfs_partition()
164 unsigned char *data; in adfspart_check_CUMANA() local
186 data = read_part_sector(state, start_blk * 2 + 6, &sect); in adfspart_check_CUMANA()
187 if (!data) in adfspart_check_CUMANA()
193 dr = adfs_partition(state, name, data, first_sector, slot++); in adfspart_check_CUMANA()
199 nr_sects = (data[0x1fd] + (data[0x1fe] << 8)) * in adfspart_check_CUMANA()
211 switch (data[0x1fc] & 15) { in adfspart_check_CUMANA()
254 unsigned char *data; in adfspart_check_ADFS() local
[all …]
ldm.c
67 static bool ldm_parse_privhead(const u8 *data, struct privhead *ph) in ldm_parse_privhead() argument
71 BUG_ON(!data || !ph); in ldm_parse_privhead()
72 if (MAGIC_PRIVHEAD != get_unaligned_be64(data)) { in ldm_parse_privhead()
77 ph->ver_major = get_unaligned_be16(data + 0x000C); in ldm_parse_privhead()
78 ph->ver_minor = get_unaligned_be16(data + 0x000E); in ldm_parse_privhead()
79 ph->logical_disk_start = get_unaligned_be64(data + 0x011B); in ldm_parse_privhead()
80 ph->logical_disk_size = get_unaligned_be64(data + 0x0123); in ldm_parse_privhead()
81 ph->config_start = get_unaligned_be64(data + 0x012B); in ldm_parse_privhead()
82 ph->config_size = get_unaligned_be64(data + 0x0133); in ldm_parse_privhead()
104 if (uuid_parse(data + 0x0030, &ph->disk_id)) { in ldm_parse_privhead()
[all …]
amiga.c
39 unsigned char *data; in amiga_partition() local
54 data = read_part_sector(state, blk, &sect); in amiga_partition()
55 if (!data) { in amiga_partition()
61 if (*(__be32 *)data != cpu_to_be32(IDNAME_RIGIDDISK)) in amiga_partition()
64 rdb = (struct RigidDiskBlock *)data; in amiga_partition()
65 if (checksum_block((__be32 *)data, be32_to_cpu(rdb->rdb_SummedLongs) & 0x7F) == 0) in amiga_partition()
70 *(__be32 *)(data+0xdc) = 0; in amiga_partition()
71 if (checksum_block((__be32 *)data, in amiga_partition()
101 data = read_part_sector(state, blk, &sect); in amiga_partition()
102 if (!data) { in amiga_partition()
[all …]
sysv68.c
54 unsigned char *data; in sysv68_partition() local
59 data = read_part_sector(state, 0, &sect); in sysv68_partition()
60 if (!data) in sysv68_partition()
63 b = (struct dkblk0 *)data; in sysv68_partition()
72 data = read_part_sector(state, i, &sect); in sysv68_partition()
73 if (!data) in sysv68_partition()
79 slice = (struct slice *)data; in sysv68_partition()
msdos.c
136 unsigned char *data; in parse_extended() local
151 data = read_part_sector(state, this_sector, &sect); in parse_extended()
152 if (!data) in parse_extended()
155 if (!msdos_magic_present(data + 510)) in parse_extended()
158 p = (struct msdos_partition *) (data + 0x1be); in parse_extended()
533 unsigned char *data; in parse_minix() local
537 data = read_part_sector(state, offset, &sect); in parse_minix()
538 if (!data) in parse_minix()
541 p = (struct msdos_partition *)(data + 0x1be); in parse_minix()
546 if (msdos_magic_present(data + 510) && in parse_minix()
[all …]
mac.c
34 unsigned char *data; in mac_partition() local
55 data = read_part_sector(state, datasize / 512, &sect); in mac_partition()
56 if (!data) in mac_partition()
61 part = (struct mac_partition *) (data + partoffset); in mac_partition()
79 data = read_part_sector(state, pos/512, &sect); in mac_partition()
80 if (!data) in mac_partition()
82 part = (struct mac_partition *) (data + pos%512); in mac_partition()
ibm.c
74 unsigned char *data; in find_label() local
102 data = read_part_sector(state, testsect[i], &sect); in find_label()
103 if (data == NULL) in find_label()
105 memcpy(label, data, sizeof(*label)); in find_label()
106 memcpy(temp, data, 4); in find_label()
143 unsigned char *data; in find_vol1_partitions() local
157 data = read_part_sector(state, blk * secperblk, &sect); in find_vol1_partitions()
158 while (data != NULL) { in find_vol1_partitions()
159 memcpy(&f1, data, sizeof(struct vtoc_format1_label)); in find_vol1_partitions()
167 data = read_part_sector(state, blk * secperblk, &sect); in find_vol1_partitions()
[all …]
ultrix.c
16 unsigned char *data; in ultrix_partition() local
29 data = read_part_sector(state, (16384 - sizeof(*label))/512, &sect); in ultrix_partition()
30 if (!data) in ultrix_partition()
33 label = (struct ultrix_disklabel *)(data + 512 - sizeof(*label)); in ultrix_partition()
karma.c
20 unsigned char *data; in karma_partition() local
35 data = read_part_sector(state, 0, &sect); in karma_partition()
36 if (!data) in karma_partition()
39 label = (struct disklabel *)data; in karma_partition()
osf.c
22 unsigned char *data; in osf_partition() local
56 data = read_part_sector(state, 0, &sect); in osf_partition()
57 if (!data) in osf_partition()
60 label = (struct disklabel *) (data+64); in osf_partition()
/block/
blk-mq-debugfs.c
27 static int queue_poll_stat_show(void *data, struct seq_file *m) in queue_poll_stat_show() argument
29 struct request_queue *q = data; in queue_poll_stat_show()
95 static int queue_pm_only_show(void *data, struct seq_file *m) in queue_pm_only_show() argument
97 struct request_queue *q = data; in queue_pm_only_show()
137 static int queue_state_show(void *data, struct seq_file *m) in queue_state_show() argument
139 struct request_queue *q = data; in queue_state_show()
147 static ssize_t queue_state_write(void *data, const char __user *buf, in queue_state_write() argument
150 struct request_queue *q = data; in queue_state_write()
184 static int queue_write_hint_show(void *data, struct seq_file *m) in queue_write_hint_show() argument
186 struct request_queue *q = data; in queue_write_hint_show()
[all …]
blk-mq-tag.c
78 static int __blk_mq_get_tag(struct blk_mq_alloc_data *data, in __blk_mq_get_tag() argument
81 if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) && in __blk_mq_get_tag()
82 !hctx_may_queue(data->hctx, bt)) in __blk_mq_get_tag()
85 if (data->shallow_depth) in __blk_mq_get_tag()
86 return __sbitmap_queue_get_shallow(bt, data->shallow_depth); in __blk_mq_get_tag()
91 unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) in blk_mq_get_tag() argument
93 struct blk_mq_tags *tags = blk_mq_tags_from_data(data); in blk_mq_get_tag()
100 if (data->flags & BLK_MQ_REQ_RESERVED) { in blk_mq_get_tag()
112 tag = __blk_mq_get_tag(data, bt); in blk_mq_get_tag()
116 if (data->flags & BLK_MQ_REQ_NOWAIT) in blk_mq_get_tag()
[all …]
blk-rq-qos.c
215 struct rq_qos_wait_data *data = container_of(curr, in rq_qos_wake_function() local
223 if (!data->cb(data->rqw, data->private_data)) in rq_qos_wake_function()
226 data->got_token = true; in rq_qos_wake_function()
229 wake_up_process(data->task); in rq_qos_wake_function()
253 struct rq_qos_wait_data data = { in rq_qos_wait() local
256 .entry = LIST_HEAD_INIT(data.wq.entry), in rq_qos_wait()
269 has_sleeper = !prepare_to_wait_exclusive(&rqw->wait, &data.wq, in rq_qos_wait()
273 if (data.got_token) in rq_qos_wait()
276 finish_wait(&rqw->wait, &data.wq); in rq_qos_wait()
284 if (data.got_token) in rq_qos_wait()
[all …]
blk-wbt.c
355 struct rq_wb *rwb = cb->data; in wb_timer_fn()
495 struct wbt_wait_data *data = private_data; in wbt_inflight_cb() local
496 return rq_wait_inc_below(rqw, get_limit(data->rwb, data->rw)); in wbt_inflight_cb()
501 struct wbt_wait_data *data = private_data; in wbt_cleanup_cb() local
502 wbt_rqw_done(data->rwb, rqw, data->wb_acct); in wbt_cleanup_cb()
513 struct wbt_wait_data data = { in __wbt_wait() local
519 rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb); in __wbt_wait()
716 static int wbt_curr_win_nsec_show(void *data, struct seq_file *m) in wbt_curr_win_nsec_show() argument
718 struct rq_qos *rqos = data; in wbt_curr_win_nsec_show()
725 static int wbt_enabled_show(void *data, struct seq_file *m) in wbt_enabled_show() argument
[all …]
sed-opal.c
33 int (*fn)(struct opal_dev *dev, void *data);
34 void *data; member
80 void *data; member
269 static bool check_tper(const void *data) in check_tper() argument
271 const struct d0_tper_features *tper = data; in check_tper()
283 static bool check_mbrenabled(const void *data) in check_mbrenabled() argument
285 const struct d0_locking_features *lfeat = data; in check_mbrenabled()
291 static bool check_sum(const void *data) in check_sum() argument
293 const struct d0_single_user_mode *sum = data; in check_sum()
306 static u16 get_comid_v100(const void *data) in get_comid_v100() argument
[all …]
blk-map.c
21 static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data, in bio_alloc_map_data() argument
26 if (data->nr_segs > UIO_MAXIOV) in bio_alloc_map_data()
29 bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask); in bio_alloc_map_data()
32 memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs); in bio_alloc_map_data()
33 bmd->iter = *data; in bio_alloc_map_data()
381 static struct bio *bio_map_kern(struct request_queue *q, void *data, in bio_map_kern() argument
384 unsigned long kaddr = (unsigned long)data; in bio_map_kern()
388 bool is_vmalloc = is_vmalloc_addr(data); in bio_map_kern()
398 flush_kernel_vmap_range(data, len); in bio_map_kern()
399 bio->bi_private = data; in bio_map_kern()
[all …]
blk-mq.c
278 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data, in blk_mq_rq_ctx_init() argument
281 struct blk_mq_tags *tags = blk_mq_tags_from_data(data); in blk_mq_rq_ctx_init()
284 if (data->q->elevator) { in blk_mq_rq_ctx_init()
293 rq->q = data->q; in blk_mq_rq_ctx_init()
294 rq->mq_ctx = data->ctx; in blk_mq_rq_ctx_init()
295 rq->mq_hctx = data->hctx; in blk_mq_rq_ctx_init()
297 rq->cmd_flags = data->cmd_flags; in blk_mq_rq_ctx_init()
298 if (data->flags & BLK_MQ_REQ_PM) in blk_mq_rq_ctx_init()
300 if (blk_queue_io_stat(data->q)) in blk_mq_rq_ctx_init()
329 data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++; in blk_mq_rq_ctx_init()
[all …]
kyber-iosched.c
552 static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data) in kyber_limit_depth() argument
559 struct kyber_queue_data *kqd = data->q->elevator->elevator_data; in kyber_limit_depth()
561 data->shallow_depth = kqd->async_depth; in kyber_limit_depth()
664 static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data) in flush_busy_kcq() argument
666 struct flush_kcq_data *flush_data = data; in flush_busy_kcq()
682 struct flush_kcq_data data = { in kyber_flush_busy_kcqs() local
689 flush_busy_kcq, &data); in kyber_flush_busy_kcqs()
898 static int kyber_##name##_tokens_show(void *data, struct seq_file *m) \
900 struct request_queue *q = data; \
942 static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
[all …]
mq-deadline-main.c
531 static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data) in dd_limit_depth() argument
533 struct deadline_data *dd = data->q->elevator->elevator_data; in dd_limit_depth()
543 data->shallow_depth = dd->async_depth; in dd_limit_depth()
973 static int deadline_##name##_next_rq_show(void *data, \
976 struct request_queue *q = data; \
994 static int deadline_batching_show(void *data, struct seq_file *m) in deadline_batching_show() argument
996 struct request_queue *q = data; in deadline_batching_show()
1003 static int deadline_starved_show(void *data, struct seq_file *m) in deadline_starved_show() argument
1005 struct request_queue *q = data; in deadline_starved_show()
1012 static int dd_async_depth_show(void *data, struct seq_file *m) in dd_async_depth_show() argument
[all …]
ioprio.c
68 int data = IOPRIO_PRIO_DATA(ioprio); in ioprio_check_cap() local
84 if (data >= IOPRIO_BE_NR || data < 0) in ioprio_check_cap()
91 if (data) in ioprio_check_cap()
blk-mq-debugfs-zoned.c
9 int queue_zone_wlock_show(void *data, struct seq_file *m) in queue_zone_wlock_show() argument
11 struct request_queue *q = data; in queue_zone_wlock_show()
blk-zoned.c
156 unsigned int nr_zones, report_zones_cb cb, void *data) in blkdev_report_zones() argument
168 return disk->fops->report_zones(disk, sector, nr_zones, cb, data); in blkdev_report_zones()
267 void *data) in blkdev_copy_zone_to_user() argument
269 struct zone_report_args *args = data; in blkdev_copy_zone_to_user()
429 void *data) in blk_revalidate_zone_cb() argument
431 struct blk_revalidate_zone_args *args = data; in blk_revalidate_zone_cb()
blk-mq.h
170 static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data) in blk_mq_tags_from_data() argument
172 if (data->q->elevator) in blk_mq_tags_from_data()
173 return data->hctx->sched_tags; in blk_mq_tags_from_data()
175 return data->hctx->tags; in blk_mq_tags_from_data()
t10-pi.c
15 static __be16 t10_pi_crc_fn(void *data, unsigned int len) in t10_pi_crc_fn() argument
17 return cpu_to_be16(crc_t10dif(data, len)); in t10_pi_crc_fn()
20 static __be16 t10_pi_ip_fn(void *data, unsigned int len) in t10_pi_ip_fn() argument
22 return (__force __be16)ip_compute_csum(data, len); in t10_pi_ip_fn()
scsi_ioctl.c
429 if (get_user(opcode, sic->data)) in sg_scsi_ioctl()
454 if (copy_from_user(req->cmd, sic->data, cmdlen)) in sg_scsi_ioctl()
457 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len)) in sg_scsi_ioctl()
503 if (copy_to_user(sic->data, req->sense, bytes)) in sg_scsi_ioctl()
507 if (copy_to_user(sic->data, buffer, out_len)) in sg_scsi_ioctl()
523 int cmd, int data) in __blk_send_generic() argument
533 scsi_req(rq)->cmd[4] = data; in __blk_send_generic()
543 struct gendisk *bd_disk, int data) in blk_send_start_stop() argument
545 return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data); in blk_send_start_stop()
