
Searched refs:to (Results 1 – 8 of 8) sorted by relevance

/block/partitions/
Kconfig:8 Say Y here if you would like to use hard disks under Linux which
12 Note that the answer to this question won't directly affect the
13 kernel: saying N will just cause the configurator to skip all
29 Say Y here if you would like to use hard disks under Linux which
42 Say Y here if you would like to use hard disks under Linux which
70 to read disks partitioned under RISCiX.
75 Say Y here if you would like to be able to read the hard disk
87 Say Y here if you would like to use hard disks under Linux which
94 Say Y here if you would like to use hard disks under Linux which
101 Say Y here if you would like to use hard disks under Linux which
[all …]
/block/
Kconfig:13 Disable this option to remove the block layer support from the
24 Say Y here unless you know you really don't want to mount disks and
43 normally need to manually enable this.
50 Some storage devices allow extra information to be
51 stored/retrieved to help protect the data. The block layer
53 filesystems to ensure better data integrity.
79 Block layer bio throttling support. It can be used to limit
80 the IO rate to a device. IO rate policies are per cgroup and
81 one needs to mount and use blkio cgroup controller for creating
91 effort limit to prioritize cgroups. Depending on the setting, the limit
[all …]
bounce.c:75 static void copy_to_high_bio_irq(struct bio *to, struct bio *from) in copy_to_high_bio_irq() argument
86 bio_for_each_segment(tovec, to, iter) { in copy_to_high_bio_irq()
213 struct bio_vec *to, from; in __blk_queue_bounce() local
241 for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) { in __blk_queue_bounce()
244 if (!PageHighMem(to->bv_page)) in __blk_queue_bounce()
251 flush_dcache_page(to->bv_page); in __blk_queue_bounce()
252 memcpy_from_bvec(page_address(bounce_page), to); in __blk_queue_bounce()
254 to->bv_page = bounce_page; in __blk_queue_bounce()
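
The bounce.c hits above show the bounce-buffer pattern: when a bio segment points at a page the device cannot reach directly (PageHighMem), __blk_queue_bounce() swaps in a reachable bounce page, staging write data with memcpy_from_bvec(), while copy_to_high_bio_irq() copies completed read data back to the original pages. Below is a minimal userspace sketch of the same idea; struct segment, bounce_write() and bounce_read_done() are hypothetical names for illustration, not kernel APIs.

/*
 * Userspace sketch of the bounce-buffer pattern: stage data through a
 * reachable "bounce" page, copy it back on read completion.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct segment {
    void   *page;     /* original buffer (kernel: a highmem bv_page) */
    void   *bounce;   /* staging copy the device can reach */
    size_t  len;
};

/* Write path: stage the caller's data, as memcpy_from_bvec() does. */
static int bounce_write(struct segment *seg)
{
    seg->bounce = malloc(seg->len);
    if (!seg->bounce)
        return -1;
    memcpy(seg->bounce, seg->page, seg->len);
    return 0;
}

/* Read completion: copy staged data back to the original page,
 * the job copy_to_high_bio_irq() does in the kernel. */
static void bounce_read_done(struct segment *seg)
{
    memcpy(seg->page, seg->bounce, seg->len);
    free(seg->bounce);
    seg->bounce = NULL;
}

int main(void)
{
    char buf[8] = "payload";
    struct segment seg = { .page = buf, .bounce = NULL, .len = sizeof(buf) };

    if (bounce_write(&seg) == 0) {
        bounce_read_done(&seg);
        puts((char *)seg.page);
    }
    return 0;
}
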
holder.c:22 static int add_symlink(struct kobject *from, struct kobject *to) in add_symlink() argument
24 return sysfs_create_link(from, to, kobject_name(to)); in add_symlink()
27 static void del_symlink(struct kobject *from, struct kobject *to) in del_symlink() argument
29 sysfs_remove_link(from, kobject_name(to)); in del_symlink()
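
The holder.c helpers are thin wrappers: add_symlink() creates a sysfs link inside from's directory, named after the target via kobject_name(to), and del_symlink() removes it by the same name. A rough userspace analogue with symlink(2)/unlink(2) follows; the paths in main() are arbitrary examples.

/*
 * Userspace analogue of add_symlink()/del_symlink(): the link lives in
 * the "from" directory and is named after the target, just as
 * sysfs_create_link(from, to, kobject_name(to)) names the sysfs link
 * after the target kobject.
 */
#include <libgen.h>
#include <stdio.h>
#include <unistd.h>

static void link_path(char *out, size_t outlen,
                      const char *from_dir, const char *to_path)
{
    char copy[4096];

    /* POSIX basename() may modify its argument, so work on a copy. */
    snprintf(copy, sizeof(copy), "%s", to_path);
    snprintf(out, outlen, "%s/%s", from_dir, basename(copy));
}

static int add_symlink(const char *from_dir, const char *to_path)
{
    char path[4096];

    link_path(path, sizeof(path), from_dir, to_path);
    return symlink(to_path, path);
}

static void del_symlink(const char *from_dir, const char *to_path)
{
    char path[4096];

    link_path(path, sizeof(path), from_dir, to_path);
    unlink(path);
}

int main(void)
{
    if (add_symlink("/tmp", "/etc/hostname") == 0)
        del_symlink("/tmp", "/etc/hostname");
    return 0;
}
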
Kconfig.iosched:18 synchronous writes, it will self-tune queue depths to achieve that
25 of the device among all processes according to their weights,
27 also guarantees a low latency to interactive and soft
bfq-cgroup.c:75 static inline void bfq_stat_add_aux(struct bfq_stat *to, in bfq_stat_add_aux() argument
79 &to->aux_cnt); in bfq_stat_add_aux()
380 static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from) in bfqg_stats_add_aux() argument
382 if (!to || !from) in bfqg_stats_add_aux()
387 blkg_rwstat_add_aux(&to->merged, &from->merged); in bfqg_stats_add_aux()
388 blkg_rwstat_add_aux(&to->service_time, &from->service_time); in bfqg_stats_add_aux()
389 blkg_rwstat_add_aux(&to->wait_time, &from->wait_time); in bfqg_stats_add_aux()
391 bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum); in bfqg_stats_add_aux()
392 bfq_stat_add_aux(&to->avg_queue_size_samples, in bfqg_stats_add_aux()
394 bfq_stat_add_aux(&to->dequeue, &from->dequeue); in bfqg_stats_add_aux()
[all …]
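
The bfq-cgroup.c matches show BFQ's stat-folding pattern: behind a NULL guard, every counter in "from" is added into the matching auxiliary counter of "to", so one group's numbers can be accumulated into another's. A compact sketch of that fold, assuming a hypothetical struct io_stats with plain C11 atomics standing in for struct bfqg_stats and bfq_stat:

/*
 * Sketch of the stat-folding pattern in bfqg_stats_add_aux(). Field
 * names here are illustrative stand-ins, not the kernel layout.
 */
#include <stdatomic.h>
#include <stdio.h>

struct io_stats {
    atomic_long service_time;
    atomic_long wait_time;
    atomic_long dequeue;
};

static void stats_add_aux(struct io_stats *to, struct io_stats *from)
{
    if (!to || !from)   /* mirrors the guard in bfqg_stats_add_aux() */
        return;
    atomic_fetch_add(&to->service_time, atomic_load(&from->service_time));
    atomic_fetch_add(&to->wait_time, atomic_load(&from->wait_time));
    atomic_fetch_add(&to->dequeue, atomic_load(&from->dequeue));
}

int main(void)
{
    struct io_stats parent = {0}, child = {0};

    atomic_fetch_add(&child.dequeue, 3);
    stats_add_aux(&parent, &child);   /* fold child stats into parent */
    printf("%ld\n", atomic_load(&parent.dequeue));
    return 0;
}
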
fops.c:542 static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) in blkdev_read_iter() argument
555 if (iov_iter_count(to) > size) { in blkdev_read_iter()
556 shorted = iov_iter_count(to) - size; in blkdev_read_iter()
557 iov_iter_truncate(to, size); in blkdev_read_iter()
560 ret = generic_file_read_iter(iocb, to); in blkdev_read_iter()
561 iov_iter_reexpand(to, iov_iter_count(to) + shorted); in blkdev_read_iter()
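
blkdev_read_iter() shows a clamp-and-restore pattern: if the iterator asks for more bytes than remain on the device, iov_iter_truncate() shortens it by "shorted" bytes, generic_file_read_iter() runs against the clamped length, and iov_iter_reexpand() gives the remainder back so the caller's iterator ends up consistent. A toy model of that control flow, with a hypothetical struct iter standing in for struct iov_iter:

/*
 * Toy model of the clamp-and-restore flow in blkdev_read_iter(); the
 * "read" itself is simulated, only the bookkeeping is illustrated.
 */
#include <stdio.h>
#include <stddef.h>

struct iter {
    size_t count;   /* bytes still requested by the caller */
};

static size_t bounded_read(struct iter *to, size_t dev_size, size_t pos)
{
    size_t size = dev_size - pos;   /* bytes left before end of device */
    size_t shorted = 0;
    size_t done;

    if (to->count > size) {
        shorted = to->count - size;  /* remember how much was clipped */
        to->count = size;            /* like iov_iter_truncate(to, size) */
    }

    done = to->count;                /* pretend the read consumed it all */
    to->count = 0;

    to->count += shorted;            /* like iov_iter_reexpand() */
    return done;
}

int main(void)
{
    struct iter it = { .count = 4096 };

    /* A 4096-byte request at offset 900 of a 1000-byte "device". */
    printf("read %zu, left in iter %zu\n",
           bounded_read(&it, 1000, 900), it.count);
    return 0;
}
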
blk-cgroup-rwstat.h:136 static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to, in blkg_rwstat_add_aux() argument
147 &to->aux_cnt[i]); in blkg_rwstat_add_aux()