/block/ |
D | blk-sysfs.c |
      28  queue_var_show(unsigned long var, char *page)   in queue_var_show() argument
      30  return sprintf(page, "%lu\n", var);   in queue_var_show()
      34  queue_var_store(unsigned long *var, const char *page, size_t count)   in queue_var_store() argument
      39  err = kstrtoul(page, 10, &v);   in queue_var_store()
      48  static ssize_t queue_var_store64(s64 *var, const char *page)   in queue_var_store64() argument
      53  err = kstrtos64(page, 10, &v);   in queue_var_store64()
      61  static ssize_t queue_requests_show(struct request_queue *q, char *page)   in queue_requests_show() argument
      63  return queue_var_show(q->nr_requests, (page));   in queue_requests_show()
      67  queue_requests_store(struct request_queue *q, const char *page, size_t count)   in queue_requests_store() argument
      75  ret = queue_var_store(&nr, page, count);   in queue_requests_store()
      [all …]
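These helpers follow the standard sysfs convention: each attribute handler gets a single page-sized buffer, the show side formats a value into it with sprintf(), and the store side parses the written string with kstrtoul(). A minimal sketch of such a show/store pair, modelled on the blk-sysfs.c helpers listed above (kernel-internal context assumed; bodies reconstructed as an illustration, not quoted):

    static ssize_t queue_var_show(unsigned long var, char *page)
    {
    	/* sysfs hands show() one PAGE_SIZE buffer; format the value into it */
    	return sprintf(page, "%lu\n", var);
    }

    static ssize_t
    queue_var_store(unsigned long *var, const char *page, size_t count)
    {
    	unsigned long v;
    	int err;

    	/* parse the user-written decimal string back into a number */
    	err = kstrtoul(page, 10, &v);
    	if (err || v > UINT_MAX)
    		return -EINVAL;

    	*var = v;
    	return count;	/* report the whole write as consumed */
    }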
|
D | blk-integrity.c |
      222  char *page)   in integrity_attr_show() argument
      229  return entry->show(bi, page);   in integrity_attr_show()
      233  struct attribute *attr, const char *page,   in integrity_attr_store() argument
      243  ret = entry->store(bi, page, count);   in integrity_attr_store()
      248  static ssize_t integrity_format_show(struct blk_integrity *bi, char *page)   in integrity_format_show() argument
      251  return sprintf(page, "%s\n", bi->profile->name);   in integrity_format_show()
      253  return sprintf(page, "none\n");   in integrity_format_show()
      256  static ssize_t integrity_tag_size_show(struct blk_integrity *bi, char *page)   in integrity_tag_size_show() argument
      258  return sprintf(page, "%u\n", bi->tag_size);   in integrity_tag_size_show()
      261  static ssize_t integrity_interval_show(struct blk_integrity *bi, char *page)   in integrity_interval_show() argument
      [all …]
|
D | blk-map.c |
      134  struct page *page;   in bio_copy_user_iov() local
      181  page = map_data->pages[i / nr_pages];   in bio_copy_user_iov()
      182  page += (i % nr_pages);   in bio_copy_user_iov()
      186  page = alloc_page(rq->q->bounce_gfp | gfp_mask);   in bio_copy_user_iov()
      187  if (!page) {   in bio_copy_user_iov()
      193  if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {   in bio_copy_user_iov()
      195  __free_page(page);   in bio_copy_user_iov()
      260  struct page **pages;   in bio_map_user_iov()
      278  struct page *page = pages[j];   in bio_map_user_iov() local
      285  if (!bio_add_hw_page(rq->q, bio, page, n, offs,   in bio_map_user_iov()
      [all …]
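bio_copy_user_iov() shows the usual pattern for building a bio from freshly allocated pages: allocate a page, try to append it with bio_add_pc_page(), and release it again if the queue limits prevent the full length from being added. A hedged sketch of that step, wrapped in a hypothetical helper (add_one_bounce_page() is not a real kernel function):

    /* Sketch: add one freshly allocated page to a passthrough bio. */
    static int add_one_bounce_page(struct request_queue *q, struct bio *bio,
    			       unsigned int bytes, unsigned int offset,
    			       gfp_t gfp_mask)
    {
    	struct page *page = alloc_page(gfp_mask);

    	if (!page)
    		return -ENOMEM;

    	/* bio_add_pc_page() returns the byte count it managed to add,
    	 * which can be less than requested when queue limits are hit. */
    	if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
    		__free_page(page);	/* nothing else references the page yet */
    		return -EINVAL;
    	}
    	return 0;
    }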
|
D | blk-mq-sysfs.c |
      61  char *page)   in blk_mq_sysfs_show() argument
      76  res = entry->show(ctx, page);   in blk_mq_sysfs_show()
      82  const char *page, size_t length)   in blk_mq_sysfs_store() argument
      97  res = entry->store(ctx, page, length);   in blk_mq_sysfs_store()
      103  struct attribute *attr, char *page)   in blk_mq_hw_sysfs_show() argument
      118  res = entry->show(hctx, page);   in blk_mq_hw_sysfs_show()
      124  struct attribute *attr, const char *page,   in blk_mq_hw_sysfs_store() argument
      140  res = entry->store(hctx, page, length);   in blk_mq_hw_sysfs_store()
      146  char *page)   in blk_mq_hw_sysfs_nr_tags_show() argument
      148  return sprintf(page, "%u\n", hctx->tags->nr_tags);   in blk_mq_hw_sysfs_nr_tags_show()
      [all …]
|
D | bio.c |
      741  struct page *page, unsigned int len, unsigned int off,   in page_is_mergeable() argument
      746  phys_addr_t page_addr = page_to_phys(page);   in page_is_mergeable()
      750  if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))   in page_is_mergeable()
      756  return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);   in page_is_mergeable()
      765  struct page *page, unsigned len,   in bio_try_merge_hw_seg() argument
      771  phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;   in bio_try_merge_hw_seg()
      777  return __bio_try_merge_page(bio, page, len, offset, same_page);   in bio_try_merge_hw_seg()
      794  struct page *page, unsigned int len, unsigned int offset,   in bio_add_hw_page() argument
      806  if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))   in bio_add_hw_page()
      825  bvec->bv_page = page;   in bio_add_hw_page()
      [all …]
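page_is_mergeable() decides whether a new (page, offset) pair continues the physical range covered by the bio's last bio_vec, so both can be described by one segment. The heart of that test is plain physical-address arithmetic; a sketch of just that check (pages_are_contiguous() is an illustrative name, and the Xen and same_page handling of the real function are omitted):

    /*
     * Sketch of the contiguity test: the new data must start exactly where
     * the existing bio_vec ends in physical memory.  "bv" is assumed to be
     * the last bio_vec already in the bio.
     */
    static bool pages_are_contiguous(const struct bio_vec *bv,
    				 struct page *page, unsigned int off)
    {
    	size_t bv_end = bv->bv_offset + bv->bv_len;
    	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
    	phys_addr_t page_addr = page_to_phys(page);

    	return vec_end_addr + 1 == page_addr + off;
    }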
|
D | badblocks.c |
      58  u64 *p = bb->page;   in badblocks_check()
      130  u64 *p = bb->page;   in badblocks_update_acked()
      187  p = bb->page;   in badblocks_set()
      353  p = bb->page;   in badblocks_clear()
      435  if (bb->page == NULL || bb->changed)   in ack_all_badblocks()
      441  u64 *p = bb->page;   in ack_all_badblocks()
      467  ssize_t badblocks_show(struct badblocks *bb, char *page, int unack)   in badblocks_show() argument
      471  u64 *p = bb->page;   in badblocks_show()
      493  len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",   in badblocks_show()
      517  ssize_t badblocks_store(struct badblocks *bb, const char *page, size_t len,   in badblocks_store() argument
      [all …]
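badblocks_show() walks the packed u64 array at bb->page and prints one "start length" pair per line into the sysfs buffer, never writing past PAGE_SIZE. A simplified sketch of that formatting loop, using the BB_OFFSET()/BB_LEN() accessors from <linux/badblocks.h> and skipping the seqlock retry and unacknowledged-only filtering (badblocks_format() is an illustrative name):

    static ssize_t badblocks_format(struct badblocks *bb, char *page)
    {
    	ssize_t len = 0;
    	int i;

    	for (i = 0; i < bb->count; i++) {
    		u64 entry = bb->page[i];	/* packed start sector + length */

    		/* snprintf() keeps the output inside the one sysfs page */
    		len += snprintf(page + len, PAGE_SIZE - len, "%llu %u\n",
    				(unsigned long long)BB_OFFSET(entry),
    				BB_LEN(entry));
    	}
    	return len;
    }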
|
D | blk-lib.c |
      166  sector_t nr_sects, gfp_t gfp_mask, struct page *page,   in __blkdev_issue_write_same() argument
      195  bio->bi_io_vec->bv_page = page;   in __blkdev_issue_write_same()
      228  struct page *page)   in blkdev_issue_write_same() argument
      235  ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,   in blkdev_issue_write_same()
|
D | bounce.c |
      323  struct page *page = to->bv_page;   in __blk_queue_bounce() local
      325  if (page_to_pfn(page) <= q->limits.bounce_pfn)   in __blk_queue_bounce()
      334  flush_dcache_page(page);   in __blk_queue_bounce()
      337  vfrom = kmap_atomic(page) + to->bv_offset;   in __blk_queue_bounce()
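__blk_queue_bounce() copies data between the original bio_vec page, which may live in highmem, and a lowmem bounce page; that is why the source is mapped with kmap_atomic() rather than addressed through page_address(). A sketch of that copy step (copy_to_bounce_page() is an illustrative name; the bounce page is assumed to come from a lowmem mempool):

    static void copy_to_bounce_page(struct page *bounce_page,
    				const struct bio_vec *from)
    {
    	/* the source page may be highmem, so map it for the copy */
    	void *vfrom = kmap_atomic(from->bv_page) + from->bv_offset;

    	/* the bounce page is lowmem and always directly addressable */
    	memcpy(page_address(bounce_page), vfrom, from->bv_len);
    	kunmap_atomic(vfrom);
    }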
|
D | blk-crypto-fallback.c |
      317  struct page *plaintext_page = enc_bvec->bv_page;   in blk_crypto_fallback_encrypt_bio()
      318  struct page *ciphertext_page =   in blk_crypto_fallback_encrypt_bio()
      416  struct page *page = bv.bv_page;   in blk_crypto_fallback_decrypt_bio() local
      418  sg_set_page(&sg, page, data_unit_size, bv.bv_offset);   in blk_crypto_fallback_decrypt_bio()
|
D | blk.h |
      318  extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
      320  const char *page, size_t count);
      448  struct page *page, unsigned int len, unsigned int offset,
|
D | blk-mq.c |
      2389  struct page *page;   in blk_mq_clear_rq_mapping() local
      2392  list_for_each_entry(page, &tags->page_list, lru) {   in blk_mq_clear_rq_mapping()
      2393  unsigned long start = (unsigned long)page_address(page);   in blk_mq_clear_rq_mapping()
      2394  unsigned long end = start + order_to_size(page->private);   in blk_mq_clear_rq_mapping()
      2421  struct page *page;   in blk_mq_free_rqs() local
      2439  page = list_first_entry(&tags->page_list, struct page, lru);   in blk_mq_free_rqs()
      2440  list_del_init(&page->lru);   in blk_mq_free_rqs()
      2445  kmemleak_free(page_address(page));   in blk_mq_free_rqs()
      2446  __free_pages(page, page->private);   in blk_mq_free_rqs()
      2539  struct page *page;   in blk_mq_alloc_rqs() local
      [all …]
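blk-mq carves request structures out of higher-order page allocations, strings those pages on tags->page_list, and stashes the allocation order in page->private so each page can later be returned with a matching __free_pages() call. A sketch of that teardown loop, as in blk_mq_free_rqs(), wrapped in a hypothetical helper (free_rq_pages() is not a real kernel function):

    static void free_rq_pages(struct list_head *page_list)
    {
    	while (!list_empty(page_list)) {
    		struct page *page;

    		page = list_first_entry(page_list, struct page, lru);
    		list_del_init(&page->lru);

    		/* undo the kmemleak annotation made at allocation time */
    		kmemleak_free(page_address(page));
    		/* the allocation order was stashed in page->private */
    		__free_pages(page, page->private);
    	}
    }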
|
D | bio-integrity.c |
      129  int bio_integrity_add_page(struct bio *bio, struct page *page,   in bio_integrity_add_page() argument
      147  iv->bv_page = page;   in bio_integrity_add_page()
|
D | bfq-iosched.c |
      6592  static ssize_t bfq_var_show(unsigned int var, char *page)   in bfq_var_show() argument
      6594  return sprintf(page, "%u\n", var);   in bfq_var_show()
      6597  static int bfq_var_store(unsigned long *var, const char *page)   in bfq_var_store() argument
      6600  int ret = kstrtoul(page, 10, &new_val);   in bfq_var_store()
      6609  static ssize_t __FUNC(struct elevator_queue *e, char *page) \
      6617  return bfq_var_show(__data, (page)); \
      6631  static ssize_t __FUNC(struct elevator_queue *e, char *page) \
      6636  return bfq_var_show(__data, (page)); \
      6643  __FUNC(struct elevator_queue *e, const char *page, size_t count) \
      6649  ret = bfq_var_store(&__data, (page)); \
      [all …]
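bfq, like kyber and mq-deadline below, does not write its sysfs handlers by hand: a SHOW_FUNCTION/STORE_FUNCTION macro pair stamps out one show and one store routine per tunable, all going through a page-buffer helper like bfq_var_show()/bfq_var_store(). A condensed sketch of that pattern; struct sched_data, its field, and the generated function names are illustrative stand-ins, not the scheduler's real definitions:

    struct sched_data {		/* illustrative stand-in for elevator private data */
    	unsigned int timeout;
    };

    #define SHOW_FUNCTION(__FUNC, __FIELD)					\
    static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
    {									\
    	struct sched_data *sd = e->elevator_data;			\
    	return sprintf(page, "%u\n", sd->__FIELD);			\
    }

    #define STORE_FUNCTION(__FUNC, __FIELD)					\
    static ssize_t __FUNC(struct elevator_queue *e, const char *page,	\
    		      size_t count)					\
    {									\
    	struct sched_data *sd = e->elevator_data;			\
    	unsigned long v;						\
    									\
    	if (kstrtoul(page, 10, &v))					\
    		return -EINVAL;						\
    	sd->__FIELD = v;						\
    	return count;							\
    }

    SHOW_FUNCTION(sched_timeout_show, timeout);
    STORE_FUNCTION(sched_timeout_store, timeout);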
|
D | elevator.c |
      441  elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)   in elv_attr_show() argument
      452  error = e->type ? entry->show(e, page) : -ENOENT;   in elv_attr_show()
      459  const char *page, size_t length)   in elv_attr_store() argument
      470  error = e->type ? entry->store(e, page, length) : -ENOENT;   in elv_attr_store()
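elv_attr_show()/elv_attr_store() are the elevator's sysfs_ops dispatchers: they recover the elevator_queue and the attribute-specific elv_fs_entry from the generic kobject/attribute pointers with container_of(), then invoke that entry's callback under the sysfs lock. A simplified sketch of the show side, mirroring (but not quoting) elevator.c:

    static ssize_t
    elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
    {
    	/* recover the type-specific wrappers from the generic pointers */
    	struct elv_fs_entry *entry = container_of(attr, struct elv_fs_entry, attr);
    	struct elevator_queue *e = container_of(kobj, struct elevator_queue, kobj);
    	ssize_t error;

    	if (!entry->show)
    		return -EIO;

    	mutex_lock(&e->sysfs_lock);
    	error = e->type ? entry->show(e, page) : -ENOENT;
    	mutex_unlock(&e->sysfs_lock);
    	return error;
    }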
|
D | blk-merge.c |
      165  struct page *start_page,   in get_max_segment_size()
      431  struct page *page = bvec->bv_page;   in blk_bvec_map_sg() local
      441  page += (offset >> PAGE_SHIFT);   in blk_bvec_map_sg()
      445  sg_set_page(*sg, page, len, offset);   in blk_bvec_map_sg()
|
D | kyber-iosched.c |
      862  char *page) \
      866  return sprintf(page, "%llu\n", kqd->latency_targets[domain]); \
      870  const char *page, size_t count) \
      876  ret = kstrtoull(page, 10, &nsec); \
|
D | mq-deadline-main.c |
      871  static ssize_t __FUNC(struct elevator_queue *e, char *page) \
      875  return sysfs_emit(page, "%d\n", __VAR); \
      889  static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
      894  __ret = kstrtoint(page, 0, &__data); \
|
D | genhd.c |
      960  char *page)   in disk_badblocks_show() argument
      965  return sprintf(page, "\n");   in disk_badblocks_show()
      967  return badblocks_show(disk->bb, page, 0);   in disk_badblocks_show()
      972  const char *page, size_t len)   in disk_badblocks_store() argument
      979  return badblocks_store(disk->bb, page, len, 0);   in disk_badblocks_store()
|
D | blk-iocost.c |
      873  u64 *page, u64 *seqio, u64 *randio)   in calc_lcoefs() argument
      877  *page = *seqio = *randio = 0;   in calc_lcoefs()
      883  *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);   in calc_lcoefs()
      885  *page = 1;   in calc_lcoefs()
      890  if (v > *page)   in calc_lcoefs()
      891  *seqio = v - *page;   in calc_lcoefs()
      896  if (v > *page)   in calc_lcoefs()
      897  *randio = v - *page;   in calc_lcoefs()
|
D | blk-throttle.c |
      2493  ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)   in blk_throtl_sample_time_show() argument
      2497  return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));   in blk_throtl_sample_time_show()
      2501  const char *page, size_t count)   in blk_throtl_sample_time_store() argument
      2508  if (kstrtoul(page, 10, &v))   in blk_throtl_sample_time_store()
|
/block/partitions/ |
D | core.c |
      780  struct page *page;   in read_part_sector() local
      787  page = read_mapping_page(mapping,   in read_part_sector()
      789  if (IS_ERR(page))   in read_part_sector()
      791  if (PageError(page))   in read_part_sector()
      794  p->v = page;   in read_part_sector()
      795  return (unsigned char *)page_address(page) +   in read_part_sector()
      798  put_page(page);   in read_part_sector()
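read_part_sector() reads a partition-table sector through the block device's page cache: read_mapping_page() brings in the page that covers the sector, and the in-page offset is the sector number modulo the sectors per page. A sketch of that index/offset arithmetic with the error handling trimmed (read_sector_via_page_cache() is an illustrative name; the real function also checks PageError() and stores the page so the caller can put_page() it, which the held parameter stands in for here):

    static void *read_sector_via_page_cache(struct address_space *mapping,
    					sector_t n, struct page **held)
    {
    	struct page *page;

    	/* with 512-byte sectors there are PAGE_SIZE >> SECTOR_SHIFT
    	 * sectors per page-cache page */
    	page = read_mapping_page(mapping,
    				 (pgoff_t)(n >> (PAGE_SHIFT - SECTOR_SHIFT)),
    				 NULL);
    	if (IS_ERR(page))
    		return NULL;

    	*held = page;	/* caller drops this with put_page() when done */
    	return (unsigned char *)page_address(page) +
    		((n & ((1 << (PAGE_SHIFT - SECTOR_SHIFT)) - 1)) << SECTOR_SHIFT);
    }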
|
D | check.h | 28 struct page *v;
|