/block/ |
D | blk-map.c |
    165 unsigned int bytes = PAGE_SIZE; in bio_copy_user_iov() local
    167 bytes -= offset; in bio_copy_user_iov()
    169 if (bytes > len) in bio_copy_user_iov()
    170 bytes = len; in bio_copy_user_iov()
    190 if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) { in bio_copy_user_iov()
    196 len -= bytes; in bio_copy_user_iov()
    251 ssize_t bytes; in bio_map_user_iov() local
    255 bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs); in bio_map_user_iov()
    256 if (unlikely(bytes <= 0)) { in bio_map_user_iov()
    257 ret = bytes ? bytes : -EFAULT; in bio_map_user_iov()
    [all …]
|
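
The blk-map.c hits show the usual page-at-a-time clamping in bio_copy_user_iov(): start from a full page, trim the leading offset, and cap the chunk at the remaining request length. A minimal standalone sketch of that pattern (not the kernel code itself; the lengths and offset are made up) could look like:

    /* Standalone sketch of the per-page clamping loop seen above. */
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    int main(void)
    {
        unsigned int len = 10000;   /* hypothetical total transfer length */
        unsigned int offset = 512;  /* hypothetical offset into the first page */

        while (len) {
            unsigned int bytes = PAGE_SIZE;

            bytes -= offset;        /* first page may start mid-page */
            if (bytes > len)
                bytes = len;        /* never go past the remaining length */

            printf("add segment: %u bytes at offset %u\n", bytes, offset);

            len -= bytes;
            offset = 0;             /* later pages start at offset 0 */
        }
        return 0;
    }
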
D | bio-integrity.c |
    205 unsigned int bytes, offset, i; in bio_integrity_prep() local
    266 bytes = PAGE_SIZE - offset; in bio_integrity_prep()
    271 if (bytes > len) in bio_integrity_prep()
    272 bytes = len; in bio_integrity_prep()
    275 bytes, offset); in bio_integrity_prep()
    283 if (ret < bytes) in bio_integrity_prep()
    286 buf += bytes; in bio_integrity_prep()
    287 len -= bytes; in bio_integrity_prep()
    374 unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9); in bio_integrity_advance() local
    377 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes); in bio_integrity_advance()
|
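
The bio_integrity_prep() hits follow the same shape for the integrity buffer: attach at most one page worth of data per step, stop if the chunk is not fully accepted, and advance buf/len by what was consumed. A standalone sketch, with attach_chunk() as a hypothetical stand-in for the real page-add helper and arbitrary sizes:

    /* Standalone sketch of the buffer walk in the bio_integrity_prep() hits. */
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* hypothetical stand-in: pretend the full chunk was accepted */
    static unsigned int attach_chunk(unsigned int bytes, unsigned int offset)
    {
        printf("attach %u bytes at page offset %u\n", bytes, offset);
        return bytes;
    }

    int main(void)
    {
        static char payload[12000];
        char *buf = payload;
        unsigned int len = sizeof(payload);
        unsigned int offset = 128;      /* hypothetical offset into the first page */

        while (len > 0) {
            unsigned int bytes = PAGE_SIZE - offset;

            if (bytes > len)
                bytes = len;

            if (attach_chunk(bytes, offset) < bytes)
                break;                  /* chunk was not fully accepted */

            buf += bytes;
            len -= bytes;
            offset = 0;                 /* later pages start at offset 0 */
        }

        printf("consumed %td bytes\n", buf - payload);
        return 0;
    }
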
D | blk-crypto-internal.h |
    133 void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
    134 static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes) in bio_crypt_advance() argument
    137 __bio_crypt_advance(bio, bytes); in bio_crypt_advance()
|
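
Here bio_crypt_advance() is a static-inline wrapper that forwards to the out-of-line __bio_crypt_advance(). A generic sketch of that fast-path/slow-path idiom, with a hypothetical has_ctx guard and fake_bio type standing in for whatever condition and state the real wrapper uses:

    /* Standalone sketch of the inline-wrapper / out-of-line-helper idiom. */
    #include <stdbool.h>
    #include <stdio.h>

    struct fake_bio {
        bool has_ctx;       /* hypothetical "work to do" flag */
        unsigned int pos;
    };

    /* out-of-line slow path, normally defined in a .c file */
    static void __advance(struct fake_bio *bio, unsigned int bytes)
    {
        bio->pos += bytes;
    }

    /* inline fast path: skip the call entirely when nothing is attached */
    static inline void advance(struct fake_bio *bio, unsigned int bytes)
    {
        if (bio->has_ctx)
            __advance(bio, bytes);
    }

    int main(void)
    {
        struct fake_bio bio = { .has_ctx = true, .pos = 0 };

        advance(&bio, 4096);
        printf("pos = %u\n", bio.pos);
        return 0;
    }
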
D | blk-crypto.c |
    156 void __bio_crypt_advance(struct bio *bio, unsigned int bytes) in __bio_crypt_advance() argument
    161 bytes >> bc->bc_key->data_unit_size_bits); in __bio_crypt_advance()
    169 unsigned int bytes, in bio_crypt_dun_is_contiguous() argument
    173 unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits; in bio_crypt_dun_is_contiguous()
|
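
In __bio_crypt_advance() and bio_crypt_dun_is_contiguous(), the byte count is turned into a number of crypto data units with a shift by data_unit_size_bits. A tiny standalone sketch of that conversion, assuming a 4096-byte data unit size:

    /* Standalone sketch: bytes -> data units via a power-of-two shift. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int data_unit_size_bits = 12;  /* log2(4096), an assumption */
        unsigned int bytes = 32768;             /* bytes advanced in the bio */

        /* each data unit has its own number, so advance by whole units */
        unsigned int units = bytes >> data_unit_size_bits;

        printf("%u bytes -> advance by %u data units\n", bytes, units);
        return 0;
    }
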
D | blk-cgroup.c |
    776 dst->bytes[i] = src->bytes[i]; in blkg_iostat_set()
    786 dst->bytes[i] += src->bytes[i]; in blkg_iostat_add()
    796 dst->bytes[i] -= src->bytes[i]; in blkg_iostat_sub()
    885 tmp.bytes[BLKG_IOSTAT_READ] += in blkcg_fill_root_iostats()
    887 tmp.bytes[BLKG_IOSTAT_WRITE] += in blkcg_fill_root_iostats()
    889 tmp.bytes[BLKG_IOSTAT_DISCARD] += in blkcg_fill_root_iostats()
    920 rbytes = bis->cur.bytes[BLKG_IOSTAT_READ]; in blkcg_print_one_stat()
    921 wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE]; in blkcg_print_one_stat()
    922 dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD]; in blkcg_print_one_stat()
    1936 bis->cur.bytes[rwd] += bio->bi_iter.bi_size; in blk_cgroup_bio_start()
|
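
The blk-cgroup.c hits all operate on a per-operation byte-counter array indexed by BLKG_IOSTAT_READ/WRITE/DISCARD: set, add and subtract walk the array element by element, and blk_cgroup_bio_start() bumps the bucket for the current bio by bi_size. A simplified standalone model of that accounting (struct and enum names here are illustrative, not the kernel's):

    /* Standalone sketch of per-op byte counters set/add element by element. */
    #include <stdint.h>
    #include <stdio.h>

    enum { IOSTAT_READ, IOSTAT_WRITE, IOSTAT_DISCARD, IOSTAT_NR };

    struct iostat {
        uint64_t bytes[IOSTAT_NR];
    };

    static void iostat_add(struct iostat *dst, const struct iostat *src)
    {
        for (int i = 0; i < IOSTAT_NR; i++)
            dst->bytes[i] += src->bytes[i];
    }

    int main(void)
    {
        struct iostat total = { { 0 } };
        struct iostat cur = { .bytes = { 8192, 4096, 0 } };

        iostat_add(&total, &cur);
        printf("rbytes=%llu wbytes=%llu dbytes=%llu\n",
               (unsigned long long)total.bytes[IOSTAT_READ],
               (unsigned long long)total.bytes[IOSTAT_WRITE],
               (unsigned long long)total.bytes[IOSTAT_DISCARD]);
        return 0;
    }
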
D | blk.h |
    85 unsigned int bytes) in blk_segments() argument
    93 if (bytes <= mss) in blk_segments()
    96 return round_up(bytes, mss) >> ilog2(mss); in blk_segments()
    97 return (bytes + mss - 1) / mss; in blk_segments()
|
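
blk_segments() computes how many max-size segments a byte count spans, i.e. ceil(bytes / mss), using round_up() plus a shift when mss is a power of two and plain integer division otherwise. A standalone sketch showing the two forms agree (the helpers here are local stand-ins, not the kernel macros):

    /* Standalone sketch: two equivalent ways to compute ceil(bytes / mss). */
    #include <stdio.h>

    static unsigned int ilog2_u(unsigned int v)
    {
        unsigned int r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    static unsigned int round_up_pow2(unsigned int v, unsigned int align)
    {
        return (v + align - 1) & ~(align - 1);
    }

    int main(void)
    {
        unsigned int bytes = 130000;
        unsigned int mss = 65536;       /* power-of-two max segment size */

        unsigned int fast = round_up_pow2(bytes, mss) >> ilog2_u(mss);
        unsigned int slow = (bytes + mss - 1) / mss;

        printf("segments: fast=%u slow=%u\n", fast, slow);
        return 0;
    }
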
D | blk-core.c |
    677 static bool should_fail_request(struct block_device *part, unsigned int bytes) in should_fail_request() argument
    679 return part->bd_make_it_fail && should_fail(&fail_make_request, bytes); in should_fail_request()
    695 unsigned int bytes) in should_fail_request() argument
    1202 unsigned int bytes = 0; in blk_rq_err_bytes() local
    1218 bytes += bio->bi_iter.bi_size; in blk_rq_err_bytes()
    1222 BUG_ON(blk_rq_bytes(rq) && !bytes); in blk_rq_err_bytes()
    1223 return bytes; in blk_rq_err_bytes()
    1243 static void blk_account_io_completion(struct request *req, unsigned int bytes) in blk_account_io_completion() argument
    1249 part_stat_add(req->part, sectors[sgrp], bytes >> 9); in blk_account_io_completion()
|
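
In blk-core.c, blk_rq_err_bytes() sums bi_size over the request's bios, and blk_account_io_completion() converts the completed byte count to 512-byte sectors with a >> 9 shift for the per-partition statistics. A small standalone illustration with made-up bio sizes:

    /* Standalone sketch: sum per-bio sizes, then convert bytes to sectors. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int bio_sizes[] = { 4096, 8192, 512 };     /* hypothetical bios */
        unsigned int bytes = 0;

        for (unsigned int i = 0; i < sizeof(bio_sizes) / sizeof(bio_sizes[0]); i++)
            bytes += bio_sizes[i];

        /* per-partition statistics are kept in 512-byte sectors */
        printf("completed %u bytes = %u sectors\n", bytes, bytes >> 9);
        return 0;
    }
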
D | bio.c |
    1258 void bio_advance(struct bio *bio, unsigned bytes) in bio_advance() argument
    1261 bio_integrity_advance(bio, bytes); in bio_advance()
    1263 bio_crypt_advance(bio, bytes); in bio_advance()
    1264 bio_advance_iter(bio, &bio->bi_iter, bytes); in bio_advance()
    1274 unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len); in bio_copy_data_iter() local
    1278 memcpy(dst_buf, src_buf, bytes); in bio_copy_data_iter()
    1283 bio_advance_iter_single(src, src_iter, bytes); in bio_copy_data_iter()
    1284 bio_advance_iter_single(dst, dst_iter, bytes); in bio_copy_data_iter()
|
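
bio_advance() pushes the integrity and crypto state forward together with the main iterator, and bio_copy_data_iter() copies min(src_bv.bv_len, dst_bv.bv_len) bytes per step, then advances both sides by exactly that amount. A standalone sketch of that copy loop with fixed, made-up chunk sizes:

    /* Standalone sketch: copy min(src chunk, dst chunk), advance both equally. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char src[8192] = "source data";
        char dst[8192];
        /* hypothetical per-side chunk (bvec) lengths */
        size_t src_len = 4096, dst_len = 1024;
        size_t done = 0;

        while (done < sizeof(src) && done < sizeof(dst)) {
            size_t bytes = src_len < dst_len ? src_len : dst_len;

            memcpy(dst + done, src + done, bytes);
            done += bytes;      /* both iterators advance by 'bytes' */
        }

        printf("copied %zu bytes\n", done);
        return 0;
    }
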
D | bfq-cgroup.c |
    357 blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq)); in bfqg_stats_update_legacy_io()
    445 blkg_rwstat_exit(&stats->bytes); in bfqg_stats_exit()
    464 if (blkg_rwstat_init(&stats->bytes, gfp) || in bfqg_stats_init()
    1215 u64 sum = blkg_rwstat_total(&bfqg->stats.bytes); in bfqg_prfill_sectors()
    1233 offsetof(struct bfq_group, stats.bytes), &tmp); in bfqg_prfill_sectors_recursive()
    1316 .private = offsetof(struct bfq_group, stats.bytes),
    1359 .private = offsetof(struct bfq_group, stats.bytes),
|
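
The bfq-cgroup.c hits maintain a blkg_rwstat of bytes split by request direction, and the sectors files are derived from its total. A simplified standalone model (the bucket names and the >> 9 conversion below are illustrative assumptions, not the kernel's definitions):

    /* Standalone sketch: per-direction byte buckets and a derived total. */
    #include <stdint.h>
    #include <stdio.h>

    enum { RWSTAT_READ, RWSTAT_WRITE, RWSTAT_NR };

    struct rwstat {
        uint64_t cnt[RWSTAT_NR];
    };

    static void rwstat_add(struct rwstat *s, int is_write, uint64_t bytes)
    {
        s->cnt[is_write ? RWSTAT_WRITE : RWSTAT_READ] += bytes;
    }

    int main(void)
    {
        struct rwstat bytes = { { 0 } };

        rwstat_add(&bytes, 0, 4096);    /* a read request */
        rwstat_add(&bytes, 1, 8192);    /* a write request */

        uint64_t total = bytes.cnt[RWSTAT_READ] + bytes.cnt[RWSTAT_WRITE];

        printf("sectors = %llu\n", (unsigned long long)(total >> 9));
        return 0;
    }
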
D | blk-crypto-fallback.c |
    243 u8 bytes[BLK_CRYPTO_MAX_IV_SIZE]; member
    314 iv.bytes); in blk_crypto_fallback_encrypt_bio()
    415 iv.bytes); in blk_crypto_fallback_decrypt_bio()
|
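
blk-crypto-fallback keeps its IV in a structure whose bytes[] member is what gets handed to the cipher in the encrypt and decrypt paths. A standalone sketch of one buffer viewed both as raw bytes and as 64-bit words (the 16-byte size and the words[] view are assumptions for illustration only):

    /* Standalone sketch: one IV buffer, two views of the same storage. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define IV_SIZE 16

    union iv {
        uint64_t words[IV_SIZE / sizeof(uint64_t)];
        uint8_t bytes[IV_SIZE];
    };

    int main(void)
    {
        union iv iv;

        memset(&iv, 0, sizeof(iv));
        iv.words[0] = 42;       /* e.g. a per-data-unit number */

        /* the same storage handed to a crypto API as a byte array */
        printf("first IV byte: %u\n", (unsigned)iv.bytes[0]);
        return 0;
    }
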
D | bfq-iosched.h |
    840 struct blkg_rwstat bytes; member
|