Searched refs:bytes (Results 1 – 15 of 15) sorted by relevance

/block/
blk-map.c
167 unsigned int bytes = PAGE_SIZE; in bio_copy_user_iov() local
169 bytes -= offset; in bio_copy_user_iov()
171 if (bytes > len) in bio_copy_user_iov()
172 bytes = len; in bio_copy_user_iov()
192 if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) { in bio_copy_user_iov()
198 len -= bytes; in bio_copy_user_iov()
297 ssize_t bytes; in bio_map_user_iov() local
304 bytes = iov_iter_extract_pages(iter, &pages, LONG_MAX, in bio_map_user_iov()
306 if (unlikely(bytes <= 0)) { in bio_map_user_iov()
307 ret = bytes ? bytes : -EFAULT; in bio_map_user_iov()
[all …]
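
The hits in bio_copy_user_iov() show the classic page-at-a-time clamp: start from PAGE_SIZE, subtract the offset into the first page, then cap at the bytes remaining. A minimal userspace sketch of that arithmetic follows; the PAGE_SIZE value and function name are illustrative, not kernel API.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u

static void copy_in_page_chunks(char *dst, const char *src,
                                size_t len, size_t offset)
{
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;        /* first chunk may start mid-page */
		if (bytes > len)
			bytes = len;    /* last chunk may be short */

		memcpy(dst, src, bytes);
		dst += bytes;
		src += bytes;
		len -= bytes;
		offset = 0;             /* later chunks are page-aligned */
	}
}

int main(void)
{
	static char src[10000], dst[10000];

	memset(src, 'x', sizeof(src));
	copy_in_page_chunks(dst, src, sizeof(src) - 100, 100);
	printf("copied %zu bytes in page-sized chunks\n", sizeof(src) - 100);
	return 0;
}
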
bio-integrity.c
219 unsigned int bytes, offset, i; in bio_integrity_prep() local
282 bytes = PAGE_SIZE - offset; in bio_integrity_prep()
284 if (bytes > len) in bio_integrity_prep()
285 bytes = len; in bio_integrity_prep()
288 bytes, offset) < bytes) { in bio_integrity_prep()
293 buf += bytes; in bio_integrity_prep()
294 len -= bytes; in bio_integrity_prep()
380 unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9); in bio_integrity_advance() local
383 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes); in bio_integrity_advance()
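
bio_integrity_prep() uses the same PAGE_SIZE-minus-offset clamp, and bio_integrity_advance() steps a bvec iterator forward by a byte count (after converting data bytes to sectors with bytes_done >> 9). Below is a hedged sketch of advancing an iterator over a segment array; the seg and seg_iter types are invented for illustration.

#include <stdio.h>

struct seg { unsigned int len; };
struct seg_iter { unsigned int idx; unsigned int done; unsigned int size; };

static void seg_iter_advance(const struct seg *segs, struct seg_iter *it,
                             unsigned int bytes)
{
	it->size -= bytes;                 /* total bytes left in the walk */
	while (bytes) {
		unsigned int avail = segs[it->idx].len - it->done;
		unsigned int step = bytes < avail ? bytes : avail;

		it->done += step;
		bytes -= step;
		if (it->done == segs[it->idx].len) {  /* segment consumed */
			it->idx++;
			it->done = 0;
		}
	}
}

int main(void)
{
	struct seg segs[] = { {512}, {1024}, {512} };
	struct seg_iter it = { .idx = 0, .done = 0, .size = 2048 };

	seg_iter_advance(segs, &it, 700);
	printf("idx=%u done=%u size=%u\n", it.idx, it.done, it.size);
	return 0;
}
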
blk-merge.c
232 const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes, in bvec_split_segs() argument
235 unsigned max_len = min(max_bytes, UINT_MAX) - *bytes; in bvec_split_segs()
253 *bytes += total_len; in bvec_split_segs()
284 unsigned nsegs = 0, bytes = 0; in bio_split_rw() local
295 bytes + bv.bv_len <= max_bytes && in bio_split_rw()
299 bytes += bv.bv_len; in bio_split_rw()
301 if (bvec_split_segs(lim, &bv, &nsegs, &bytes, in bio_split_rw()
330 bytes = ALIGN_DOWN(bytes, lim->logical_block_size); in bio_split_rw()
338 return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs); in bio_split_rw()
416 unsigned int bytes = 0; in blk_recalc_rq_segments() local
[all …]
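
bio_split_rw() accumulates bytes per segment until a limit would be exceeded, rounds the total down to the logical block size so the split never lands mid-block, then converts to 512-byte sectors with bytes >> SECTOR_SHIFT. A standalone sketch with made-up limits; the kernel's ALIGN_DOWN uses masking and assumes a power-of-two size, plain division is used here for clarity.

#include <stdio.h>

#define SECTOR_SHIFT 9
#define ALIGN_DOWN(x, a) ((x) / (a) * (a))

int main(void)
{
	unsigned int seg_len[] = { 4096, 4096, 3072 };
	unsigned int max_bytes = 10240, lbs = 4096, bytes = 0;

	for (unsigned int i = 0; i < 3; i++) {
		if (bytes + seg_len[i] > max_bytes)
			break;                  /* would exceed the limit */
		bytes += seg_len[i];
	}
	bytes = ALIGN_DOWN(bytes, lbs);         /* never split mid-block */
	printf("split at %u bytes = %u sectors\n",
	       bytes, bytes >> SECTOR_SHIFT);   /* 8192 bytes = 16 sectors */
	return 0;
}
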
blk-crypto-internal.h
135 void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
136 static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes) in bio_crypt_advance() argument
139 __bio_crypt_advance(bio, bytes); in bio_crypt_advance()
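
The header pairs the out-of-line __bio_crypt_advance() with a static inline wrapper, so callers that have no crypt context pay only a cheap test. A generic sketch of that inline-guard pattern; all names below are invented.

#include <stdio.h>

struct obj { void *ctx; unsigned int pos; };

void __obj_advance(struct obj *o, unsigned int bytes);  /* out of line */

static inline void obj_advance(struct obj *o, unsigned int bytes)
{
	if (o->ctx)                       /* cheap inline check ... */
		__obj_advance(o, bytes);  /* ... rare slow path */
}

void __obj_advance(struct obj *o, unsigned int bytes)
{
	o->pos += bytes;
}

int main(void)
{
	struct obj a = { .ctx = &a, .pos = 0 };
	struct obj b = { .ctx = NULL, .pos = 0 };

	obj_advance(&a, 512);
	obj_advance(&b, 512);             /* no-op: no context attached */
	printf("a.pos=%u b.pos=%u\n", a.pos, b.pos);
	return 0;
}
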
blk.h
90 unsigned int bytes) in blk_segments() argument
98 if (bytes <= mss) in blk_segments()
101 return round_up(bytes, mss) >> ilog2(mss); in blk_segments()
102 return (bytes + mss - 1) / mss; in blk_segments()
509 bool should_fail_request(struct block_device *part, unsigned int bytes);
512 unsigned int bytes) in should_fail_request() argument
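
blk_segments() counts how many max-segment-size (mss) pieces a byte count spans: one segment if it fits, a shift-based round-up when mss is a power of two, and a plain ceiling division otherwise. The same arithmetic, standalone; the helper names are mine.

#include <stdio.h>

static unsigned int ilog2_u32(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

static unsigned int round_up_pow2(unsigned int x, unsigned int a)
{
	return (x + a - 1) & ~(a - 1);
}

static unsigned int segments(unsigned int bytes, unsigned int mss)
{
	if (bytes <= mss)
		return 1;
	if ((mss & (mss - 1)) == 0)       /* power of two: shift fast path */
		return round_up_pow2(bytes, mss) >> ilog2_u32(mss);
	return (bytes + mss - 1) / mss;   /* generic ceiling division */
}

int main(void)
{
	/* 9000 bytes over 4096-byte segments -> 3; over 3000-byte -> 3 */
	printf("%u %u\n", segments(9000, 4096), segments(9000, 3000));
	return 0;
}
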
blk-crypto.c
154 void __bio_crypt_advance(struct bio *bio, unsigned int bytes) in __bio_crypt_advance() argument
159 bytes >> bc->bc_key->data_unit_size_bits); in __bio_crypt_advance()
167 unsigned int bytes, in bio_crypt_dun_is_contiguous() argument
171 unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits; in bio_crypt_dun_is_contiguous()
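
__bio_crypt_advance() converts bytes to data units (bytes >> data_unit_size_bits) and bumps the data-unit number (DUN) by that count. Below is a sketch of a multiword DUN increment with carry propagation; the two-word width is an assumption for illustration.

#include <stdio.h>
#include <stdint.h>

#define DUN_WORDS 2

static void dun_add(uint64_t dun[DUN_WORDS], uint64_t inc)
{
	for (int i = 0; i < DUN_WORDS && inc; i++) {
		dun[i] += inc;
		inc = (dun[i] < inc);     /* wrapped: carry into next word */
	}
}

int main(void)
{
	uint64_t dun[DUN_WORDS] = { UINT64_MAX, 0 };
	unsigned int bytes = 8192, data_unit_size_bits = 12; /* 4 KiB units */

	dun_add(dun, bytes >> data_unit_size_bits);  /* advance by 2 units */
	printf("dun = %llu,%llu\n",
	       (unsigned long long)dun[1], (unsigned long long)dun[0]);
	return 0;
}
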
bounce.c
209 unsigned i = 0, bytes = 0; in __blk_queue_bounce() local
215 bytes += from.bv_len; in __blk_queue_bounce()
227 sectors = ALIGN_DOWN(bytes, queue_logical_block_size(q)) >> in __blk_queue_bounce()
blk-cgroup.c
628 dst->bytes[i] = src->bytes[i]; in blkg_iostat_set()
980 dst->bytes[i] += src->bytes[i]; in blkg_iostat_add()
990 dst->bytes[i] -= src->bytes[i]; in blkg_iostat_sub()
1130 tmp.bytes[BLKG_IOSTAT_READ] += in blkcg_fill_root_iostats()
1132 tmp.bytes[BLKG_IOSTAT_WRITE] += in blkcg_fill_root_iostats()
1134 tmp.bytes[BLKG_IOSTAT_DISCARD] += in blkcg_fill_root_iostats()
1164 rbytes = bis->cur.bytes[BLKG_IOSTAT_READ]; in blkcg_print_one_stat()
1165 wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE]; in blkcg_print_one_stat()
1166 dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD]; in blkcg_print_one_stat()
2167 bis->cur.bytes[rwd] += bio->bi_iter.bi_size; in blk_cgroup_bio_start()
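
blk-cgroup.c keeps per-cgroup byte counters as u64 bytes[BLKG_IOSTAT_NR] arrays and propagates them with elementwise set/add/sub, pushing cur-minus-last deltas up the hierarchy. A compact sketch of that delta propagation; the struct and field names are illustrative.

#include <stdio.h>
#include <stdint.h>

#define NR_DIRS 3  /* read, write, discard */

struct iostat { uint64_t bytes[NR_DIRS]; };

static void iostat_add(struct iostat *dst, const struct iostat *src)
{
	for (int i = 0; i < NR_DIRS; i++)
		dst->bytes[i] += src->bytes[i];
}

static void iostat_sub(struct iostat *dst, const struct iostat *src)
{
	for (int i = 0; i < NR_DIRS; i++)
		dst->bytes[i] -= src->bytes[i];
}

int main(void)
{
	struct iostat parent = {{0}}, cur = {{4096, 8192, 0}}, last = {{0}};
	struct iostat delta = cur;

	iostat_sub(&delta, &last);   /* delta = cur - last */
	iostat_add(&parent, &delta); /* push the delta up the hierarchy */
	last = cur;                  /* remember what was propagated */
	printf("parent read=%llu write=%llu\n",
	       (unsigned long long)parent.bytes[0],
	       (unsigned long long)parent.bytes[1]);
	return 0;
}
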
bio.c
1395 void __bio_advance(struct bio *bio, unsigned bytes) in __bio_advance() argument
1398 bio_integrity_advance(bio, bytes); in __bio_advance()
1400 bio_crypt_advance(bio, bytes); in __bio_advance()
1401 bio_advance_iter(bio, &bio->bi_iter, bytes); in __bio_advance()
1411 unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len); in bio_copy_data_iter() local
1415 memcpy(dst_buf, src_buf, bytes); in bio_copy_data_iter()
1420 bio_advance_iter_single(src, src_iter, bytes); in bio_copy_data_iter()
1421 bio_advance_iter_single(dst, dst_iter, bytes); in bio_copy_data_iter()
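
bio_copy_data_iter() copies min(src_bv.bv_len, dst_bv.bv_len) bytes per step and advances both iterators by the same amount, so mismatched segment boundaries on the two sides are handled naturally. A userspace sketch with arrays of segment lengths standing in for bio_vecs.

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char src[] = "hello, block layer!";
	char dst[sizeof(src)] = { 0 };
	/* differently sized "segments" on each side, 19 bytes total each */
	size_t src_segs[] = { 5, 9, 5 }, dst_segs[] = { 7, 7, 5 };
	size_t si = 0, di = 0, src_done = 0, dst_done = 0, off = 0;

	while (off < sizeof(src) - 1) {   /* copy the 19 visible chars */
		size_t src_left = src_segs[si] - src_done;
		size_t dst_left = dst_segs[di] - dst_done;
		size_t bytes = src_left < dst_left ? src_left : dst_left;

		memcpy(dst + off, src + off, bytes);
		off += bytes;
		src_done += bytes;        /* advance both sides equally */
		dst_done += bytes;
		if (src_done == src_segs[si]) { si++; src_done = 0; }
		if (dst_done == dst_segs[di]) { di++; dst_done = 0; }
	}
	printf("%s\n", dst);
	return 0;
}
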
bfq-cgroup.c
350 blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq)); in bfqg_stats_update_legacy_io()
438 blkg_rwstat_exit(&stats->bytes); in bfqg_stats_exit()
457 if (blkg_rwstat_init(&stats->bytes, gfp) || in bfqg_stats_init()
1227 u64 sum = blkg_rwstat_total(&bfqg->stats.bytes); in bfqg_prfill_sectors()
1245 offsetof(struct bfq_group, stats.bytes), &tmp); in bfqg_prfill_sectors_recursive()
1326 .private = offsetof(struct bfq_group, stats.bytes),
1369 .private = offsetof(struct bfq_group, stats.bytes),
blk-crypto-fallback.c
245 u8 bytes[BLK_CRYPTO_MAX_IV_SIZE]; member
316 iv.bytes); in blk_crypto_fallback_encrypt_bio()
417 iv.bytes); in blk_crypto_fallback_decrypt_bio()
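
blk-crypto-fallback.c declares the IV as a union so the DUN can be filled as 64-bit words while the cipher sees a byte array (iv.bytes). A standalone sketch of that union trick; note the byte order below depends on the host, which is why the kernel pins it down with an explicit little-endian conversion.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MAX_IV_SIZE 32

union iv {
	uint64_t dun[MAX_IV_SIZE / 8];
	uint8_t bytes[MAX_IV_SIZE];
};

int main(void)
{
	union iv iv;

	memset(&iv, 0, sizeof(iv));
	iv.dun[0] = 0x1122334455667788ULL;   /* fill as 64-bit words ... */
	for (int i = 0; i < 8; i++)          /* ... hand off as bytes */
		printf("%02x ", iv.bytes[i]);
	printf("\n");
	return 0;
}
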
blk-core.c
494 bool should_fail_request(struct block_device *part, unsigned int bytes) in should_fail_request() argument
496 return part->bd_make_it_fail && should_fail(&fail_make_request, bytes); in should_fail_request()
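
should_fail_request() gates artificial failures on a per-device flag plus the generic fault-injection core's should_fail(). A toy stand-in that fails every Nth call; the interval logic is invented for illustration and is not the real fault_attr behavior.

#include <stdio.h>
#include <stdbool.h>

struct fail_attr { unsigned int interval; unsigned int count; };

static bool should_fail(struct fail_attr *attr, unsigned int bytes)
{
	(void)bytes;  /* the real core can also filter on request size */
	return attr->interval && ++attr->count % attr->interval == 0;
}

int main(void)
{
	struct fail_attr fail = { .interval = 3, .count = 0 };

	for (int i = 1; i <= 6; i++)
		printf("request %d: %s\n", i,
		       should_fail(&fail, 4096) ? "injected failure" : "ok");
	return 0;
}
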
blk-cgroup.h
42 u64 bytes[BLKG_IOSTAT_NR]; member
bfq-iosched.h
922 struct blkg_rwstat bytes; member
blk-mq.c
785 static void blk_account_io_completion(struct request *req, unsigned int bytes) in blk_account_io_completion() argument
791 part_stat_add(req->part, sectors[sgrp], bytes >> 9); in blk_account_io_completion()
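
blk_account_io_completion() converts completed bytes to 512-byte sectors (bytes >> 9) and adds them to a per-operation stat bucket. A sketch of that op-group bucketing with an illustrative enum.

#include <stdio.h>

enum stat_group { STAT_READ, STAT_WRITE, STAT_DISCARD, NR_STAT_GROUPS };

static unsigned long sectors[NR_STAT_GROUPS];

static void account_io_completion(enum stat_group sgrp, unsigned int bytes)
{
	sectors[sgrp] += bytes >> 9;     /* bytes to 512-byte sectors */
}

int main(void)
{
	account_io_completion(STAT_READ, 4096);
	account_io_completion(STAT_WRITE, 8192);
	printf("read=%lu write=%lu sectors\n",
	       sectors[STAT_READ], sectors[STAT_WRITE]);
	return 0;
}
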