Searched refs:bytes (Results 1 – 11 of 11) sorted by relevance

/block/
blk-map.c
168 unsigned int bytes = PAGE_SIZE; in bio_copy_user_iov() local
170 bytes -= offset; in bio_copy_user_iov()
172 if (bytes > len) in bio_copy_user_iov()
173 bytes = len; in bio_copy_user_iov()
193 if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) { in bio_copy_user_iov()
199 len -= bytes; in bio_copy_user_iov()
261 ssize_t bytes; in bio_map_user_iov() local
265 bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs); in bio_map_user_iov()
266 if (unlikely(bytes <= 0)) { in bio_map_user_iov()
267 ret = bytes ? bytes : -EFAULT; in bio_map_user_iov()
[all …]
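
The blk-map.c hits show the per-page clamping idiom in bio_copy_user_iov(): start from PAGE_SIZE, subtract the intra-page offset, cap the result at the bytes still pending, then hand the page to the bio. A minimal userspace sketch of that loop, assuming a 4096-byte page and a printf in place of bio_add_pc_page():

    #include <stdio.h>

    #define PAGE_SIZE 4096u  /* assumed page size, for illustration */

    /* Walk a buffer in page-bounded chunks, mirroring the clamping in
     * bio_copy_user_iov(): bytes = PAGE_SIZE - offset, capped at len. */
    static void walk_pages(unsigned long start, unsigned int len)
    {
        unsigned int offset = start % PAGE_SIZE; /* intra-page offset */

        while (len) {
            unsigned int bytes = PAGE_SIZE;

            bytes -= offset;     /* stay inside the current page */
            if (bytes > len)
                bytes = len;     /* do not run past the buffer */

            printf("chunk at %#lx: %u bytes\n", start, bytes);

            start += bytes;
            len -= bytes;
            offset = 0;          /* later pages start at offset 0 */
        }
    }

    int main(void)
    {
        walk_pages(0x1f00, 10000); /* crosses several page boundaries */
        return 0;
    }
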
bio-integrity.c
216 unsigned int bytes, offset, i; in bio_integrity_prep() local
277 bytes = PAGE_SIZE - offset; in bio_integrity_prep()
282 if (bytes > len) in bio_integrity_prep()
283 bytes = len; in bio_integrity_prep()
286 bytes, offset); in bio_integrity_prep()
294 if (ret < bytes) in bio_integrity_prep()
297 buf += bytes; in bio_integrity_prep()
298 len -= bytes; in bio_integrity_prep()
385 unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9); in bio_integrity_advance() local
388 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes); in bio_integrity_advance()
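
bio_integrity_prep() clamps bytes the same way, while bio_integrity_advance() first converts the completed data bytes to 512-byte sectors (bytes_done >> 9) and scales that to integrity bytes via bio_integrity_bytes(). A rough standalone sketch of the conversion; the tuple_size field and its value are assumptions, not the kernel's real struct layout:

    #include <stdio.h>

    #define SECTOR_SHIFT 9  /* 512-byte sectors, as in bytes_done >> 9 */

    /* Hypothetical integrity profile: metadata bytes per 512-byte
     * sector, mirroring what bio_integrity_bytes(bi, sectors) yields. */
    struct integrity_profile {
        unsigned int tuple_size; /* e.g. 8 bytes of PI per sector */
    };

    static unsigned int integrity_bytes(const struct integrity_profile *bi,
                                        unsigned int sectors)
    {
        return sectors * bi->tuple_size;
    }

    int main(void)
    {
        struct integrity_profile bi = { .tuple_size = 8 };
        unsigned int bytes_done = 64 * 1024; /* 64 KiB of data completed */

        /* Advance the integrity iterator by the metadata covering the
         * completed data: bytes_done >> 9 sectors' worth. */
        unsigned int adv = integrity_bytes(&bi, bytes_done >> SECTOR_SHIFT);

        printf("advance integrity iter by %u bytes\n", adv); /* 1024 */
        return 0;
    }
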
blk-crypto-internal.h
108 void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
109 static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes) in bio_crypt_advance() argument
112 __bio_crypt_advance(bio, bytes); in bio_crypt_advance()
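
The header pairs the out-of-line __bio_crypt_advance() with a static inline wrapper so that, in the common case, callers pay only for a cheap test instead of a function call. A generic userspace sketch of the pattern; the NULL-context guard is an assumption about the elided check:

    #include <stdio.h>

    struct bio_like {
        void *crypt_ctx;   /* NULL when the bio carries no crypto context */
        unsigned long pos;
    };

    /* Out-of-line slow path, as __bio_crypt_advance() lives in blk-crypto.c. */
    void crypt_advance_slow(struct bio_like *bio, unsigned int bytes)
    {
        bio->pos += bytes;
        printf("advanced crypto state by %u bytes\n", bytes);
    }

    /* Header-style inline wrapper: the common no-crypto case costs only
     * a pointer test (the exact guard condition is an assumption). */
    static inline void crypt_advance(struct bio_like *bio, unsigned int bytes)
    {
        if (bio->crypt_ctx)
            crypt_advance_slow(bio, bytes);
    }

    int main(void)
    {
        int ctx;
        struct bio_like plain = { NULL, 0 }, enc = { &ctx, 0 };

        crypt_advance(&plain, 512); /* no-op: no call is made */
        crypt_advance(&enc, 512);   /* takes the slow path */
        return 0;
    }
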
blk-crypto.c
137 void __bio_crypt_advance(struct bio *bio, unsigned int bytes) in __bio_crypt_advance() argument
142 bytes >> bc->bc_key->data_unit_size_bits); in __bio_crypt_advance()
150 unsigned int bytes, in bio_crypt_dun_is_contiguous() argument
154 unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits; in bio_crypt_dun_is_contiguous()
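
Here the data unit number (DUN) advances by whole crypto data units, with the increment derived from the byte count as bytes >> data_unit_size_bits; bio_crypt_dun_is_contiguous() uses the same shift as the expected carry between two ranges. A simplified sketch with a single 64-bit DUN, whereas the kernel keeps a multi-word array:

    #include <stdio.h>
    #include <stdint.h>

    /* Crypto context with the DUN collapsed to one 64-bit counter. */
    struct crypt_ctx {
        uint64_t dun;
        unsigned int data_unit_size_bits; /* log2 of the crypto data unit */
    };

    /* Advance the DUN by however many whole data units 'bytes' covers,
     * as in __bio_crypt_advance(): bytes >> data_unit_size_bits. */
    static void crypt_advance(struct crypt_ctx *bc, unsigned int bytes)
    {
        bc->dun += bytes >> bc->data_unit_size_bits;
    }

    /* Two ranges merge only if the next DUN continues where this one,
     * advanced by 'bytes', would land (cf. bio_crypt_dun_is_contiguous()). */
    static int dun_is_contiguous(const struct crypt_ctx *bc,
                                 unsigned int bytes, uint64_t next_dun)
    {
        uint64_t carry = bytes >> bc->data_unit_size_bits;

        return next_dun == bc->dun + carry;
    }

    int main(void)
    {
        struct crypt_ctx bc = { .dun = 100, .data_unit_size_bits = 12 };

        printf("contiguous: %d\n", dun_is_contiguous(&bc, 8192, 102)); /* 1 */
        crypt_advance(&bc, 8192);
        printf("dun now %llu\n", (unsigned long long)bc.dun); /* 102 */
        return 0;
    }
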
scsi_ioctl.c
414 unsigned int in_len, out_len, bytes, opcode, cmdlen; in sg_scsi_ioctl() local
432 bytes = max(in_len, out_len); in sg_scsi_ioctl()
433 if (bytes) { in sg_scsi_ioctl()
434 buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN); in sg_scsi_ioctl()
491 if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO)) { in sg_scsi_ioctl()
501 bytes = (OMAX_SB_LEN > req->sense_len) ? in sg_scsi_ioctl()
503 if (copy_to_user(sic->data, req->sense, bytes)) in sg_scsi_ioctl()
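
sg_scsi_ioctl() sizes a single bounce buffer for the larger of the two transfer directions, then on completion copies back at most OMAX_SB_LEN sense bytes and never more than the device produced. A userspace sketch of those two size computations; the OMAX_SB_LEN value is illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    #define OMAX_SB_LEN 16 /* cap on returned sense bytes, as in sg_scsi_ioctl() */

    static unsigned int max_u(unsigned int a, unsigned int b)
    {
        return a > b ? a : b;
    }

    int main(void)
    {
        unsigned int in_len = 512, out_len = 4096;

        /* One bounce buffer sized for the larger direction, since the
         * same allocation serves the write-out and the read-back. */
        unsigned int bytes = max_u(in_len, out_len);
        void *buffer = bytes ? calloc(1, bytes) : NULL;

        printf("allocated %u bytes\n", bytes);

        /* Copy back at most OMAX_SB_LEN sense bytes, never more than
         * the device actually produced. */
        unsigned int sense_len = 24;
        unsigned int copy = OMAX_SB_LEN > sense_len ? sense_len : OMAX_SB_LEN;

        printf("copy %u sense bytes to user\n", copy);
        free(buffer);
        return 0;
    }
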
blk-cgroup.c
740 dst->bytes[i] = src->bytes[i]; in blkg_iostat_set()
750 dst->bytes[i] += src->bytes[i]; in blkg_iostat_add()
760 dst->bytes[i] -= src->bytes[i]; in blkg_iostat_sub()
842 tmp.bytes[BLKG_IOSTAT_READ] += in blkcg_fill_root_iostats()
844 tmp.bytes[BLKG_IOSTAT_WRITE] += in blkcg_fill_root_iostats()
846 tmp.bytes[BLKG_IOSTAT_DISCARD] += in blkcg_fill_root_iostats()
899 rbytes = bis->cur.bytes[BLKG_IOSTAT_READ]; in blkcg_print_stat()
900 wbytes = bis->cur.bytes[BLKG_IOSTAT_WRITE]; in blkcg_print_stat()
901 dbytes = bis->cur.bytes[BLKG_IOSTAT_DISCARD]; in blkcg_print_stat()
1935 bis->cur.bytes[rwd] += bio->bi_iter.bi_size; in blk_cgroup_bio_start()
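
blk-cgroup.c keeps per-cgroup byte counters in a small array indexed by operation type (read, write, discard), with matching set/add/sub helpers; subtraction is what lets a stale snapshot be replaced without double counting. A self-contained sketch of the scheme, with names simplified from struct blkg_iostat:

    #include <stdio.h>

    /* One counter slot per operation type, as in blkg_iostat's bytes[]. */
    enum { IOSTAT_READ, IOSTAT_WRITE, IOSTAT_DISCARD, IOSTAT_NR };

    struct iostat {
        unsigned long long bytes[IOSTAT_NR];
    };

    /* Accumulate a sample (cf. blkg_iostat_add) and retract one
     * (cf. blkg_iostat_sub) when replacing a stale snapshot. */
    static void iostat_add(struct iostat *dst, const struct iostat *src)
    {
        for (int i = 0; i < IOSTAT_NR; i++)
            dst->bytes[i] += src->bytes[i];
    }

    static void iostat_sub(struct iostat *dst, const struct iostat *src)
    {
        for (int i = 0; i < IOSTAT_NR; i++)
            dst->bytes[i] -= src->bytes[i];
    }

    int main(void)
    {
        struct iostat total = { { 0 } };
        struct iostat sample = { .bytes = { [IOSTAT_READ]  = 4096,
                                            [IOSTAT_WRITE] = 8192 } };

        iostat_add(&total, &sample);
        printf("rbytes=%llu wbytes=%llu\n",
               total.bytes[IOSTAT_READ], total.bytes[IOSTAT_WRITE]);

        iostat_sub(&total, &sample); /* back to zero */
        return 0;
    }
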
bio.c
1191 void bio_advance(struct bio *bio, unsigned bytes) in bio_advance() argument
1194 bio_integrity_advance(bio, bytes); in bio_advance()
1196 bio_crypt_advance(bio, bytes); in bio_advance()
1197 bio_advance_iter(bio, &bio->bi_iter, bytes); in bio_advance()
1206 unsigned bytes; in bio_copy_data_iter() local
1212 bytes = min(src_bv.bv_len, dst_bv.bv_len); in bio_copy_data_iter()
1219 bytes); in bio_copy_data_iter()
1226 bio_advance_iter(src, src_iter, bytes); in bio_copy_data_iter()
1227 bio_advance_iter(dst, dst_iter, bytes); in bio_copy_data_iter()
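
bio_copy_data_iter() copies between two bios whose segment boundaries need not line up: each step moves min(src_bv.bv_len, dst_bv.bv_len) bytes and advances both iterators by exactly that amount. A standalone sketch over plain arrays, with struct seg standing in for struct bio_vec:

    #include <stdio.h>
    #include <string.h>

    /* A segment of a scattered buffer, standing in for struct bio_vec. */
    struct seg {
        char *base;
        unsigned int len;
    };

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    /* Copy between two segment arrays the way bio_copy_data_iter() does:
     * each step moves min(remaining src segment, remaining dst segment)
     * bytes and advances both sides by that amount. */
    static void copy_segs(struct seg *dst, struct seg *src, unsigned int n)
    {
        unsigned int si = 0, di = 0, soff = 0, doff = 0;

        while (si < n && di < n) {
            unsigned int bytes = min_u(src[si].len - soff,
                                       dst[di].len - doff);

            memcpy(dst[di].base + doff, src[si].base + soff, bytes);

            soff += bytes;
            doff += bytes;
            if (soff == src[si].len) { si++; soff = 0; }
            if (doff == dst[di].len) { di++; doff = 0; }
        }
    }

    int main(void)
    {
        char a[6] = "hello", b[6] = { 0 };
        struct seg src[2] = { { a, 3 }, { a + 3, 3 } };
        struct seg dst[2] = { { b, 4 }, { b + 4, 2 } };

        copy_segs(dst, src, 2);
        printf("%s\n", b); /* hello */
        return 0;
    }
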
blk-core.c
681 static bool should_fail_request(struct hd_struct *part, unsigned int bytes) in should_fail_request() argument
683 return part->make_it_fail && should_fail(&fail_make_request, bytes); in should_fail_request()
699 unsigned int bytes) in should_fail_request() argument
1246 unsigned int bytes = 0; in blk_rq_err_bytes() local
1262 bytes += bio->bi_iter.bi_size; in blk_rq_err_bytes()
1266 BUG_ON(blk_rq_bytes(rq) && !bytes); in blk_rq_err_bytes()
1267 return bytes; in blk_rq_err_bytes()
1286 static void blk_account_io_completion(struct request *req, unsigned int bytes) in blk_account_io_completion() argument
1294 part_stat_add(part, sectors[sgrp], bytes >> 9); in blk_account_io_completion()
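
blk_account_io_completion() charges completed I/O to the partition in 512-byte sectors, converting with the shift bytes >> 9 rather than a division. A minimal sketch of that accounting; struct part_stats and the stat groups are simplified assumptions:

    #include <stdio.h>

    #define SECTOR_SHIFT 9 /* 512-byte sectors: bytes >> 9 */

    /* Per-partition completion counters, indexed by stat group,
     * echoing part_stat_add(part, sectors[sgrp], ...). */
    enum { STAT_READ, STAT_WRITE, STAT_NR };

    struct part_stats {
        unsigned long sectors[STAT_NR];
    };

    static void account_io_completion(struct part_stats *part, int sgrp,
                                      unsigned int bytes)
    {
        /* Accounting is kept in sectors, so completed bytes are
         * converted with a shift rather than a division. */
        part->sectors[sgrp] += bytes >> SECTOR_SHIFT;
    }

    int main(void)
    {
        struct part_stats part = { { 0 } };

        account_io_completion(&part, STAT_WRITE, 128 * 1024);
        printf("write sectors: %lu\n", part.sectors[STAT_WRITE]); /* 256 */
        return 0;
    }
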
bfq-cgroup.c
357 blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq)); in bfqg_stats_update_legacy_io()
445 blkg_rwstat_exit(&stats->bytes); in bfqg_stats_exit()
464 if (blkg_rwstat_init(&stats->bytes, gfp) || in bfqg_stats_init()
1205 u64 sum = blkg_rwstat_total(&bfqg->stats.bytes); in bfqg_prfill_sectors()
1223 offsetof(struct bfq_group, stats.bytes), &tmp); in bfqg_prfill_sectors_recursive()
1306 .private = offsetof(struct bfq_group, stats.bytes),
1349 .private = offsetof(struct bfq_group, stats.bytes),
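
bfq-cgroup.c wires its cgroup files to a generic printer by storing offsetof(struct bfq_group, stats.bytes) in each file's .private, so one prfill routine can serve any rwstat member. A compact sketch of that offsetof-driven dispatch; the struct layout here is illustrative:

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified rwstat and group, echoing struct blkg_rwstat
     * embedded in struct bfq_group's stats. */
    struct rwstat { unsigned long long read, write; };

    struct group_stats { struct rwstat bytes; struct rwstat ios; };
    struct group { int weight; struct group_stats stats; };

    /* Generic printer: the field to print is identified by its byte
     * offset inside struct group, as the cftable's .private does with
     * offsetof(struct bfq_group, stats.bytes). */
    static void prfill(const struct group *g, size_t off)
    {
        const struct rwstat *s = (const void *)((const char *)g + off);

        printf("read=%llu write=%llu\n", s->read, s->write);
    }

    int main(void)
    {
        struct group g = { .stats.bytes = { 4096, 8192 },
                           .stats.ios   = { 4, 2 } };

        prfill(&g, offsetof(struct group, stats.bytes));
        prfill(&g, offsetof(struct group, stats.ios));
        return 0;
    }
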
blk-crypto-fallback.c
242 u8 bytes[BLK_CRYPTO_MAX_IV_SIZE]; member
312 iv.bytes); in blk_crypto_fallback_encrypt_bio()
412 iv.bytes); in blk_crypto_fallback_decrypt_bio()
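
The fallback declares the IV with a raw byte-array member so it can be handed to the crypto layer directly as iv.bytes. A small sketch of the likely shape; pairing an integer dun[] view with the byte view in a union is an assumption based on how iv.bytes is used here:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define MAX_IV_SIZE 32 /* stands in for BLK_CRYPTO_MAX_IV_SIZE */

    /* Fill the IV as integer data-unit numbers, hand the cipher a raw
     * byte view; a union provides both without copying. */
    union iv {
        uint64_t dun[MAX_IV_SIZE / sizeof(uint64_t)];
        uint8_t bytes[MAX_IV_SIZE];
    };

    int main(void)
    {
        union iv iv;

        memset(&iv, 0, sizeof(iv));
        iv.dun[0] = 42; /* data unit number for this block */

        /* A real caller would pass iv.bytes to the crypto API. */
        for (size_t i = 0; i < sizeof(iv.bytes); i++)
            printf("%02x", iv.bytes[i]);
        printf("\n");
        return 0;
    }
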
bfq-iosched.h
815 struct blkg_rwstat bytes; member