/block/partitions/
D | aix.c |
      80  size_t count)  in read_lba() argument
      84  if (!buffer || lba + count / 512 > get_capacity(state->disk) - 1ULL)  in read_lba()
      87  while (count) {  in read_lba()
      93  if (copied > count)  in read_lba()
      94  copied = count;  in read_lba()
      99  count -= copied;  in read_lba()
     115  size_t count = sizeof(struct pvd);  in alloc_pvd() local
     118  p = kmalloc(count, GFP_KERNEL);  in alloc_pvd()
     122  if (read_lba(state, lba, (u8 *) p, count) < count) {  in alloc_pvd()
     140  size_t count = sizeof(struct lvname) * LVM_MAXLVS;  in alloc_lvn() local
    [all …]
D | efi.c |
     236  u64 lba, u8 *buffer, size_t count)  in read_lba() argument
     245  while (count) {  in read_lba()
     251  if (copied > count)  in read_lba()
     252  copied = count;  in read_lba()
     257  count -= copied;  in read_lba()
     274  size_t count;  in alloc_read_gpt_entries() local
     280  count = (size_t)le32_to_cpu(gpt->num_partition_entries) *  in alloc_read_gpt_entries()
     282  if (!count)  in alloc_read_gpt_entries()
     284  pte = kmalloc(count, GFP_KERNEL);  in alloc_read_gpt_entries()
     289  (u8 *) pte, count) < count) {  in alloc_read_gpt_entries()
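Both partition parsers use the same shape for read_lba(): count is the number of bytes still wanted, and each pass copies at most one 512-byte sector, clamping the last copy to whatever is left. The loop below is a rough reconstruction from the fragments above, not the verbatim kernel code; the read_part_sector()/put_dev_sector() calls are the usual partition-parser helpers and are assumed here.

    /* Sketch of the read_lba() loop referenced above (aix.c/efi.c style). */
    static size_t read_lba(struct parsed_partitions *state, u64 lba,
                           u8 *buffer, size_t count)
    {
            size_t totalreadcount = 0;

            /* Reject NULL buffers and reads past the end of the disk (line 84). */
            if (!buffer || lba + count / 512 > get_capacity(state->disk) - 1ULL)
                    return 0;

            while (count) {                 /* line 87: loop until all bytes copied */
                    int copied = 512;
                    Sector sect;
                    unsigned char *data = read_part_sector(state, lba++, &sect);

                    if (!data)
                            break;
                    if (copied > count)     /* lines 93-94: clamp the final, short copy */
                            copied = count;
                    memcpy(buffer, data, copied);
                    put_dev_sector(sect);

                    buffer += copied;
                    totalreadcount += copied;
                    count -= copied;        /* line 99: count is the bytes still missing */
            }
            return totalreadcount;
    }

Callers such as alloc_pvd() and alloc_read_gpt_entries() then allocate count bytes with kmalloc() and treat a return value smaller than count as a failed read (lines 115-122, 284-289).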
/block/
D | badblocks.c |
      76  hi = bb->count;  in badblocks_check()
     137  for (i = 0; i < bb->count ; i++) {  in badblocks_update_acked()
     189  hi = bb->count;  in badblocks_set()
     235  if (sectors && hi < bb->count) {  in badblocks_set()
     265  if (sectors == 0 && hi < bb->count) {  in badblocks_set()
     279  (bb->count - hi - 1) * 8);  in badblocks_set()
     280  bb->count--;  in badblocks_set()
     287  if (bb->count >= MAX_BADBLOCKS) {  in badblocks_set()
     295  (bb->count - hi) * 8);  in badblocks_set()
     296  bb->count++;  in badblocks_set()
    [all …]
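The badblocks fragments show bb->count tracking how many packed 8-byte entries are live in the table: inserting a range shifts the tail up by (bb->count - hi) * 8 bytes and increments the count, merging or deleting shifts it down and decrements, and insertion is refused once the count reaches MAX_BADBLOCKS. A minimal stand-alone illustration of that array-edit idiom follows; the names and the 512-entry cap are made up for the example and are not the kernel's.

    #include <stdint.h>
    #include <string.h>

    #define MAX_ENTRIES 512                 /* stand-in for MAX_BADBLOCKS */

    struct bb_table {
            uint64_t entries[MAX_ENTRIES];  /* packed sector/length/ack words */
            int count;                      /* number of live entries */
    };

    /* Insert a new entry at position hi, keeping the array contiguous. */
    static int bb_insert(struct bb_table *bb, int hi, uint64_t entry)
    {
            if (bb->count >= MAX_ENTRIES)
                    return -1;              /* table full, mirrors the MAX_BADBLOCKS check */

            /* Shift the tail up one slot: (count - hi) entries of 8 bytes each. */
            memmove(&bb->entries[hi + 1], &bb->entries[hi],
                    (bb->count - hi) * 8);
            bb->entries[hi] = entry;
            bb->count++;
            return 0;
    }

    /* Remove the entry at position hi by shifting the tail down over it. */
    static void bb_remove(struct bb_table *bb, int hi)
    {
            memmove(&bb->entries[hi], &bb->entries[hi + 1],
                    (bb->count - hi - 1) * 8);
            bb->count--;
    }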
D | blk-sysfs.c |
      34  queue_var_store(unsigned long *var, const char *page, size_t count)  in queue_var_store() argument
      45  return count;  in queue_var_store()
      67  queue_requests_store(struct request_queue *q, const char *page, size_t count)  in queue_requests_store() argument
      75  ret = queue_var_store(&nr, page, count);  in queue_requests_store()
     100  queue_ra_store(struct request_queue *q, const char *page, size_t count)  in queue_ra_store() argument
     107  ret = queue_var_store(&ra_kb, page, count);  in queue_ra_store()
     186  const char *page, size_t count)  in queue_discard_max_store() argument
     189  ssize_t ret = queue_var_store(&max_discard, page, count);  in queue_discard_max_store()
     239  queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)  in queue_max_sectors_store() argument
     244  ssize_t ret = queue_var_store(&max_sectors_kb, page, count);  in queue_max_sectors_store()
    [all …]
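Nearly every blk-sysfs hit follows one store shape: queue_var_store() parses the sysfs buffer into an unsigned long and, on success, returns count so the caller can report that the whole write was consumed. A hedged reconstruction of the helper and of the typical caller shape; the parsing helper is close to the fragments above, while the caller's "apply the value" step is left as a comment because it differs per attribute and per kernel version.

    /* Approximation of queue_var_store(): parse the buffer, return count
     * (bytes consumed) on success or a negative errno on failure. */
    static ssize_t queue_var_store(unsigned long *var, const char *page,
                                   size_t count)
    {
            unsigned long v;
            int err;

            err = kstrtoul(page, 10, &v);
            if (err || v > UINT_MAX)
                    return -EINVAL;

            *var = v;
            return count;           /* line 45: success means "used everything" */
    }

    /* Typical caller shape (queue_ra_store and friends). */
    static ssize_t queue_ra_store(struct request_queue *q, const char *page,
                                  size_t count)
    {
            unsigned long ra_kb;
            ssize_t ret = queue_var_store(&ra_kb, page, count);    /* line 107 */

            if (ret < 0)
                    return ret;
            /* ... apply ra_kb to the queue's readahead setting here ... */
            return ret;             /* ret == count on success */
    }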
D | blk-mq-debugfs.c |
     148  size_t count, loff_t *ppos)  in queue_state_write() argument
     161  if (count >= sizeof(opbuf)) {  in queue_state_write()
     166  if (copy_from_user(opbuf, buf, count))  in queue_state_write()
     181  return count;  in queue_state_write()
     196  size_t count, loff_t *ppos)  in queue_write_hint_store() argument
     204  return count;  in queue_write_hint_store()
     543  size_t count, loff_t *ppos)  in hctx_io_poll_write() argument
     548  return count;  in hctx_io_poll_write()
     569  size_t count, loff_t *ppos)  in hctx_dispatched_write() argument
     576  return count;  in hctx_dispatched_write()
    [all …]
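The blk-mq-debugfs write handlers share a defensive pattern: refuse any write that would not fit the on-stack buffer, copy exactly count bytes from user space, parse the keyword, and return count to mark the write consumed. A sketch of that shape, with the first parameter, buffer size, and parsing details approximated rather than copied from the kernel:

    /* Sketch of a debugfs write handler in the style of queue_state_write(). */
    static ssize_t queue_state_write(void *data, const char __user *buf,
                                     size_t count, loff_t *ppos)
    {
            char opbuf[16] = { };

            /* Leave room for the terminating NUL (line 161). */
            if (count >= sizeof(opbuf))
                    return -EINVAL;

            if (copy_from_user(opbuf, buf, count))  /* line 166 */
                    return -EFAULT;

            /* ... trim the buffer and act on the keyword it contains ... */

            return count;                           /* line 181: whole write consumed */
    }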
D | blk-mq-sched.c |
      92  unsigned int count = 0;  in blk_mq_dispatch_hctx_list() local
      99  count++;  in blk_mq_dispatch_hctx_list()
     104  return blk_mq_dispatch_rq_list(hctx, &hctx_list, count);  in blk_mq_dispatch_hctx_list()
     125  int count = 0;  in __blk_mq_do_dispatch_sched() local
     170  count++;  in __blk_mq_do_dispatch_sched()
     182  } while (count < max_dispatch);  in __blk_mq_do_dispatch_sched()
     184  if (!count) {  in __blk_mq_do_dispatch_sched()
     200  dispatched = blk_mq_dispatch_rq_list(hctx, &rq_list, count);  in __blk_mq_do_dispatch_sched()
D | blk-timeout.c |
      49  const char *buf, size_t count)  in part_timeout_store() argument
      54  if (count) {  in part_timeout_store()
      65  return count;  in part_timeout_store()
D | blk-integrity.c |
     234  size_t count)  in integrity_attr_store() argument
     243  ret = entry->store(bi, page, count);  in integrity_attr_store()
     268  const char *page, size_t count)  in integrity_verify_store() argument
     278  return count;  in integrity_verify_store()
     287  const char *page, size_t count)  in integrity_generate_store() argument
     297  return count;  in integrity_generate_store()
D | blk-mq-tag.c |
     391  unsigned *count = data;  in blk_mq_tagset_count_completed_rqs() local
     394  (*count)++;  in blk_mq_tagset_count_completed_rqs()
     408  unsigned count = 0;  in blk_mq_tagset_wait_completed_request() local
     411  blk_mq_tagset_count_completed_rqs, &count);  in blk_mq_tagset_wait_completed_request()
     412  if (!count)  in blk_mq_tagset_wait_completed_request()
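blk_mq_tagset_count_completed_rqs() is the usual "counter behind a void * cookie" idiom: the waiting function passes &count as the iterator's opaque data, the callback bumps it for every completed request, and the caller stops once a pass leaves the count at zero. Stripped of the blk-mq specifics, the idiom looks like the stand-alone sketch below; the types, function names, and iterator here are illustrative only.

    #include <stdbool.h>
    #include <stddef.h>

    struct request { bool completed; };

    /* Iterator callback: the opaque data pointer is really an unsigned counter. */
    static bool count_completed(struct request *rq, void *data)
    {
            unsigned *count = data;

            if (rq->completed)
                    (*count)++;
            return true;            /* keep iterating */
    }

    /* Walk a set of requests, handing each one to the callback with the cookie. */
    static void for_each_request(struct request *rqs, size_t n,
                                 bool (*fn)(struct request *, void *), void *data)
    {
            for (size_t i = 0; i < n; i++)
                    if (!fn(&rqs[i], data))
                            break;
    }

    static bool any_completed(struct request *rqs, size_t n)
    {
            unsigned count = 0;

            for_each_request(rqs, n, count_completed, &count);
            return count != 0;      /* mirrors the `if (!count)` bail-out above */
    }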
D | disk-events.c |
     386  const char *buf, size_t count)  in disk_events_poll_msecs_store() argument
     391  if (!count || !sscanf(buf, "%ld", &intv))  in disk_events_poll_msecs_store()
     403  return count;  in disk_events_poll_msecs_store()
D | blk-core.c |
    1061  unsigned int count;  in submit_bio() local
    1064  count = queue_logical_block_size(  in submit_bio()
    1067  count = bio_sectors(bio);  in submit_bio()
    1070  count_vm_events(PGPGOUT, count);  in submit_bio()
    1073  count_vm_events(PGPGIN, count);  in submit_bio()
D | blk.h |
     263  const char *buf, size_t count);
     352  const char *page, size_t count);
D | elevator.c |
     775  size_t count)  in elv_iosched_store() argument
     780  return count;  in elv_iosched_store()
     784  return count;  in elv_iosched_store()
D | bfq-iosched.c |
    7136  __FUNC(struct elevator_queue *e, const char *page, size_t count) \
    7155  return count; \
    7168  static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
    7182  return count; \
    7189  const char *page, size_t count)  in bfq_max_budget_store() argument
    7209  return count;  in bfq_max_budget_store()
    7217  const char *page, size_t count)  in bfq_timeout_sync_store() argument
    7236  return count;  in bfq_timeout_sync_store()
    7240  const char *page, size_t count)  in bfq_strict_guarantees_store() argument
    7258  return count;  in bfq_strict_guarantees_store()
    [all …]
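bfq-iosched.c, kyber-iosched.c, and mq-deadline.c stamp out most of their sysfs store handlers with a STORE_FUNCTION-style macro, which is why these fragments show a bare `__FUNC(... size_t count) \` with line-continuation backslashes: each expansion parses page into one tunable, clamps it, and returns count. A simplified version of what such a macro expands to; the struct name, field, and clamping are illustrative, not the exact bfq or deadline macros.

    /* Simplified STORE_FUNCTION-style macro: each use generates one sysfs
     * store handler that parses the buffer, clamps the value, and returns count. */
    #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)                         \
    static ssize_t __FUNC(struct elevator_queue *e, const char *page,      \
                          size_t count)                                    \
    {                                                                       \
            struct sched_data *sd = e->elevator_data;                       \
            long __data;                                                    \
            int ret = kstrtol(page, 10, &__data);                           \
                                                                            \
            if (ret)                                                        \
                    return ret;                                             \
            if (__data < (MIN))                                             \
                    __data = (MIN);                                         \
            else if (__data > (MAX))                                        \
                    __data = (MAX);                                         \
            *(__PTR) = __data;                                              \
            return count;   /* tell sysfs the whole write was used */       \
    }

    /* Example expansion: a handler for a hypothetical fifo_expire tunable. */
    STORE_FUNCTION(sched_fifo_expire_store, &sd->fifo_expire, 0, INT_MAX);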
D | bio.c |
    1046  bio->bi_iter.bi_size = iter->count;  in __bio_iov_bvec_set()
    1054  iov_iter_advance(iter, iter->count);  in bio_iov_bvec_set()
    1065  iov_iter_advance(iter, i.count);  in bio_iov_bvec_set_append()
D | genhd.c |
    1025  const char *buf, size_t count)  in part_fail_store() argument
    1029  if (count > 0 && sscanf(buf, "%d", &i) > 0)  in part_fail_store()
    1032  return count;  in part_fail_store()
D | kyber-iosched.c |
     871  const char *page, size_t count) \
     883  return count; \
D | blk-cgroup.c |
     427  int count = BLKG_DESTROY_BATCH_SIZE;  in blkg_destroy_all() local
     443  if (!(--count)) {  in blkg_destroy_all()
     444  count = BLKG_DESTROY_BATCH_SIZE;  in blkg_destroy_all()
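In blkg_destroy_all() the counter runs the other way: count starts at BLKG_DESTROY_BATCH_SIZE, is decremented once per destroyed group, and when it reaches zero the queue lock is dropped and the counter reset, so tearing down a large cgroup tree cannot hold the lock for too long. A hedged sketch of that batching idiom, with the function name, batch size, and restart handling simplified from the real code:

    /* Sketch of the batched-teardown idiom shown by the blkg_destroy_all() hits. */
    #define BLKG_DESTROY_BATCH_SIZE 64      /* illustrative; the kernel picks its own value */

    static void destroy_all_groups(struct request_queue *q)
    {
            struct blkcg_gq *blkg, *n;
            int count = BLKG_DESTROY_BATCH_SIZE;

    restart:
            spin_lock_irq(&q->queue_lock);
            list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                    blkg_destroy(blkg);

                    /* Every BLKG_DESTROY_BATCH_SIZE groups, drop the lock, let
                     * other work run, and start over on the (shrinking) list. */
                    if (!(--count)) {
                            count = BLKG_DESTROY_BATCH_SIZE;
                            spin_unlock_irq(&q->queue_lock);
                            cond_resched();
                            goto restart;
                    }
            }
            spin_unlock_irq(&q->queue_lock);
    }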
D | mq-deadline.c |
     999  static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
    1012  return count; \
D | blk-map.c |
     137  unsigned int len = iter->count;  in bio_copy_user_iov()
D | blk-throttle.c |
    2507  const char *page, size_t count)  in blk_throtl_sample_time_store() argument
    2520  return count;  in blk_throtl_sample_time_store()