
Searched refs:count (Results 1 – 14 of 14) sorted by relevance

/block/partitions/
aix.c:96 size_t count) in read_lba() argument
100 if (!buffer || lba + count / 512 > last_lba(state->bdev)) in read_lba()
103 while (count) { in read_lba()
109 if (copied > count) in read_lba()
110 copied = count; in read_lba()
115 count -= copied; in read_lba()
131 size_t count = sizeof(struct pvd); in alloc_pvd() local
134 p = kmalloc(count, GFP_KERNEL); in alloc_pvd()
138 if (read_lba(state, lba, (u8 *) p, count) < count) { in alloc_pvd()
156 size_t count = sizeof(struct lvname) * LVM_MAXLVS; in alloc_lvn() local
[all …]
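The aix.c hits above all come from one bounded read loop: validate that the request fits on the device, then copy in chunks, clamping each chunk to the bytes still outstanding. Below is a minimal userspace sketch of that pattern, assuming a 512-byte sector size and using pread() in place of the kernel's page-cache read; the helper names read_lba_sketch and last_lba_sketch are hypothetical.

#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/stat.h>

#define SECTOR_SIZE 512

static uint64_t last_lba_sketch(int fd)
{
	struct stat st;

	if (fstat(fd, &st) < 0 || st.st_size < SECTOR_SIZE)
		return 0;
	return (uint64_t)st.st_size / SECTOR_SIZE - 1;
}

static ssize_t read_lba_sketch(int fd, uint64_t lba, uint8_t *buffer, size_t count)
{
	size_t totalread = 0;

	/* Same bounds check as the snippet: the request must not run
	 * past the last addressable sector. */
	if (!buffer || lba + count / SECTOR_SIZE > last_lba_sketch(fd))
		return -1;

	while (count) {
		ssize_t copied = pread(fd, buffer + totalread, count,
				       (off_t)(lba * SECTOR_SIZE) + totalread);
		if (copied <= 0)
			break;
		if ((size_t)copied > count)
			copied = count;	/* clamp, mirroring the original loop */
		totalread += copied;
		count -= copied;
	}
	return totalread;
}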
efi.c:252 u64 lba, u8 *buffer, size_t count) in read_lba() argument
261 while (count) { in read_lba()
267 if (copied > count) in read_lba()
268 copied = count; in read_lba()
273 count -= copied; in read_lba()
290 size_t count; in alloc_read_gpt_entries() local
296 count = (size_t)le32_to_cpu(gpt->num_partition_entries) * in alloc_read_gpt_entries()
298 if (!count) in alloc_read_gpt_entries()
300 pte = kmalloc(count, GFP_KERNEL); in alloc_read_gpt_entries()
305 (u8 *) pte, count) < count) { in alloc_read_gpt_entries()
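alloc_read_gpt_entries() sizes its buffer from two on-disk fields multiplied together, so the interesting part is guarding that product; in the kernel, kmalloc_array() is the idiomatic way to get a checked multiply. A userspace sketch of the same guard, where num_entries and entry_size stand in for the GPT header fields:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

static void *alloc_entries_sketch(uint32_t num_entries, uint32_t entry_size)
{
	size_t count;

	/* Refuse a product that would wrap size_t before it reaches
	 * the allocator. */
	if (entry_size && num_entries > SIZE_MAX / entry_size)
		return NULL;
	count = (size_t)num_entries * entry_size;
	if (!count)		/* same empty-table check as the snippet */
		return NULL;
	return malloc(count);
}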
/block/
blk-sysfs.c:30 queue_var_store(unsigned long *var, const char *page, size_t count) in queue_var_store() argument
41 return count; in queue_var_store()
50 queue_requests_store(struct request_queue *q, const char *page, size_t count) in queue_requests_store() argument
58 ret = queue_var_store(&nr, page, count); in queue_requests_store()
85 queue_ra_store(struct request_queue *q, const char *page, size_t count) in queue_ra_store() argument
88 ssize_t ret = queue_var_store(&ra_kb, page, count); in queue_ra_store()
163 const char *page, size_t count) in queue_discard_max_store() argument
166 ssize_t ret = queue_var_store(&max_discard, page, count); in queue_discard_max_store()
198 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) in queue_max_sectors_store() argument
203 ssize_t ret = queue_var_store(&max_sectors_kb, page, count); in queue_max_sectors_store()
[all …]
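Every store function in blk-sysfs.c follows the same sysfs convention: parse the value out of page, and on success return count so the whole write is reported as consumed. A userspace sketch of that contract, with var_store_sketch as a hypothetical stand-in for queue_var_store():

#include <errno.h>
#include <stdlib.h>
#include <sys/types.h>

static ssize_t var_store_sketch(unsigned long *var, const char *page, size_t count)
{
	char *end;
	unsigned long v = strtoul(page, &end, 10);

	if (end == page)
		return -EINVAL;	/* no digits: reject the write */
	*var = v;
	return count;		/* success: report the whole buffer consumed */
}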
blk-integrity.c:258 size_t count) in integrity_attr_store() argument
267 ret = entry->store(bi, page, count); in integrity_attr_store()
292 const char *page, size_t count) in integrity_verify_store() argument
302 return count; in integrity_verify_store()
311 const char *page, size_t count) in integrity_generate_store() argument
321 return count; in integrity_generate_store()
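integrity_attr_store() is a dispatcher: it maps the written attribute to a per-attribute store callback and forwards (page, count) unchanged, which is why each callback ends in return count. A compact sketch of that table-of-callbacks shape; all names here are illustrative, not the kernel's:

#include <stddef.h>
#include <string.h>
#include <sys/types.h>

struct attr_sketch {
	const char *name;
	ssize_t (*store)(const char *page, size_t count);
};

static int verify_flag;

static ssize_t verify_store_sketch(const char *page, size_t count)
{
	verify_flag = (page[0] == '1');
	return count;			/* consumed the whole write */
}

static const struct attr_sketch attrs[] = {
	{ "verify", verify_store_sketch },
};

static ssize_t attr_store_sketch(const char *name, const char *page, size_t count)
{
	for (size_t i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		if (!strcmp(attrs[i].name, name))
			return attrs[i].store(page, count);
	return -1;			/* unknown attribute */
}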
bsg.c:513 __bsg_read(char __user *buf, size_t count, struct bsg_device *bd, in __bsg_read() argument
519 if (count % sizeof(struct sg_io_v4)) in __bsg_read()
523 nr_commands = count / sizeof(struct sg_io_v4); in __bsg_read()
575 bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) in bsg_read() argument
581 dprintk("%s: read %Zd bytes\n", bd->name, count); in bsg_read()
586 ret = __bsg_read(buf, count, bd, NULL, &bytes_read); in bsg_read()
596 size_t count, ssize_t *bytes_written, in __bsg_write() argument
603 if (count % sizeof(struct sg_io_v4)) in __bsg_write()
606 nr_commands = count / sizeof(struct sg_io_v4); in __bsg_write()
650 bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) in bsg_write() argument
[all …]
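Both __bsg_read() and __bsg_write() treat the user buffer as an array of fixed-size sg_io_v4 records: a count that is not an exact multiple of the record size is rejected, and the quotient becomes the command count. That check as a standalone sketch, with sg_io_v4_sketch as a placeholder type:

#include <errno.h>
#include <stddef.h>

struct sg_io_v4_sketch { char pad[64]; };	/* stand-in for struct sg_io_v4 */

static int split_commands_sketch(size_t count, size_t *nr_commands)
{
	if (count % sizeof(struct sg_io_v4_sketch))
		return -EINVAL;			/* partial record: reject */
	*nr_commands = count / sizeof(struct sg_io_v4_sketch);
	return 0;
}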
blk-timeout.c:50 const char *buf, size_t count) in part_timeout_store() argument
55 if (count) { in part_timeout_store()
68 return count; in part_timeout_store()
blk-map.c:90 if (!iter || !iter->count) in blk_rq_map_user_iov()
113 if (unaligned || (q->dma_pad_mask & iter->count) || map_data) in blk_rq_map_user_iov()
124 if (bio->bi_iter.bi_size != iter->count) { in blk_rq_map_user_iov()
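The blk-map.c hit tests (q->dma_pad_mask & iter->count) to decide whether the total length honors the queue's padding granularity; a mask of 2^n - 1 turns this into a cheap n-bit alignment check. A one-function sketch of the test:

#include <stdbool.h>
#include <stddef.h>

static bool len_pad_aligned_sketch(size_t len, unsigned long dma_pad_mask)
{
	/* With dma_pad_mask = 0x1ff (512-byte granularity), any low bit
	 * set in len means the length would need padding. */
	return (len & dma_pad_mask) == 0;
}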
blk-core.c:626 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; in blk_init_rl()
943 if (rl->count[sync] < queue_congestion_off_threshold(q)) in __freed_request()
946 if (rl->count[sync] + 1 <= q->nr_requests) { in __freed_request()
964 rl->count[sync]--; in freed_request()
986 if (rl->count[BLK_RW_SYNC] >= on_thresh) in blk_update_nr_requests()
988 else if (rl->count[BLK_RW_SYNC] < off_thresh) in blk_update_nr_requests()
991 if (rl->count[BLK_RW_ASYNC] >= on_thresh) in blk_update_nr_requests()
993 else if (rl->count[BLK_RW_ASYNC] < off_thresh) in blk_update_nr_requests()
996 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { in blk_update_nr_requests()
1003 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { in blk_update_nr_requests()
[all …]
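The blk-core.c hits are the request-list accounting: rl->count[] tracks allocated requests per direction, and blk_update_nr_requests() compares it against separate on/off thresholds so the congestion flag has hysteresis instead of toggling at a single boundary. A sketch of that logic; the struct layout and threshold handling are illustrative only:

#include <stdbool.h>

enum { BLK_RW_SYNC, BLK_RW_ASYNC, BLK_RW_MAX };

struct request_list_sketch {
	int count[BLK_RW_MAX];		/* allocated requests per direction */
	bool congested[BLK_RW_MAX];
};

static void update_congestion_sketch(struct request_list_sketch *rl, int sync,
				     int on_thresh, int off_thresh)
{
	if (rl->count[sync] >= on_thresh)
		rl->congested[sync] = true;	/* set at the high-water mark */
	else if (rl->count[sync] < off_thresh)
		rl->congested[sync] = false;	/* clear only below the low mark */
}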
deadline-iosched.c:383 deadline_var_store(int *var, const char *page, size_t count) in deadline_var_store() argument
388 return count; in deadline_var_store()
408 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
412 int ret = deadline_var_store(&__data, (page), count); \
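The __FUNC lines above are the body of deadline's store-function macro: one macro expansion per tunable stamps out a store function that funnels into deadline_var_store(). A trimmed sketch of the technique; the macro and variable names here are invented:

#include <stdlib.h>
#include <sys/types.h>

static ssize_t deadline_var_store_sketch(int *var, const char *page, size_t count)
{
	*var = atoi(page);
	return count;
}

#define STORE_FUNCTION_SKETCH(__FUNC, __PTR)				\
static ssize_t __FUNC(const char *page, size_t count)			\
{									\
	return deadline_var_store_sketch(__PTR, page, count);		\
}

static int fifo_expire_sketch;

/* Expands to a complete store function for this one tunable. */
STORE_FUNCTION_SKETCH(fifo_expire_store_sketch, &fifo_expire_sketch)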
cfq-iosched.c:92 unsigned count; member
847 return cfqg->service_tree_idle.count; in cfq_group_busy_queues_wl()
849 return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count + in cfq_group_busy_queues_wl()
850 cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count + in cfq_group_busy_queues_wl()
851 cfqg->service_trees[wl_class][SYNC_WORKLOAD].count; in cfq_group_busy_queues_wl()
857 return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count + in cfqg_busy_async_queues()
858 cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count; in cfqg_busy_async_queues()
1168 if (!root->count) in cfq_rb_first()
1202 --root->count; in cfq_rb_erase()
1464 - cfqg->service_tree_idle.count; in cfq_group_served()
[all …]
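In cfq-iosched.c, count is a member of the service-tree root: every insert and erase keeps a cached element count, so busy-queue checks like cfq_group_busy_queues_wl() are simple additions rather than tree walks. The idea, sketched with a linked list standing in for the kernel rbtree:

#include <stddef.h>

struct node_sketch {
	struct node_sketch *next;
};

struct counted_root_sketch {
	struct node_sketch *first;
	unsigned count;			/* cached number of nodes under this root */
};

static void insert_sketch(struct counted_root_sketch *root, struct node_sketch *n)
{
	n->next = root->first;
	root->first = n;
	root->count++;
}

static struct node_sketch *first_sketch(struct counted_root_sketch *root)
{
	if (!root->count)		/* same fast-path test as cfq_rb_first() */
		return NULL;
	return root->first;
}

static void erase_sketch(struct counted_root_sketch *root, struct node_sketch *n)
{
	struct node_sketch **pp = &root->first;

	while (*pp && *pp != n)
		pp = &(*pp)->next;
	if (*pp) {
		*pp = n->next;
		--root->count;		/* keep the cached count in sync */
	}
}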
partition-generic.c:158 const char *buf, size_t count) in part_fail_store() argument
163 if (count > 0 && sscanf(buf, "%d", &i) > 0) in part_fail_store()
166 return count; in part_fail_store()
elevator.c:987 size_t count) in elv_iosched_store() argument
992 return count; in elv_iosched_store()
996 return count; in elv_iosched_store()
genhd.c:1736 const char *buf, size_t count) in disk_events_poll_msecs_store() argument
1741 if (!count || !sscanf(buf, "%ld", &intv)) in disk_events_poll_msecs_store()
1751 return count; in disk_events_poll_msecs_store()
bio.c:1140 unsigned int len = iter->count; in bio_copy_user_iov()
1177 iter->nr_segs, iter->count); in bio_copy_user_iov()
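bio_copy_user_iov() reads iter->count, the total bytes remaining across all iovec segments, to size its bounce buffer. A simplified sketch of that iterator bookkeeping; this is not the kernel's struct iov_iter layout:

#include <stddef.h>
#include <sys/uio.h>

struct iter_sketch {
	const struct iovec *iov;	/* current segment */
	unsigned long nr_segs;		/* segments remaining */
	size_t iov_offset;		/* offset into the current segment */
	size_t count;			/* total bytes left across all segments */
};

static size_t advance_sketch(struct iter_sketch *it, size_t bytes)
{
	if (bytes > it->count)
		bytes = it->count;
	it->count -= bytes;		/* the global byte count shrinks first */
	/* per-segment cursor maintenance elided in this sketch */
	return bytes;
}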