Home
last modified time | relevance | path

Searched refs:count (Results 1 – 20 of 20) sorted by relevance

/block/partitions/
aix.c:97 size_t count) in read_lba() argument
101 if (!buffer || lba + count / 512 > last_lba(state->bdev)) in read_lba()
104 while (count) { in read_lba()
110 if (copied > count) in read_lba()
111 copied = count; in read_lba()
116 count -= copied; in read_lba()
132 size_t count = sizeof(struct pvd); in alloc_pvd() local
135 p = kmalloc(count, GFP_KERNEL); in alloc_pvd()
139 if (read_lba(state, lba, (u8 *) p, count) < count) { in alloc_pvd()
157 size_t count = sizeof(struct lvname) * LVM_MAXLVS; in alloc_lvn() local
[all …]
efi.c:252 u64 lba, u8 *buffer, size_t count) in read_lba() argument
261 while (count) { in read_lba()
267 if (copied > count) in read_lba()
268 copied = count; in read_lba()
273 count -= copied; in read_lba()
290 size_t count; in alloc_read_gpt_entries() local
296 count = (size_t)le32_to_cpu(gpt->num_partition_entries) * in alloc_read_gpt_entries()
298 if (!count) in alloc_read_gpt_entries()
300 pte = kmalloc(count, GFP_KERNEL); in alloc_read_gpt_entries()
305 (u8 *) pte, count) < count) { in alloc_read_gpt_entries()
/block/
badblocks.c:84 hi = bb->count; in badblocks_check()
145 for (i = 0; i < bb->count ; i++) { in badblocks_update_acked()
197 hi = bb->count; in badblocks_set()
243 if (sectors && hi < bb->count) { in badblocks_set()
273 if (sectors == 0 && hi < bb->count) { in badblocks_set()
287 (bb->count - hi - 1) * 8); in badblocks_set()
288 bb->count--; in badblocks_set()
295 if (bb->count >= MAX_BADBLOCKS) { in badblocks_set()
303 (bb->count - hi) * 8); in badblocks_set()
304 bb->count++; in badblocks_set()
[all …]
blk-sysfs.c:33 queue_var_store(unsigned long *var, const char *page, size_t count) in queue_var_store() argument
44 return count; in queue_var_store()
66 queue_requests_store(struct request_queue *q, const char *page, size_t count) in queue_requests_store() argument
74 ret = queue_var_store(&nr, page, count); in queue_requests_store()
101 queue_ra_store(struct request_queue *q, const char *page, size_t count) in queue_ra_store() argument
104 ssize_t ret = queue_var_store(&ra_kb, page, count); in queue_ra_store()
189 const char *page, size_t count) in queue_discard_max_store() argument
192 ssize_t ret = queue_var_store(&max_discard, page, count); in queue_discard_max_store()
229 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) in queue_max_sectors_store() argument
234 ssize_t ret = queue_var_store(&max_sectors_kb, page, count); in queue_max_sectors_store()
[all …]
blk-mq-debugfs.c:92 size_t count, loff_t *ppos) in queue_state_write() argument
105 if (count >= sizeof(opbuf)) { in queue_state_write()
110 if (copy_from_user(opbuf, buf, count)) in queue_state_write()
125 return count; in queue_state_write()
150 size_t count, loff_t *ppos) in queue_write_hint_store() argument
158 return count; in queue_write_hint_store()
533 size_t count, loff_t *ppos) in hctx_io_poll_write() argument
538 return count; in hctx_io_poll_write()
559 size_t count, loff_t *ppos) in hctx_dispatched_write() argument
566 return count; in hctx_dispatched_write()
[all …]
blk-integrity.c:258 size_t count) in integrity_attr_store() argument
267 ret = entry->store(bi, page, count); in integrity_attr_store()
292 const char *page, size_t count) in integrity_verify_store() argument
302 return count; in integrity_verify_store()
311 const char *page, size_t count) in integrity_generate_store() argument
321 return count; in integrity_generate_store()
bsg.c:507 __bsg_read(char __user *buf, size_t count, struct bsg_device *bd, in __bsg_read() argument
513 if (count % sizeof(struct sg_io_v4)) in __bsg_read()
517 nr_commands = count / sizeof(struct sg_io_v4); in __bsg_read()
569 bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) in bsg_read() argument
575 dprintk("%s: read %zd bytes\n", bd->name, count); in bsg_read()
580 ret = __bsg_read(buf, count, bd, NULL, &bytes_read); in bsg_read()
590 size_t count, ssize_t *bytes_written, in __bsg_write() argument
597 if (count % sizeof(struct sg_io_v4)) in __bsg_write()
600 nr_commands = count / sizeof(struct sg_io_v4); in __bsg_write()
644 bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) in bsg_write() argument
[all …]
blk-timeout.c:50 const char *buf, size_t count) in part_timeout_store() argument
55 if (count) { in part_timeout_store()
68 return count; in part_timeout_store()
blk-core.c:743 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; in blk_init_rl()
1095 if (rl->count[sync] < queue_congestion_off_threshold(q)) in __freed_request()
1098 if (rl->count[sync] + 1 <= q->nr_requests) { in __freed_request()
1116 rl->count[sync]--; in freed_request()
1140 if (rl->count[BLK_RW_SYNC] >= on_thresh) in blk_update_nr_requests()
1142 else if (rl->count[BLK_RW_SYNC] < off_thresh) in blk_update_nr_requests()
1145 if (rl->count[BLK_RW_ASYNC] >= on_thresh) in blk_update_nr_requests()
1147 else if (rl->count[BLK_RW_ASYNC] < off_thresh) in blk_update_nr_requests()
1150 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { in blk_update_nr_requests()
1157 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { in blk_update_nr_requests()
[all …]
cfq-iosched.c:98 unsigned count; member
883 return cfqg->service_tree_idle.count; in cfq_group_busy_queues_wl()
885 return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count + in cfq_group_busy_queues_wl()
886 cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count + in cfq_group_busy_queues_wl()
887 cfqg->service_trees[wl_class][SYNC_WORKLOAD].count; in cfq_group_busy_queues_wl()
893 return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count + in cfqg_busy_async_queues()
894 cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count; in cfqg_busy_async_queues()
1185 if (!root->count) in cfq_rb_first()
1204 --root->count; in cfq_rb_erase()
1477 - cfqg->service_tree_idle.count; in cfq_group_served()
[all …]
deadline-iosched.c:401 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
414 return count; \
partition-generic.c:170 const char *buf, size_t count) in part_fail_store() argument
175 if (count > 0 && sscanf(buf, "%d", &i) > 0) in part_fail_store()
178 return count; in part_fail_store()
bfq-iosched.c:4879 __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4898 return count; \
4911 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\
4925 return count; \
4932 const char *page, size_t count) in bfq_max_budget_store() argument
4952 return count; in bfq_max_budget_store()
4960 const char *page, size_t count) in bfq_timeout_sync_store() argument
4979 return count; in bfq_timeout_sync_store()
4983 const char *page, size_t count) in bfq_strict_guarantees_store() argument
5001 return count; in bfq_strict_guarantees_store()
[all …]
mq-deadline.c:485 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
498 return count; \
elevator.c:1098 size_t count) in elv_iosched_store() argument
1103 return count; in elv_iosched_store()
1107 return count; in elv_iosched_store()
kyber-iosched.c:657 const char *page, size_t count) \
669 return count; \
blk.h:344 const char *page, size_t count);
genhd.c:1834 const char *buf, size_t count) in disk_events_poll_msecs_store() argument
1839 if (!count || !sscanf(buf, "%ld", &intv)) in disk_events_poll_msecs_store()
1849 return count; in disk_events_poll_msecs_store()
blk-throttle.c:2459 const char *page, size_t count) in blk_throtl_sample_time_store() argument
2472 return count; in blk_throtl_sample_time_store()
bio.c:1266 unsigned int len = iter->count; in bio_copy_user_iov()