/block/partitions/ |
D | check.h |
    32   sector_t n, Sector *p)   in read_part_sector() argument
    34   if (n >= get_capacity(state->bdev->bd_disk)) {   in read_part_sector()
    38   return read_dev_sector(state->bdev, n, p);   in read_part_sector()
    42   put_partition(struct parsed_partitions *p, int n, sector_t from, sector_t size)   in put_partition() argument
    44   if (n < p->limit) {   in put_partition()
    47   p->parts[n].from = from;   in put_partition()
    48   p->parts[n].size = size;   in put_partition()
    49   snprintf(tmp, sizeof(tmp), " %s%d", p->name, n);   in put_partition()

D | aix.c |
    187  struct lvname *n = NULL;   in aix_partition() local
    226  n = alloc_lvn(state, vgda_sector + vgda_len - 33);   in aix_partition()
    227  if (n) {   in aix_partition()
    277  n[lv_ix].name);   in aix_partition()
    287  char tmp[sizeof(n[i].name) + 1]; // null char   in aix_partition()
    289  snprintf(tmp, sizeof(tmp), "%s", n[i].name);   in aix_partition()
    296  kfree(n);   in aix_partition()

D | msdos.c |
    546  sector_t n = 2;   in msdos_partition() local
    548  n = min(size, max(sector_size, n));   in msdos_partition()
    549  put_partition(state, slot, start, n);   in msdos_partition()
    572  int n;   in msdos_partition() local
    577  for (n = 0; subtypes[n].parse && id != subtypes[n].id; n++)   in msdos_partition()
    580  if (!subtypes[n].parse)   in msdos_partition()
    582  subtypes[n].parse(state, start_sect(p) * sector_size,   in msdos_partition()

D | efi.c |
    256  sector_t n = lba * (bdev_logical_block_size(bdev) / 512);   in read_lba() local
    264  unsigned char *data = read_part_sector(state, n++, &sect);   in read_lba()

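The read_lba() lines above scale a logical block address from the device's logical block size down to 512-byte sector units before reading. A minimal userspace sketch of that arithmetic, assuming a hypothetical 4 KiB logical block size; the values are illustrative, not taken from the kernel:

    /* Sketch of the LBA scaling shown above: a logical block address given
     * in device-block units is converted to 512-byte sector units.
     * block_size and lba are made-up example values. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t lba = 1;                /* e.g. the second logical block */
        unsigned int block_size = 4096;  /* assumed 4 KiB logical blocks */
        uint64_t n = lba * (block_size / 512);

        /* With 4096-byte blocks, logical block 1 begins at 512-byte sector 8. */
        printf("LBA %llu -> 512-byte sector %llu\n",
               (unsigned long long)lba, (unsigned long long)n);
        return 0;
    }
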
/block/ |
D | compat_ioctl.c |
    189  compat_int_t n;   in compat_blkpg_ioctl() local
    192  err = get_user(n, &ua32->op);   in compat_blkpg_ioctl()
    193  err |= put_user(n, &a->op);   in compat_blkpg_ioctl()
    194  err |= get_user(n, &ua32->flags);   in compat_blkpg_ioctl()
    195  err |= put_user(n, &a->flags);   in compat_blkpg_ioctl()
    196  err |= get_user(n, &ua32->datalen);   in compat_blkpg_ioctl()
    197  err |= put_user(n, &a->datalen);   in compat_blkpg_ioctl()

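compat_blkpg_ioctl() bounces each field of the 32-bit argument through n into the native structure, OR-ing the return codes so a single check at the end catches any failed copy. A userspace sketch of that error-accumulation idiom; copy_int() and the two struct layouts are hypothetical stand-ins for get_user()/put_user() and the real blkpg argument types:

    #include <stdio.h>

    /* copy_int() stands in for get_user()/put_user(): 0 on success,
     * nonzero on a faulting access. */
    static int copy_int(int *dst, const int *src)
    {
        if (!src || !dst)
            return -1;
        *dst = *src;
        return 0;
    }

    /* Hypothetical cut-down argument layouts; not the kernel's definitions. */
    struct blkpg_arg32 { int op, flags, datalen; };
    struct blkpg_arg   { int op, flags, datalen; };

    int main(void)
    {
        struct blkpg_arg32 ua32 = { 1, 0, 4096 };
        struct blkpg_arg a;
        int n, err;

        /* Accumulate errors with |= so one check covers every copy. */
        err  = copy_int(&n, &ua32.op);
        err |= copy_int(&a.op, &n);
        err |= copy_int(&n, &ua32.flags);
        err |= copy_int(&a.flags, &n);
        err |= copy_int(&n, &ua32.datalen);
        err |= copy_int(&a.datalen, &n);

        if (err)
            printf("copy failed\n");
        else
            printf("op=%d flags=%d datalen=%d\n", a.op, a.flags, a.datalen);
        return 0;
    }
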
D | blk-zoned.c |
    80   unsigned int i, n, nz;   in blkdev_report_zones() local
    148  n = 0;   in blkdev_report_zones()
    168  n < nr_rep && nz < nrz) {   in blkdev_report_zones()
    172  n++;   in blkdev_report_zones()
    177  if (n >= nr_rep || nz >= nrz)   in blkdev_report_zones()

D | genhd.c |
    339  struct blk_major_name **n, *p;   in register_blkdev() local
    380  for (n = &major_names[index]; *n; n = &(*n)->next) {   in register_blkdev()
    381  if ((*n)->major == major)   in register_blkdev()
    384  if (!*n)   in register_blkdev()
    385  *n = p;   in register_blkdev()
    403  struct blk_major_name **n;   in unregister_blkdev() local
    408  for (n = &major_names[index]; *n; n = &(*n)->next)   in unregister_blkdev()
    409  if ((*n)->major == major)   in unregister_blkdev()
    411  if (!*n || strcmp((*n)->name, name)) {   in unregister_blkdev()
    414  p = *n;   in unregister_blkdev()
    [all …]

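register_blkdev() and unregister_blkdev() walk the major_names chain through a pointer-to-pointer cursor, so the same assignment can update either the list head or a ->next field with no special case for the first element. A self-contained userspace sketch of that idiom; name_node is a hypothetical type, and the kernel's hashing by index and error reporting are omitted:

    #include <stdio.h>
    #include <stdlib.h>

    struct name_node {
        int major;
        struct name_node *next;
    };

    static struct name_node *head;

    static int insert_major(int major)
    {
        struct name_node **n, *p;

        /* Walk by address of the link: n points at head or at a ->next. */
        for (n = &head; *n; n = &(*n)->next)
            if ((*n)->major == major)
                return -1;              /* already registered */

        p = malloc(sizeof(*p));
        if (!p)
            return -1;
        p->major = major;
        p->next = NULL;
        *n = p;     /* same assignment appends to an empty or non-empty list */
        return 0;
    }

    static void remove_major(int major)
    {
        struct name_node **n, *p;

        for (n = &head; *n; n = &(*n)->next)
            if ((*n)->major == major)
                break;
        if (!*n)
            return;
        p = *n;
        *n = p->next;   /* unlink without tracking a separate "prev" pointer */
        free(p);
    }

    int main(void)
    {
        insert_major(8);
        insert_major(3);
        remove_major(8);
        for (struct name_node *p = head; p; p = p->next)
            printf("registered major %d\n", p->major);
        return 0;
    }
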
D | sed-opal.c |
    557  u8 n;   in add_token_u64() local
    574  n = number >> (len * 8);   in add_token_u64()
    575  add_token_u8(err, cmd, n);   in add_token_u64()
    689  int n)   in response_get_token() argument
    693  if (n >= resp->num) {   in response_get_token()
    695  n, resp->num);   in response_get_token()
    699  tok = &resp->toks[n];   in response_get_token()
    871  static size_t response_get_string(const struct parsed_resp *resp, int n,   in response_get_string() argument
    880  if (n >= resp->num) {   in response_get_string()
    882  resp->num, n);   in response_get_string()
    [all …]

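The add_token_u64() lines shift the 64-bit value right in 8-bit steps and hand one u8 per step to the token stream, most significant byte first. A userspace sketch of that serialization under simplified assumptions; the length scan and the plain output buffer stand in for the OPAL command framing, they are not it:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t number = 0x0102A0;     /* arbitrary example value */
        unsigned char out[8];
        int len, i = 0;

        /* Find the highest byte position that is non-zero. */
        for (len = 7; len > 0; len--)
            if (number >> (len * 8))
                break;

        /* Emit bytes from the most significant one down; the cast to
         * uint8_t mirrors the u8 truncation in the listing above. */
        for (; len >= 0; len--) {
            uint8_t n = number >> (len * 8);
            out[i++] = n;
        }

        for (int j = 0; j < i; j++)
            printf("%02x ", out[j]);
        printf("\n");   /* prints: 01 02 a0 */
        return 0;
    }
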
D | Kconfig |
    77   default n
    110  default n
    122  default n
    133  default n
    144  default n
    154  default n

D | ioctl.c |
    438  int ret, n;   in blkdev_roset() local
    445  if (get_user(n, (int __user *)arg))   in blkdev_roset()
    447  set_device_ro(bdev, n);   in blkdev_roset()
    481  int ret, n;   in blkdev_bszset() local
    487  if (get_user(n, argp))   in blkdev_bszset()
    496  ret = set_blocksize(bdev, n);   in blkdev_bszset()

D | Kconfig.iosched |
    39   default n
    85   default n
    97   default n

D | elevator.c |
    361  struct rb_node *n = root->rb_node;   in elv_rb_find() local
    364  while (n) {   in elv_rb_find()
    365  rq = rb_entry(n, struct request, rb_node);   in elv_rb_find()
    368  n = n->rb_left;   in elv_rb_find()
    370  n = n->rb_right;   in elv_rb_find()

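elv_rb_find() descends a red-black tree keyed by request position, stepping left or right until the requested sector matches. A minimal userspace sketch of the same descent loop, using a plain binary search tree as a stand-in for struct rb_node and rb_entry():

    #include <stdio.h>

    struct node {
        unsigned long sector;           /* stands in for blk_rq_pos(rq) */
        struct node *left, *right;
    };

    static struct node *find(struct node *n, unsigned long sector)
    {
        while (n) {
            if (sector < n->sector)
                n = n->left;            /* key is smaller: go left */
            else if (sector > n->sector)
                n = n->right;           /* key is larger: go right */
            else
                return n;               /* exact match */
        }
        return NULL;                    /* not in the tree */
    }

    int main(void)
    {
        struct node a = { 100, NULL, NULL };
        struct node b = { 300, NULL, NULL };
        struct node root = { 200, &a, &b };

        printf("found sector 300: %s\n", find(&root, 300) ? "yes" : "no");
        return 0;
    }
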
D | blk-tag.c |
    393  struct list_head *tmp, *n;   in blk_queue_invalidate_tags() local
    397  list_for_each_safe(tmp, n, &q->tag_busy_list)   in blk_queue_invalidate_tags()

D | blk-flush.c |
    224  struct request *rq, *n;   in flush_end_io() local
    248  list_for_each_entry_safe(rq, n, running, flush.list) {   in flush_end_io()

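Here, and in the blk-tag.c, bfq-iosched.c and blk-cgroup.c entries, n is the spare cursor of a *_safe list iterator: the successor is saved before the current entry is processed, so removing or freeing that entry inside the loop cannot derail the walk. A userspace sketch of the idea, with a plain singly-linked list standing in for the kernel's list_head:

    #include <stdio.h>
    #include <stdlib.h>

    struct req {
        int id;
        struct req *next;
    };

    static void complete_all(struct req *head)
    {
        struct req *rq, *n;

        for (rq = head; rq; rq = n) {
            n = rq->next;               /* save the successor first ... */
            printf("completing request %d\n", rq->id);
            free(rq);                   /* ... so freeing rq here is safe */
        }
    }

    int main(void)
    {
        struct req *head = NULL;

        for (int i = 3; i >= 1; i--) {
            struct req *rq = malloc(sizeof(*rq));
            rq->id = i;
            rq->next = head;
            head = rq;                  /* builds the list 1 -> 2 -> 3 */
        }
        complete_all(head);
        return 0;
    }
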
D | partition-generic.c |
    654  unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)   in read_dev_sector() argument
    659  page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)), NULL);   in read_dev_sector()
    664  return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9);   in read_dev_sector()

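read_dev_sector() splits the 512-byte sector number into a page-cache index and a byte offset inside that page. A standalone illustration of the arithmetic, assuming the common PAGE_SHIFT of 12 (4 KiB pages) and an arbitrary example sector:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

    int main(void)
    {
        unsigned long long n = 21;      /* arbitrary example sector number */

        unsigned long long page_index = n >> (PAGE_SHIFT - 9);
        unsigned long long byte_off   = (n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9;

        /* Sector 21 lands in page 2 of the device's page cache, 2560 bytes
         * into that page (21 = 2*8 + 5, and 5 * 512 = 2560). */
        printf("sector %llu -> page %llu, offset %llu\n", n, page_index, byte_off);
        return 0;
    }
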
D | bfq-iosched.c |
    417   struct rb_node **n;   in bfq_rq_pos_tree_lookup() local
    427   n = &(*p)->rb_right;   in bfq_rq_pos_tree_lookup()
    429   n = &(*p)->rb_left;   in bfq_rq_pos_tree_lookup()
    432   p = n;   in bfq_rq_pos_tree_lookup()
    781   struct hlist_node *n;   in bfq_reset_burst_list() local
    783   hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)   in bfq_reset_burst_list()
    798   struct hlist_node *n;   in bfq_add_to_burst() local
    822   hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,   in bfq_add_to_burst()
    4643  struct bfq_queue *bfqq, *n;   in bfq_exit_queue() local
    4648  list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)   in bfq_exit_queue()

D | cfq-iosched.c |
    1196  static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)   in cfq_rb_erase() argument
    1198  if (root->rb_rightmost == n)   in cfq_rb_erase()
    1199  root->rb_rightmost = rb_prev(n);   in cfq_rb_erase()
    1201  rb_erase_cached(n, &root->rb);   in cfq_rb_erase()
    1202  RB_CLEAR_NODE(n);   in cfq_rb_erase()
    1366  struct rb_node *n;   in cfq_group_notify_queue_add() local
    1377  n = st->rb_rightmost;   in cfq_group_notify_queue_add()
    1378  if (n) {   in cfq_group_notify_queue_add()
    1379  __cfqg = rb_entry_cfqg(n);   in cfq_group_notify_queue_add()
    2349  struct rb_node **n;   in cfq_prio_tree_lookup() local
    [all …]

D | blk-throttle.c |
    633  static void rb_erase_init(struct rb_node *n, struct rb_root *root)   in rb_erase_init() argument
    635  rb_erase(n, root);   in rb_erase_init()
    636  RB_CLEAR_NODE(n);   in rb_erase_init()
    639  static void throtl_rb_erase(struct rb_node *n,   in throtl_rb_erase() argument
    642  if (parent_sq->first_pending == n)   in throtl_rb_erase()
    644  rb_erase_init(n, &parent_sq->pending_tree);   in throtl_rb_erase()

D | blk-cgroup.c |
    364  struct blkcg_gq *blkg, *n;   in blkg_destroy_all() local
    368  list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {   in blkg_destroy_all()

D | bio.c |
    46   #define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }   argument

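The BV() macro relies on preprocessor stringification: "biovec-"#n pastes the macro argument into a single string literal such as "biovec-16". A small standalone sketch of the trick; vec_pool is a cut-down, hypothetical stand-in for the kernel's biovec slab descriptor:

    #include <stdio.h>

    struct vec_pool {
        int nr_vecs;
        const char *name;
    };

    /* #n stringizes the argument, and adjacent literals concatenate,
     * so BV(16, 16) expands to { .nr_vecs = 16, .name = "biovec-16" }. */
    #define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }

    static struct vec_pool pools[] = {
        BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64),
    };

    int main(void)
    {
        for (unsigned int i = 0; i < sizeof(pools) / sizeof(pools[0]); i++)
            printf("%-10s holds %d vecs\n", pools[i].name, pools[i].nr_vecs);
        return 0;
    }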