/block/ |
D | blk-cgroup.c |
      67  int i;  in blkg_free() local
      72  for (i = 0; i < BLKCG_MAX_POLS; i++)  in blkg_free()
      73  if (blkg->pd[i])  in blkg_free()
      74  blkcg_policy[i]->pd_free_fn(blkg->pd[i]);  in blkg_free()
      96  int i;  in blkg_alloc() local
     119  for (i = 0; i < BLKCG_MAX_POLS; i++) {  in blkg_alloc()
     120  struct blkcg_policy *pol = blkcg_policy[i];  in blkg_alloc()
     131  blkg->pd[i] = pd;  in blkg_alloc()
     133  pd->plid = i;  in blkg_alloc()
     177  int i, ret;  in blkg_create() local
    [all …]
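
The blkg_free()/blkg_alloc() hits above walk a fixed table of policy slots and invoke a per-policy free hook only for populated entries. A minimal userspace sketch of that idiom follows; every name in it (`policy`, `pd_free_fn`, `MAX_POLS`, `group_free`) is a hypothetical stand-in, not the kernel API:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_POLS 5                     /* fixed number of policy slots */

    struct policy {
        void (*pd_free_fn)(void *pd);      /* per-policy destructor hook */
    };

    static void pd_free(void *pd) { free(pd); }

    static struct policy pol_a = { .pd_free_fn = pd_free };

    /* registry: a slot stays NULL until a policy registers itself */
    static struct policy *policies[MAX_POLS] = { [1] = &pol_a };

    /* free per-policy data, skipping empty slots -- the blkg_free() shape */
    static void group_free(void *pd_table[MAX_POLS])
    {
        int i;

        for (i = 0; i < MAX_POLS; i++)
            if (pd_table[i] && policies[i])
                policies[i]->pd_free_fn(pd_table[i]);
    }

    int main(void)
    {
        void *pd[MAX_POLS] = { [1] = malloc(16) };

        group_free(pd);
        puts("freed per-policy data");
        return 0;
    }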
|
D | blk-mq-debugfs.c |
      31  int i;  in blk_flags_show() local
      33  for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {  in blk_flags_show()
      34  if (!(flags & BIT(i)))  in blk_flags_show()
      39  if (i < flag_name_count && flag_name[i])  in blk_flags_show()
      40  seq_puts(m, flag_name[i]);  in blk_flags_show()
      42  seq_printf(m, "%d", i);  in blk_flags_show()
     141  int i;  in queue_write_hint_show() local
     143  for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)  in queue_write_hint_show()
     144  seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);  in queue_write_hint_show()
     153  int i;  in queue_write_hint_store() local
    [all …]
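
blk_flags_show() scans every bit of a flags word and prints a symbolic name when one is known, falling back to the raw bit position. A self-contained sketch of that decoder; the `flag_name` table contents are made up for illustration:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))
    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
    #define BITS_PER_BYTE 8

    /* hypothetical flag-name table; the real ones live in blk-mq-debugfs.c */
    static const char *const flag_name[] = { "STOPPED", "TAG_ACTIVE", "SCHED_RESTART" };

    static void flags_show(unsigned long flags)
    {
        unsigned int i;
        int sep = 0;

        for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
            if (!(flags & BIT(i)))
                continue;                     /* bit clear: nothing to print */
            if (sep++)
                printf("|");
            if (i < ARRAY_SIZE(flag_name) && flag_name[i])
                printf("%s", flag_name[i]);   /* known bit: symbolic name */
            else
                printf("%u", i);              /* unknown bit: raw position */
        }
        printf("\n");
    }

    int main(void)
    {
        flags_show(BIT(0) | BIT(2) | BIT(9)); /* prints STOPPED|SCHED_RESTART|9 */
        return 0;
    }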
|
D | kyber-iosched.c |
     289  int i;  in kyber_queue_data_alloc() local
     308  for (i = 0; i < KYBER_NUM_DOMAINS; i++) {  in kyber_queue_data_alloc()
     309  WARN_ON(!kyber_depth[i]);  in kyber_queue_data_alloc()
     310  WARN_ON(!kyber_batch_size[i]);  in kyber_queue_data_alloc()
     311  ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],  in kyber_queue_data_alloc()
     315  while (--i >= 0)  in kyber_queue_data_alloc()
     316  sbitmap_queue_free(&kqd->domain_tokens[i]);  in kyber_queue_data_alloc()
     319  sbitmap_queue_resize(&kqd->domain_tokens[i], kyber_depth[i]);  in kyber_queue_data_alloc()
     365  int i;  in kyber_exit_sched() local
     369  for (i = 0; i < KYBER_NUM_DOMAINS; i++)  in kyber_exit_sched()
    [all …]
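
The kyber_queue_data_alloc() hits show the classic partial-initialization unwind: on failure at slot i, `while (--i >= 0)` frees exactly the slots the loop already set up. A sketch of that pattern, with a hypothetical `resource_init`/`resource_free` pair standing in for the sbitmap-queue calls:

    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_DOMAINS 3   /* stand-in for KYBER_NUM_DOMAINS */

    /* hypothetical resource pair, not the sbitmap_queue API */
    static int resource_init(void **res) { *res = malloc(32); return *res ? 0 : -1; }
    static void resource_free(void *res) { free(res); }

    static int alloc_all(void *tokens[NUM_DOMAINS])
    {
        int i, ret;

        for (i = 0; i < NUM_DOMAINS; i++) {
            ret = resource_init(&tokens[i]);
            if (ret) {
                /* unwind exactly the slots this loop already initialized */
                while (--i >= 0)
                    resource_free(tokens[i]);
                return ret;
            }
        }
        return 0;
    }

    int main(void)
    {
        void *tokens[NUM_DOMAINS];
        int i;

        if (alloc_all(tokens) == 0) {
            puts("all domains initialized");
            for (i = 0; i < NUM_DOMAINS; i++)
                resource_free(tokens[i]);
        }
        return 0;
    }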
|
D | blk-mq.c |
     235  unsigned int i;  in blk_mq_quiesce_queue() local
     240  queue_for_each_hw_ctx(q, hctx, i) {  in blk_mq_quiesce_queue()
     274  unsigned int i;  in blk_mq_wake_waiters() local
     276  queue_for_each_hw_ctx(q, hctx, i)  in blk_mq_wake_waiters()
     838  int i;  in blk_mq_timeout_work() local
     864  queue_for_each_hw_ctx(q, hctx, i) {  in blk_mq_timeout_work()
    1271  int i;  in blk_mq_run_hw_queues() local
    1273  queue_for_each_hw_ctx(q, hctx, i) {  in blk_mq_run_hw_queues()
    1293  int i;  in blk_mq_queue_stopped() local
    1295  queue_for_each_hw_ctx(q, hctx, i)  in blk_mq_queue_stopped()
    [all …]
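
Nearly every hit in blk-mq.c is the queue_for_each_hw_ctx() iterator, which binds both an index and a hardware-context cursor per pass. A rough sketch of how such an index-plus-cursor macro can be shaped; this is illustrative only, not the kernel's definition:

    #include <stdio.h>

    struct hw_ctx { int id; };

    struct queue {
        unsigned int nr_hw_queues;
        struct hw_ctx **hw_ctx;
    };

    /*
     * Iterate hardware contexts by index, (re)binding the cursor each
     * pass via a comma expression in the loop condition.
     */
    #define queue_for_each_hw_ctx(q, hctx, i)                            \
        for ((i) = 0;                                                    \
             (i) < (q)->nr_hw_queues && ((hctx) = (q)->hw_ctx[(i)], 1);  \
             (i)++)

    int main(void)
    {
        struct hw_ctx a = { 0 }, b = { 1 };
        struct hw_ctx *ctxs[] = { &a, &b };
        struct queue q = { 2, ctxs };
        struct hw_ctx *hctx;
        unsigned int i;

        queue_for_each_hw_ctx(&q, hctx, i)
            printf("hctx %d\n", hctx->id);
        return 0;
    }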
|
D | blk-mq-sysfs.c |
     149  unsigned int i, first = 1;  in blk_mq_hw_sysfs_cpus_show() local
     152  for_each_cpu(i, hctx->cpumask) {  in blk_mq_hw_sysfs_cpus_show()
     154  ret = snprintf(pos + page, size - pos, "%u", i);  in blk_mq_hw_sysfs_cpus_show()
     156  ret = snprintf(pos + page, size - pos, ", %u", i);  in blk_mq_hw_sysfs_cpus_show()
     223  int i;  in blk_mq_unregister_hctx() local
     228  hctx_for_each_ctx(hctx, ctx, i)  in blk_mq_unregister_hctx()
     238  int i, ret;  in blk_mq_register_hctx() local
     247  hctx_for_each_ctx(hctx, ctx, i) {  in blk_mq_register_hctx()
     259  int i;  in __blk_mq_unregister_dev() local
     263  queue_for_each_hw_ctx(q, hctx, i)  in __blk_mq_unregister_dev()
    [all …]
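
blk_mq_hw_sysfs_cpus_show() renders a CPU mask as a comma-separated list into a page buffer, using a `first` flag to pick between the "%u" and ", %u" formats. A userspace sketch of the same buffer-building idiom; the `cpus_show` helper and the plain-array mask representation are assumptions:

    #include <stdio.h>

    #define PAGE_SIZE 4096

    /* render a cpumask-like bit array as "0, 2, 3" into a page-sized buffer */
    static size_t cpus_show(const int *cpu_set, unsigned int ncpus, char *page)
    {
        size_t size = PAGE_SIZE, pos = 0;
        unsigned int i, first = 1;
        int ret;

        for (i = 0; i < ncpus; i++) {
            if (!cpu_set[i])
                continue;                   /* CPU not in the mask */
            if (first)
                ret = snprintf(page + pos, size - pos, "%u", i);
            else
                ret = snprintf(page + pos, size - pos, ", %u", i);
            first = 0;
            pos += ret;
        }
        pos += snprintf(page + pos, size - pos, "\n");
        return pos;
    }

    int main(void)
    {
        char page[PAGE_SIZE];
        int cpu_set[] = { 1, 0, 1, 1 };     /* CPUs 0, 2 and 3 set */

        cpus_show(cpu_set, 4, page);
        fputs(page, stdout);                /* prints: 0, 2, 3 */
        return 0;
    }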
|
D | badblocks.c |
     139  int i;  in badblocks_update_acked() local
     145  for (i = 0; i < bb->count ; i++) {  in badblocks_update_acked()
     146  if (!BB_ACK(p[i])) {  in badblocks_update_acked()
     450  int i;  in ack_all_badblocks() local
     452  for (i = 0; i < bb->count ; i++) {  in ack_all_badblocks()
     453  if (!BB_ACK(p[i])) {  in ack_all_badblocks()
     454  sector_t start = BB_OFFSET(p[i]);  in ack_all_badblocks()
     455  int len = BB_LEN(p[i]);  in ack_all_badblocks()
     457  p[i] = BB_MAKE(start, len, 1);  in ack_all_badblocks()
     478  int i;  in badblocks_show() local
    [all …]
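
The badblocks hits revolve around 64-bit entries that are tested with BB_ACK() and rebuilt with BB_MAKE(BB_OFFSET(e), BB_LEN(e), 1). A sketch of that pack/unpack scheme, packing a start sector, a 9-bit (length - 1) and an "acknowledged" bit into one word; the exact masks below are illustrative, not copied from the kernel header:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t u64;

    #define BB_LEN_MASK    0x00000000000001FFULL  /* low 9 bits: len - 1 */
    #define BB_OFFSET_MASK 0x7FFFFFFFFFFFFE00ULL  /* middle bits: start sector */
    #define BB_ACK_MASK    0x8000000000000000ULL  /* top bit: acked */

    #define BB_OFFSET(x) (((x) & BB_OFFSET_MASK) >> 9)
    #define BB_LEN(x)    (((x) & BB_LEN_MASK) + 1)
    #define BB_ACK(x)    (!!((x) & BB_ACK_MASK))
    #define BB_MAKE(a, l, ack) \
        (((u64)(a) << 9) | ((l) - 1) | ((u64)(!!(ack)) << 63))

    int main(void)
    {
        u64 e = BB_MAKE(123456, 8, 0);

        if (!BB_ACK(e))                   /* the ack_all_badblocks() shape */
            e = BB_MAKE(BB_OFFSET(e), BB_LEN(e), 1);

        printf("start=%llu len=%llu acked=%d\n",
               (unsigned long long)BB_OFFSET(e),
               (unsigned long long)BB_LEN(e), BB_ACK(e));
        return 0;
    }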
|
D | bio.c |
      78  unsigned int i, entry = -1;  in bio_find_or_create_slab() local
      82  i = 0;  in bio_find_or_create_slab()
      83  while (i < bio_slab_nr) {  in bio_find_or_create_slab()
      84  bslab = &bio_slabs[i];  in bio_find_or_create_slab()
      87  entry = i;  in bio_find_or_create_slab()
      93  i++;  in bio_find_or_create_slab()
     131  unsigned int i;  in bio_put_slab() local
     135  for (i = 0; i < bio_slab_nr; i++) {  in bio_put_slab()
     136  if (bs->bio_slab == bio_slabs[i].slab) {  in bio_put_slab()
     137  bslab = &bio_slabs[i];  in bio_put_slab()
    [all …]
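
bio_find_or_create_slab() scans the slab table once, returning an existing match while remembering the first free slot (`entry` stays -1 until one is seen) in case a new entry must be created. A sketch of that single-pass find-or-create scan, with hypothetical names throughout:

    #include <stdio.h>
    #include <string.h>

    #define NR_SLOTS 8

    struct slab_entry {
        char name[32];      /* empty name means a free slot */
        int  size;
    };

    static struct slab_entry slabs[NR_SLOTS];

    /* find a matching entry, remembering the first free slot for reuse */
    static struct slab_entry *find_or_create(const char *name, int size)
    {
        int i, entry = -1;

        for (i = 0; i < NR_SLOTS; i++) {
            if (!slabs[i].name[0]) {
                if (entry == -1)
                    entry = i;      /* first hole seen so far */
            } else if (slabs[i].size == size &&
                       !strcmp(slabs[i].name, name)) {
                return &slabs[i];   /* already exists */
            }
        }
        if (entry == -1)
            return NULL;            /* table full */
        snprintf(slabs[entry].name, sizeof(slabs[entry].name), "%s", name);
        slabs[entry].size = size;
        return &slabs[entry];
    }

    int main(void)
    {
        struct slab_entry *a = find_or_create("bio-0", 256);
        struct slab_entry *b = find_or_create("bio-0", 256);

        printf("same slot: %s\n", a == b ? "yes" : "no");
        return 0;
    }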
|
D | t10-pi.c |
      49  unsigned int i;  in t10_pi_generate() local
      51  for (i = 0 ; i < iter->data_size ; i += iter->interval) {  in t10_pi_generate()
      73  unsigned int i;  in t10_pi_verify() local
      75  for (i = 0 ; i < iter->data_size ; i += iter->interval) {  in t10_pi_verify()
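
Both t10_pi_generate() and t10_pi_verify() stride through the data one protection interval at a time, producing or checking one tag per chunk. A sketch of that stride, with a toy checksum standing in for the real CRC:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* toy 16-bit checksum standing in for the CRC the real code computes */
    static uint16_t toy_csum(const uint8_t *buf, size_t len)
    {
        uint32_t sum = 0;

        while (len--)
            sum += *buf++;
        return (uint16_t)sum;
    }

    /* one tag per interval-sized chunk -- the t10_pi_generate() stride;
     * assumes data_size is a multiple of interval, as the block layer
     * guarantees for protection intervals */
    static void pi_generate(const uint8_t *data, size_t data_size,
                            size_t interval, uint16_t *tags)
    {
        size_t i, t = 0;

        for (i = 0; i < data_size; i += interval)
            tags[t++] = toy_csum(data + i, interval);
    }

    int main(void)
    {
        uint8_t data[1024] = { 1, 2, 3 };
        uint16_t tags[2];

        pi_generate(data, sizeof(data), 512, tags);
        printf("tag0=%u tag1=%u\n", tags[0], tags[1]);
        return 0;
    }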
|
D | blk-mq-sched.c |
      23  int i;  in blk_mq_sched_free_hctx_data() local
      25  queue_for_each_hw_ctx(q, hctx, i) {  in blk_mq_sched_free_hctx_data()
     309  unsigned int i, j;  in blk_mq_sched_restart() local
     322  queue_for_each_hw_ctx(q, hctx2, i)  in blk_mq_sched_restart()
     328  for (i = 0; i < queue->nr_hw_queues; i++, j++) {  in blk_mq_sched_restart()
     456  int i;  in blk_mq_sched_tags_teardown() local
     458  queue_for_each_hw_ctx(q, hctx, i)  in blk_mq_sched_tags_teardown()
     459  blk_mq_sched_free_tags(set, hctx, i);  in blk_mq_sched_tags_teardown()
     510  unsigned int i;  in blk_mq_init_sched() local
     526  queue_for_each_hw_ctx(q, hctx, i) {  in blk_mq_init_sched()
    [all …]
|
D | blk-mq-tag.c |
     292  int i;  in blk_mq_tagset_busy_iter() local
     294  for (i = 0; i < tagset->nr_hw_queues; i++) {  in blk_mq_tagset_busy_iter()
     295  if (tagset->tags && tagset->tags[i])  in blk_mq_tagset_busy_iter()
     296  blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);  in blk_mq_tagset_busy_iter()
     304  int i, j, ret = 0;  in blk_mq_reinit_tagset() local
     309  for (i = 0; i < set->nr_hw_queues; i++) {  in blk_mq_reinit_tagset()
     310  struct blk_mq_tags *tags = set->tags[i];  in blk_mq_reinit_tagset()
     335  int i;  in blk_mq_queue_tag_busy_iter() local
     338  queue_for_each_hw_ctx(q, hctx, i) {  in blk_mq_queue_tag_busy_iter()
|
D | blk-mq-cpumap.c |
      67  int i;  in blk_mq_hw_queue_to_node() local
      69  for_each_possible_cpu(i) {  in blk_mq_hw_queue_to_node()
      70  if (index == mq_map[i])  in blk_mq_hw_queue_to_node()
      71  return local_memory_node(cpu_to_node(i));  in blk_mq_hw_queue_to_node()
|
D | blk-map.c |
     128  struct iov_iter i;  in blk_rq_map_user_iov() local
     141  i = *iter;  in blk_rq_map_user_iov()
     143  ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);  in blk_rq_map_user_iov()
     148  } while (iov_iter_count(&i));  in blk_rq_map_user_iov()
     167  struct iov_iter i;  in blk_rq_map_user() local
     168  int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);  in blk_rq_map_user()
     173  return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);  in blk_rq_map_user()
|
D | blk-wbt.h |
     102  unsigned int i, ret = 0;  in wbt_inflight() local
     104  for (i = 0; i < WBT_NUM_RWQ; i++)  in wbt_inflight()
     105  ret += atomic_read(&rwb->rq_wait[i].inflight);  in wbt_inflight()
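
wbt_inflight() totals per-queue atomic counters without any locking. A sketch of the same summation using C11 atomics in place of the kernel's atomic_t:

    #include <stdio.h>
    #include <stdatomic.h>

    #define NUM_RWQ 3   /* stand-in for WBT_NUM_RWQ */

    struct rq_wait { atomic_int inflight; };

    struct rwb { struct rq_wait rq_wait[NUM_RWQ]; };

    /* total requests in flight across all wait queues -- the wbt_inflight() shape */
    static unsigned int inflight_total(struct rwb *rwb)
    {
        unsigned int i, ret = 0;

        for (i = 0; i < NUM_RWQ; i++)
            ret += atomic_load(&rwb->rq_wait[i].inflight);
        return ret;
    }

    int main(void)
    {
        struct rwb rwb;
        unsigned int i;

        for (i = 0; i < NUM_RWQ; i++)
            atomic_store(&rwb.rq_wait[i].inflight, 0);
        atomic_store(&rwb.rq_wait[0].inflight, 2);
        atomic_store(&rwb.rq_wait[2].inflight, 1);
        printf("inflight: %u\n", inflight_total(&rwb));  /* prints 3 */
        return 0;
    }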
|
D | genhd.c |
     270  int i;  in disk_map_sector_rcu() local
     278  for (i = 1; i < ptbl->len; i++) {  in disk_map_sector_rcu()
     279  part = rcu_dereference(ptbl->part[i]);  in disk_map_sector_rcu()
     441  int i;  in blk_mangle_minor() local
     443  for (i = 0; i < MINORBITS / 2; i++) {  in blk_mangle_minor()
     444  int low = minor & (1 << i);  in blk_mangle_minor()
     445  int high = minor & (1 << (MINORBITS - 1 - i));  in blk_mangle_minor()
     446  int distance = MINORBITS - 1 - 2 * i;  in blk_mangle_minor()
    1187  int i, target;  in disk_expand_part_tbl() local
    1212  for (i = 0; i < len; i++)  in disk_expand_part_tbl()
    [all …]
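
blk_mangle_minor() mirrors the low MINORBITS of a minor number, swapping bit i with bit (MINORBITS - 1 - i). The excerpt stops after computing `distance`; the swap steps below are the obvious completion and should be read as a sketch, not the verbatim kernel body:

    #include <stdio.h>

    #define MINORBITS 20

    /* mirror the low MINORBITS of a minor number -- the blk_mangle_minor() shape */
    static int mangle_minor(int minor)
    {
        int i;

        for (i = 0; i < MINORBITS / 2; i++) {
            int low = minor & (1 << i);
            int high = minor & (1 << (MINORBITS - 1 - i));
            int distance = MINORBITS - 1 - 2 * i;

            minor ^= low | high;                             /* clear both bits */
            minor |= (low << distance) | (high >> distance); /* set them swapped */
        }
        return minor;
    }

    int main(void)
    {
        int m = 5;

        /* mirroring is an involution: mangling twice restores the input */
        printf("%#x -> %#x -> %#x\n", m, mangle_minor(m),
               mangle_minor(mangle_minor(m)));
        return 0;
    }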
|
D | blk-wbt.c |
     111  int i;  in rwb_wake_all() local
     113  for (i = 0; i < WBT_NUM_RWQ; i++) {  in rwb_wake_all()
     114  struct rq_wait *rqw = &rwb->rq_wait[i];  in rwb_wake_all()
     715  int i;  in wbt_init() local
     729  for (i = 0; i < WBT_NUM_RWQ; i++) {  in wbt_init()
     730  atomic_set(&rwb->rq_wait[i].inflight, 0);  in wbt_init()
     731  init_waitqueue_head(&rwb->rq_wait[i].wait);  in wbt_init()
|
/block/partitions/ |
D | acorn.c |
     336  int i;  in valid_ics_sector() local
     338  for (i = 0, sum = 0x50617274; i < 508; i++)  in valid_ics_sector()
     339  sum += data[i];  in valid_ics_sector()
     427  int i;  in valid_ptec_sector() local
     436  for (i = 0; i < 511; i++)  in valid_ptec_sector()
     437  checksum += data[i];  in valid_ptec_sector()
     458  int i;  in adfspart_check_POWERTEC() local
     471  for (i = 0, p = (const struct ptec_part *)data; i < 12; i++, p++) {  in adfspart_check_POWERTEC()
     520  int i, slot = 1;  in adfspart_check_EESOX() local
     529  for (i = 0; i < 256; i++)  in adfspart_check_EESOX()
    [all …]
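
valid_ics_sector() seeds its byte-sum with the magic 0x50617274 (ASCII "Part") before folding in 508 sector bytes. The excerpt does not show how the sum is compared against the stored checksum, so this sketch only computes it:

    #include <stdio.h>
    #include <stdint.h>

    /* seeded byte-sum over an ICS sector -- the valid_ics_sector() loop */
    static uint32_t ics_sum(const uint8_t *data)
    {
        uint32_t sum;
        int i;

        for (i = 0, sum = 0x50617274; i < 508; i++)
            sum += data[i];
        return sum;
    }

    int main(void)
    {
        uint8_t sector[512] = { 0 };

        printf("sum=%#x\n", ics_sum(sector));  /* 0x50617274 for all-zero data */
        return 0;
    }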
|
D | aix.c |
     224  int i;  in aix_partition() local
     230  for (i = 0; foundlvs < numlvs && i < state->limit; i += 1) {  in aix_partition()
     231  lvip[i].pps_per_lv = be16_to_cpu(p[i].num_lps);  in aix_partition()
     232  if (lvip[i].pps_per_lv)  in aix_partition()
     243  int i;  in aix_partition() local
     248  for (i = 0; i < numpps; i += 1) {  in aix_partition()
     249  struct ppe *p = pvd->ppe + i;  in aix_partition()
     274  (i + 1 - lp_ix) * pp_blocks_size + psn_part1,  in aix_partition()
     285  for (i = 0; i < state->limit; i += 1)  in aix_partition()
     286  if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {  in aix_partition()
    [all …]
|
D | efi.c |
     191  int i, part = 0, ret = 0; /* invalid by default */  in is_pmbr_valid() local
     196  for (i = 0; i < 4; i++) {  in is_pmbr_valid()
     197  ret = pmbr_part_valid(&mbr->partition_record[i]);  in is_pmbr_valid()
     199  part = i;  in is_pmbr_valid()
     212  for (i = 0; i < 4; i++)  in is_pmbr_valid()
     213  if ((mbr->partition_record[i].os_type !=  in is_pmbr_valid()
     215  (mbr->partition_record[i].os_type != 0x00))  in is_pmbr_valid()
     696  u32 i;  in efi_partition() local
     707  for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) {  in efi_partition()
     711  u64 start = le64_to_cpu(ptes[i].starting_lba);  in efi_partition()
    [all …]
|
D | ultrix.c |
      15  int i;  in ultrix_partition() local
      37  for (i=0; i<8; i++)  in ultrix_partition()
      38  if (label->pt_part[i].pi_nblocks)  in ultrix_partition()
      39  put_partition(state, i+1,  in ultrix_partition()
      40  label->pt_part[i].pi_blkoff,  in ultrix_partition()
      41  label->pt_part[i].pi_nblocks);  in ultrix_partition()
|
D | mac.c |
      25  int i;  in mac_fix_string() local
      27  for (i = len - 1; i >= 0 && stg[i] == ' '; i--)  in mac_fix_string()
      28  stg[i] = 0;  in mac_fix_string()
     110  int i, l;  in mac_partition() local
     116  for (i = 0; i <= l - 4; ++i) {  in mac_partition()
     117  if (strncasecmp(part->name + i, "root",  in mac_partition()
|
D | sysv68.c |
      52  int i, slices;  in sysv68_partition() local
      70  i = be32_to_cpu(b->dk_ios.ios_slcblk);  in sysv68_partition()
      73  data = read_part_sector(state, i, &sect);  in sysv68_partition()
      81  for (i = 0; i < slices; i++, slice++) {  in sysv68_partition()
      88  snprintf(tmp, sizeof(tmp), "(s%u)", i);  in sysv68_partition()
|
D | msdos.c |
     132  int i;  in parse_extended() local
     163  for (i = 0; i < 4; i++, p++) {  in parse_extended()
     174  if (i >= 2) {  in parse_extended()
     199  for (i = 0; i < 4; i++, p++)  in parse_extended()
     202  if (i == 4)  in parse_extended()
     222  int i;  in parse_solaris_x86() local
     249  for (i = 0; i < max_nparts && state->next < state->limit; i++) {  in parse_solaris_x86()
     250  struct solaris_x86_slice *s = &v->v_slice[i];  in parse_solaris_x86()
     255  snprintf(tmp, sizeof(tmp), " [s%d]", i);  in parse_solaris_x86()
     407  int i;  in parse_minix() local
    [all …]
|
D | sun.c |
      16  int i;  in sun_partition() local
     102  for (i = 0; i < nparts; i++, p++) {  in sun_partition()
     112  if (be16_to_cpu(label->vtoc.infos[i].id) == LINUX_RAID_PARTITION)  in sun_partition()
     114  else if (be16_to_cpu(label->vtoc.infos[i].id) == SUN_WHOLE_DISK)  in sun_partition()
|
D | karma.c |
      16  int i;  in karma_partition() local
      45  for (i = 0 ; i < 2; i++, p++) {  in karma_partition()
|
D | ldm.c |
     288  int i;  in ldm_validate_privheads() local
     303  for (i = 0; i < 3; i++) {  in ldm_validate_privheads()
     304  data = read_part_sector(state, ph[0]->config_start + off[i],  in ldm_validate_privheads()
     310  result = ldm_parse_privhead (data, ph[i]);  in ldm_validate_privheads()
     313  ldm_error ("Cannot find PRIVHEAD %d.", i+1); /* Log again */  in ldm_validate_privheads()
     314  if (i < 2)  in ldm_validate_privheads()
     375  int i, nr_tbs;  in ldm_validate_tocblocks() local
     394  for (nr_tbs = i = 0; i < 4; i++) {  in ldm_validate_tocblocks()
     395  data = read_part_sector(state, base + off[i], &sect);  in ldm_validate_tocblocks()
     397  ldm_error("Disk read failed for TOCBLOCK %d.", i);  in ldm_validate_tocblocks()
    [all …]
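
ldm_validate_privheads() reads several redundant on-disk PRIVHEAD copies, logging each parse failure ("Cannot find PRIVHEAD %d."). A sketch of iterating redundant copies; `read_sector`, `parse_privhead`, the dummy offsets, and the accept-if-any-parses policy are all assumptions, since the excerpt truncates the real recovery logic:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define NUM_COPIES 3

    /* hypothetical stand-ins for read_part_sector() and ldm_parse_privhead() */
    static const uint8_t *read_sector(uint64_t lba) { (void)lba; return NULL; }
    static bool parse_privhead(const uint8_t *data) { return data != NULL; }

    /* try each redundant PRIVHEAD copy, logging failures per copy */
    static bool validate_privheads(uint64_t config_start,
                                   const uint64_t off[NUM_COPIES])
    {
        int i, good = 0;

        for (i = 0; i < NUM_COPIES; i++) {
            const uint8_t *data = read_sector(config_start + off[i]);

            if (parse_privhead(data)) {
                good++;
                continue;
            }
            fprintf(stderr, "Cannot find PRIVHEAD %d.\n", i + 1);
        }
        return good > 0;    /* one plausible acceptance policy */
    }

    int main(void)
    {
        const uint64_t off[NUM_COPIES] = { 0, 1, 2 };   /* dummy offsets */

        if (!validate_privheads(1, off))
            fprintf(stderr, "no valid PRIVHEAD found\n");
        return 0;
    }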
|