/block/
D | blk-cgroup-rwstat.c |
    10  int i, ret;  in blkg_rwstat_init() local
    12  for (i = 0; i < BLKG_RWSTAT_NR; i++) {  in blkg_rwstat_init()
    13  ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);  in blkg_rwstat_init()
    15  while (--i >= 0)  in blkg_rwstat_init()
    16  percpu_counter_destroy(&rwstat->cpu_cnt[i]);  in blkg_rwstat_init()
    19  atomic64_set(&rwstat->aux_cnt[i], 0);  in blkg_rwstat_init()
    27  int i;  in blkg_rwstat_exit() local
    29  for (i = 0; i < BLKG_RWSTAT_NR; i++)  in blkg_rwstat_exit()
    30  percpu_counter_destroy(&rwstat->cpu_cnt[i]);  in blkg_rwstat_exit()
    54  int i;  in __blkg_prfill_rwstat() local
    [all …]

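The blkg_rwstat_init() excerpt shows the init/unwind idiom that recurs throughout this directory (see also disk_register_independent_access_ranges(), blkg_alloc(), and kyber_queue_data_alloc() below): when initialization fails mid-loop, `while (--i >= 0)` tears down only the elements already set up, skipping the one that just failed. A minimal standalone sketch of the same idiom; resource_init()/resource_destroy() are hypothetical stand-ins, not kernel APIs:

#include <stdio.h>
#include <stdlib.h>

#define NR 4

static int resource_init(int idx)     { return idx == 2 ? -1 : 0; } /* fail once, for demo */
static void resource_destroy(int idx) { printf("destroy %d\n", idx); }

static int init_all(void)
{
	int i, ret;

	for (i = 0; i < NR; i++) {
		ret = resource_init(i);
		if (ret) {
			/* Roll back only the slots already initialized:
			 * the pre-decrement skips the slot that failed. */
			while (--i >= 0)
				resource_destroy(i);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	return init_all() ? EXIT_FAILURE : EXIT_SUCCESS;
}
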
D | blk-cgroup-rwstat.h |
    92  int i;  in blkg_rwstat_read() local
    94  for (i = 0; i < BLKG_RWSTAT_NR; i++)  in blkg_rwstat_read()
    95  result->cnt[i] =  in blkg_rwstat_read()
    96  percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);  in blkg_rwstat_read()
   121  int i;  in blkg_rwstat_reset() local
   123  for (i = 0; i < BLKG_RWSTAT_NR; i++) {  in blkg_rwstat_reset()
   124  percpu_counter_set(&rwstat->cpu_cnt[i], 0);  in blkg_rwstat_reset()
   125  atomic64_set(&rwstat->aux_cnt[i], 0);  in blkg_rwstat_reset()
   140  int i;  in blkg_rwstat_add_aux() local
   142  for (i = 0; i < BLKG_RWSTAT_NR; i++)  in blkg_rwstat_add_aux()
    [all …]

D | blk-ia-ranges.c |
   112  int i, ret;  in disk_register_independent_access_ranges() local
   134  for (i = 0; i < iars->nr_ia_ranges; i++) {  in disk_register_independent_access_ranges()
   135  ret = kobject_init_and_add(&iars->ia_range[i].kobj,  in disk_register_independent_access_ranges()
   137  "%d", i);  in disk_register_independent_access_ranges()
   139  while (--i >= 0)  in disk_register_independent_access_ranges()
   140  kobject_del(&iars->ia_range[i].kobj);  in disk_register_independent_access_ranges()
   156  int i;  in disk_unregister_independent_access_ranges() local
   165  for (i = 0; i < iars->nr_ia_ranges; i++)  in disk_unregister_independent_access_ranges()
   166  kobject_del(&iars->ia_range[i].kobj);  in disk_unregister_independent_access_ranges()
   181  int i;  in disk_find_ia_range() local
    [all …]

D | blk-cgroup.c |
   124  int i;  in blkg_free_workfn() local
   134  for (i = 0; i < BLKCG_MAX_POLS; i++)  in blkg_free_workfn()
   135  if (blkg->pd[i])  in blkg_free_workfn()
   136  blkcg_policy[i]->pd_free_fn(blkg->pd[i]);  in blkg_free_workfn()
   309  int i, cpu;  in blkg_alloc() local
   339  for (i = 0; i < BLKCG_MAX_POLS; i++) {  in blkg_alloc()
   340  struct blkcg_policy *pol = blkcg_policy[i];  in blkg_alloc()
   350  blkg->pd[i] = pd;  in blkg_alloc()
   352  pd->plid = i;  in blkg_alloc()
   359  while (--i >= 0)  in blkg_alloc()
    [all …]

D | kyber-iosched.c |
   361  int i;  in kyber_queue_data_alloc() local
   377  for (i = 0; i < KYBER_NUM_DOMAINS; i++) {  in kyber_queue_data_alloc()
   378  WARN_ON(!kyber_depth[i]);  in kyber_queue_data_alloc()
   379  WARN_ON(!kyber_batch_size[i]);  in kyber_queue_data_alloc()
   380  ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],  in kyber_queue_data_alloc()
   381  kyber_depth[i], -1, false,  in kyber_queue_data_alloc()
   384  while (--i >= 0)  in kyber_queue_data_alloc()
   385  sbitmap_queue_free(&kqd->domain_tokens[i]);  in kyber_queue_data_alloc()
   390  for (i = 0; i < KYBER_OTHER; i++) {  in kyber_queue_data_alloc()
   391  kqd->domain_p99[i] = -1;  in kyber_queue_data_alloc()
    [all …]

D | blk-crypto-fallback.c |
   147  int i;  in blk_crypto_fallback_encrypt_endio() local
   149  for (i = 0; i < enc_bio->bi_vcnt; i++)  in blk_crypto_fallback_encrypt_endio()
   150  mempool_free(enc_bio->bi_io_vec[i].bv_page,  in blk_crypto_fallback_encrypt_endio()
   216  unsigned int i = 0;  in blk_crypto_fallback_split_bio_if_needed() local
   223  if (++i == BIO_MAX_VECS)  in blk_crypto_fallback_split_bio_if_needed()
   251  int i;  in blk_crypto_dun_to_iv() local
   253  for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)  in blk_crypto_dun_to_iv()
   254  iv->dun[i] = cpu_to_le64(dun[i]);  in blk_crypto_dun_to_iv()
   275  unsigned int i, j;  in blk_crypto_fallback_encrypt_bio() local
   319  for (i = 0; i < enc_bio->bi_vcnt; i++) {  in blk_crypto_fallback_encrypt_bio()
    [all …]

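blk_crypto_dun_to_iv() (lines 251-254) stores each 64-bit DUN word into the IV in little-endian byte order via cpu_to_le64(), independent of host endianness. A hedged standalone equivalent that makes the byte order explicit; DUN_WORDS stands in for BLK_CRYPTO_DUN_ARRAY_SIZE:

#include <stdint.h>

#define DUN_WORDS 4   /* stand-in for BLK_CRYPTO_DUN_ARRAY_SIZE */

static void dun_to_iv(const uint64_t dun[DUN_WORDS],
		      uint8_t iv[DUN_WORDS * 8])
{
	for (int i = 0; i < DUN_WORDS; i++)
		for (int b = 0; b < 8; b++)   /* emit bytes LSB first */
			iv[i * 8 + b] = (uint8_t)(dun[i] >> (8 * b));
}
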
D | blk-crypto.c |
    68  size_t i;  in bio_crypt_ctx_init() local
    86  for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {  in bio_crypt_ctx_init()
    87  BUG_ON(blk_crypto_modes[i].keysize >  in bio_crypt_ctx_init()
    89  BUG_ON(blk_crypto_modes[i].security_strength >  in bio_crypt_ctx_init()
    90  blk_crypto_modes[i].keysize);  in bio_crypt_ctx_init()
    91  BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);  in bio_crypt_ctx_init()
   139  int i;  in bio_crypt_dun_increment() local
   141  for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {  in bio_crypt_dun_increment()
   142  dun[i] += inc;  in bio_crypt_dun_increment()
   147  if (dun[i] < inc)  in bio_crypt_dun_increment()
    [all …]

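The bio_crypt_dun_increment() excerpt (lines 139-147) is a multi-limb add: the DUN is an array of 64-bit limbs, least-significant first, and the unsigned-wraparound test `dun[i] < inc` detects overflow so the carry propagates into the next limb. The loop stops as soon as there is nothing left to carry. A standalone sketch under that reading:

#include <stdint.h>

#define DUN_WORDS 4

static void dun_increment(uint64_t dun[DUN_WORDS], unsigned int inc)
{
	for (int i = 0; inc && i < DUN_WORDS; i++) {
		dun[i] += inc;
		/* Unsigned wraparound: a result smaller than the addend
		 * means this limb overflowed, so carry 1 upward. */
		inc = (dun[i] < inc) ? 1 : 0;
	}
}
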
D | blk-mq-sysfs.c |
    85  unsigned int i, first = 1;  in blk_mq_hw_sysfs_cpus_show() local
    88  for_each_cpu(i, hctx->cpumask) {  in blk_mq_hw_sysfs_cpus_show()
    90  ret = snprintf(pos + page, size - pos, "%u", i);  in blk_mq_hw_sysfs_cpus_show()
    92  ret = snprintf(pos + page, size - pos, ", %u", i);  in blk_mq_hw_sysfs_cpus_show()
   147  int i;  in blk_mq_unregister_hctx() local
   152  hctx_for_each_ctx(hctx, ctx, i)  in blk_mq_unregister_hctx()
   162  int i, j, ret;  in blk_mq_register_hctx() local
   171  hctx_for_each_ctx(hctx, ctx, i) {  in blk_mq_register_hctx()
   180  if (j < i)  in blk_mq_register_hctx()
   223  unsigned long i, j;  in blk_mq_sysfs_register() local
    [all …]

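blk_mq_hw_sysfs_cpus_show() (lines 85-92) prints the first CPU bare and every later one with a ", " prefix, tracking the running offset so snprintf() never writes past the buffer. A standalone sketch of the same first/rest pattern over a plain array; format_list() is a hypothetical name:

#include <stdio.h>

static int format_list(char *buf, size_t size, const unsigned int *vals, int n)
{
	size_t pos = 0;
	int first = 1;

	for (int k = 0; k < n; k++) {
		int ret = snprintf(buf + pos, size - pos,
				   first ? "%u" : ", %u", vals[k]);
		if (ret < 0 || (size_t)ret >= size - pos)
			return -1;   /* output truncated */
		pos += ret;
		first = 0;
	}
	return (int)pos;
}
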
D | blk-mq.c |
   307  unsigned long i;  in blk_mq_wake_waiters() local
   309  queue_for_each_hw_ctx(q, hctx, i)  in blk_mq_wake_waiters()
   411  int i, nr = 0;  in __blk_mq_alloc_requests_batch() local
   418  for (i = 0; tag_mask; i++) {  in __blk_mq_alloc_requests_batch()
   419  if (!(tag_mask & (1UL << i)))  in __blk_mq_alloc_requests_batch()
   421  tag = tag_offset + i;  in __blk_mq_alloc_requests_batch()
   423  tag_mask &= ~(1UL << i);  in __blk_mq_alloc_requests_batch()
  1642  unsigned long i;  in blk_mq_timeout_work() local
  1684  queue_for_each_hw_ctx(q, hctx, i) {  in blk_mq_timeout_work()
  2327  unsigned long i;  in blk_mq_run_hw_queues() local
    [all …]

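__blk_mq_alloc_requests_batch() (lines 411-423) walks a bitmask of allocated tags: the loop condition is the mask itself, each set bit yields one tag (tag_offset + bit index), and the bit is cleared once consumed so the loop exits as soon as the mask empties. A standalone sketch of the walk:

#include <stdio.h>

static void consume_tags(unsigned long tag_mask, unsigned int tag_offset)
{
	for (unsigned int i = 0; tag_mask; i++) {
		if (!(tag_mask & (1UL << i)))
			continue;           /* bit i not set, keep scanning */
		printf("tag %u\n", tag_offset + i);
		tag_mask &= ~(1UL << i);    /* consume this bit */
	}
}
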
D | blk-map.c |
   137  int i = 0, ret;  in bio_copy_user_iov() local
   164  i = map_data->offset / PAGE_SIZE;  in bio_copy_user_iov()
   175  if (i == map_data->nr_entries * nr_pages) {  in bio_copy_user_iov()
   180  page = map_data->pages[i / nr_pages];  in bio_copy_user_iov()
   181  page += (i % nr_pages);  in bio_copy_user_iov()
   183  i++;  in bio_copy_user_iov()
   363  unsigned long i, len = 0;  in bio_invalidate_vmalloc_pages() local
   365  for (i = 0; i < bio->bi_vcnt; i++)  in bio_invalidate_vmalloc_pages()
   366  len += bio->bi_io_vec[i].bv_len;  in bio_invalidate_vmalloc_pages()
   398  int offset, i;  in bio_map_kern() local
    [all …]

D | blk-crypto-profile.c |
    78  unsigned int i;  in blk_crypto_profile_init() local
   128  for (i = 0; i < slot_hashtable_size; i++)  in blk_crypto_profile_init()
   129  INIT_HLIST_HEAD(&profile->slot_hashtable[i]);  in blk_crypto_profile_init()
   522  unsigned int i;  in blk_crypto_intersect_capabilities() local
   527  for (i = 0; i < ARRAY_SIZE(child->modes_supported); i++)  in blk_crypto_intersect_capabilities()
   528  parent->modes_supported[i] &= child->modes_supported[i];  in blk_crypto_intersect_capabilities()
   550  int i;  in blk_crypto_has_capabilities() local
   558  for (i = 0; i < ARRAY_SIZE(target->modes_supported); i++) {  in blk_crypto_has_capabilities()
   559  if (reference->modes_supported[i] & ~target->modes_supported[i])  in blk_crypto_has_capabilities()

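The last two excerpts are the standard bitmask algebra for capability sets: intersection narrows a parent profile to what a child also supports (parent[i] &= child[i]), and `ref & ~target` catches any bit the reference needs but the target lacks. A standalone sketch with hypothetical names and a fixed NR_MODES:

#include <stdbool.h>

#define NR_MODES 4

/* Narrow parent's capabilities to those the child also supports. */
static void intersect_caps(unsigned int parent[NR_MODES],
			   const unsigned int child[NR_MODES])
{
	for (int i = 0; i < NR_MODES; i++)
		parent[i] &= child[i];
}

/* True iff every capability in ref is also present in target. */
static bool caps_subset(const unsigned int ref[NR_MODES],
			const unsigned int target[NR_MODES])
{
	for (int i = 0; i < NR_MODES; i++)
		if (ref[i] & ~target[i])
			return false;
	return true;
}
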
D | blk-mq-debugfs-zoned.c |
    12  unsigned int i;  in queue_zone_wlock_show() local
    17  for (i = 0; i < q->disk->nr_zones; i++)  in queue_zone_wlock_show()
    18  if (test_bit(i, q->disk->seq_zones_wlock))  in queue_zone_wlock_show()
    19  seq_printf(m, "%u\n", i);  in queue_zone_wlock_show()

D | badblocks.c |
   130  int i;  in badblocks_update_acked() local
   136  for (i = 0; i < bb->count; i++) {  in badblocks_update_acked()
   137  if (!BB_ACK(p[i])) {  in badblocks_update_acked()
   440  int i;  in ack_all_badblocks() local
   442  for (i = 0; i < bb->count; i++) {  in ack_all_badblocks()
   443  if (!BB_ACK(p[i])) {  in ack_all_badblocks()
   444  sector_t start = BB_OFFSET(p[i]);  in ack_all_badblocks()
   445  int len = BB_LEN(p[i]);  in ack_all_badblocks()
   447  p[i] = BB_MAKE(start, len, 1);  in ack_all_badblocks()
   468  int i;  in badblocks_show() local
    [all …]

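BB_OFFSET()/BB_LEN()/BB_ACK()/BB_MAKE() pack one bad-block range per u64. A sketch of the encoding these accessors imply, assuming the upstream layout (bit 63 = acknowledged flag, bits 9-62 = start sector, bits 0-8 = length minus one, capping a range at 512 sectors):

#include <stdint.h>
#include <stdbool.h>

/* Pack a range; len must be 1..512 under the assumed layout. */
static uint64_t bb_make(uint64_t start, unsigned int len, bool ack)
{
	return (start << 9) | (uint64_t)(len - 1) |
	       ((uint64_t)(ack ? 1 : 0) << 63);
}

static uint64_t bb_offset(uint64_t bb)  { return (bb >> 9) & ((1ULL << 54) - 1); }
static unsigned int bb_len(uint64_t bb) { return (unsigned int)(bb & 0x1FF) + 1; }
static bool bb_ack(uint64_t bb)         { return bb >> 63; }

ack_all_badblocks() then reduces to re-packing each unacknowledged entry with the ack bit set, exactly as the line-447 excerpt shows.
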
D | t10-pi.c |
    35  unsigned int i;  in t10_pi_generate() local
    37  for (i = 0; i < iter->data_size; i += iter->interval) {  in t10_pi_generate()
    59  unsigned int i;  in t10_pi_verify() local
    63  for (i = 0; i < iter->data_size; i += iter->interval) {  in t10_pi_verify()
   291  unsigned int i;  in ext_pi_crc64_generate() local
   293  for (i = 0; i < iter->data_size; i += iter->interval) {  in ext_pi_crc64_generate()
   322  unsigned int i;  in ext_pi_crc64_verify() local
   324  for (i = 0; i < iter->data_size; i += iter->interval) {  in ext_pi_crc64_verify()

D | disk-events.c |
   174  int nr_events = 0, i;  in disk_event_uevent() local
   176  for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)  in disk_event_uevent()
   177  if (events & disk->events & (1 << i))  in disk_event_uevent()
   178  envp[nr_events++] = disk_uevents[i];  in disk_event_uevent()
   332  int i;  in __disk_events_show() local
   334  for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++)  in __disk_events_show()
   335  if (events & (1 << i)) {  in __disk_events_show()
   337  delim, disk_events_strs[i]);  in __disk_events_show()

D | blk-wbt.c |
   186  int i;  in rwb_wake_all() local
   188  for (i = 0; i < WBT_NUM_RWQ; i++) {  in rwb_wake_all()
   189  struct rq_wait *rqw = &rwb->rq_wait[i];  in rwb_wake_all()
   289  unsigned int i, ret = 0;  in wbt_inflight() local
   291  for (i = 0; i < WBT_NUM_RWQ; i++)  in wbt_inflight()
   292  ret += atomic_read(&rwb->rq_wait[i].inflight);  in wbt_inflight()
   827  int i;  in wbt_inflight_show() local
   829  for (i = 0; i < WBT_NUM_RWQ; i++)  in wbt_inflight_show()
   830  seq_printf(m, "%d: inflight %d\n", i,  in wbt_inflight_show()
   831  atomic_read(&rwb->rq_wait[i].inflight));  in wbt_inflight_show()
    [all …]

D | blk-mq-sched.c |
   409  unsigned long i;  in blk_mq_sched_tags_teardown() local
   411  queue_for_each_hw_ctx(q, hctx, i) {  in blk_mq_sched_tags_teardown()
   448  unsigned long i;  in blk_mq_init_sched() local
   465  queue_for_each_hw_ctx(q, hctx, i) {  in blk_mq_init_sched()
   466  ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);  in blk_mq_init_sched()
   479  queue_for_each_hw_ctx(q, hctx, i) {  in blk_mq_init_sched()
   481  ret = e->ops.init_hctx(hctx, i);  in blk_mq_init_sched()
   512  unsigned long i;  in blk_mq_sched_free_rqs() local
   518  queue_for_each_hw_ctx(q, hctx, i) {  in blk_mq_sched_free_rqs()
   521  hctx->sched_tags, i);  in blk_mq_sched_free_rqs()
    [all …]

D | blk-crypto-sysfs.c |
   160  int i;  in blk_crypto_sysfs_init() local
   163  for (i = 1; i < BLK_ENCRYPTION_MODE_MAX; i++) {  in blk_crypto_sysfs_init()
   164  struct blk_crypto_attr *attr = &__blk_crypto_mode_attrs[i];  in blk_crypto_sysfs_init()
   166  attr->attr.name = blk_crypto_modes[i].name;  in blk_crypto_sysfs_init()
   169  blk_crypto_mode_attrs[i - 1] = &attr->attr;  in blk_crypto_sysfs_init()

/block/partitions/ |
D | efi.c |
   175  int i, part = 0, ret = 0; /* invalid by default */  in is_pmbr_valid() local
   180  for (i = 0; i < 4; i++) {  in is_pmbr_valid()
   181  ret = pmbr_part_valid(&mbr->partition_record[i]);  in is_pmbr_valid()
   183  part = i;  in is_pmbr_valid()
   196  for (i = 0; i < 4; i++)  in is_pmbr_valid()
   197  if ((mbr->partition_record[i].os_type !=  in is_pmbr_valid()
   199  (mbr->partition_record[i].os_type != 0x00))  in is_pmbr_valid()
   680  unsigned int i = 0;  in utf16_le_to_7bit() local
   684  while (i < size) {  in utf16_le_to_7bit()
   685  u8 c = le16_to_cpu(in[i]) & 0xff;  in utf16_le_to_7bit()
    [all …]

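utf16_le_to_7bit() (lines 680-685) flattens a UTF-16LE GPT partition name into a 7-bit C string by keeping only the low byte of each code unit. A hedged standalone version; the substitute character used for non-ASCII input here ('?') is an assumption, not necessarily what efi.c picks:

#include <stdint.h>

/* out must have room for size + 1 bytes (the trailing NUL). */
static void utf16le_to_7bit(const uint8_t *in, unsigned int size, char *out)
{
	for (unsigned int i = 0; i < size; i++) {
		uint8_t c = in[2 * i];   /* low byte of each LE code unit */
		out[i] = (c > 0x7f) ? '?' : (char)c;
	}
	out[size] = '\0';
}
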
D | acorn.c |
   330  int i;  in valid_ics_sector() local
   332  for (i = 0, sum = 0x50617274; i < 508; i++)  in valid_ics_sector()
   333  sum += data[i];  in valid_ics_sector()
   421  int i;  in valid_ptec_sector() local
   430  for (i = 0; i < 511; i++)  in valid_ptec_sector()
   431  checksum += data[i];  in valid_ptec_sector()
   452  int i;  in adfspart_check_POWERTEC() local
   465  for (i = 0, p = (const struct ptec_part *)data; i < 12; i++, p++) {  in adfspart_check_POWERTEC()
   514  int i, slot = 1;  in adfspart_check_EESOX() local
   523  for (i = 0; i < 256; i++)  in adfspart_check_EESOX()
    [all …]

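valid_ics_sector() (lines 330-333) seeds its checksum with 0x50617274, which is the ASCII bytes 'P' 'a' 'r' 't', then sums the first 508 bytes of the sector; valid_ptec_sector() does the same over 511 bytes with a zero seed. A standalone sketch of the ICS sum (the comparison against the checksum stored in the sector is omitted):

#include <stdint.h>

static uint32_t ics_sum(const uint8_t *data)
{
	uint32_t sum = 0x50617274;   /* ASCII "Part" */

	for (int i = 0; i < 508; i++)
		sum += data[i];
	return sum;
}
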
D | aix.c |
   207  int i;  in aix_partition() local
   213  for (i = 0; foundlvs < numlvs && i < state->limit; i += 1) {  in aix_partition()
   214  lvip[i].pps_per_lv = be16_to_cpu(p[i].num_lps);  in aix_partition()
   215  if (lvip[i].pps_per_lv)  in aix_partition()
   226  int i;  in aix_partition() local
   231  for (i = 0; i < numpps; i += 1) {  in aix_partition()
   232  struct ppe *p = pvd->ppe + i;  in aix_partition()
   257  (i + 1 - lp_ix) * pp_blocks_size + psn_part1,  in aix_partition()
   268  for (i = 0; i < state->limit; i += 1)  in aix_partition()
   269  if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {  in aix_partition()
    [all …]

D | ultrix.c |
    14  int i;  in ultrix_partition() local
    36  for (i = 0; i < 8; i++)  in ultrix_partition()
    37  if (label->pt_part[i].pi_nblocks)  in ultrix_partition()
    38  put_partition(state, i + 1,  in ultrix_partition()
    39  label->pt_part[i].pi_blkoff,  in ultrix_partition()
    40  label->pt_part[i].pi_nblocks);  in ultrix_partition()

D | mac.c |
    25  int i;  in mac_fix_string() local
    27  for (i = len - 1; i >= 0 && stg[i] == ' '; i--)  in mac_fix_string()
    28  stg[i] = 0;  in mac_fix_string()
   110  int i, l;  in mac_partition() local
   116  for (i = 0; i <= l - 4; ++i) {  in mac_partition()
   117  if (strncasecmp(part->name + i, "root",  in mac_partition()

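mac_fix_string() (lines 25-28) strips the space padding that Mac partition-map strings carry, walking backwards from the end and overwriting trailing blanks with NULs. A standalone equivalent:

static void fix_string(char *stg, int len)
{
	for (int i = len - 1; i >= 0 && stg[i] == ' '; i--)
		stg[i] = '\0';
}
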
D | sysv68.c |
    51  int i, slices;  in sysv68_partition() local
    69  i = be32_to_cpu(b->dk_ios.ios_slcblk);  in sysv68_partition()
    72  data = read_part_sector(state, i, &sect);  in sysv68_partition()
    80  for (i = 0; i < slices; i++, slice++) {  in sysv68_partition()
    87  snprintf(tmp, sizeof(tmp), "(s%u)", i);  in sysv68_partition()

D | msdos.c |
   141  int i;  in parse_extended() local
   173  for (i = 0; i < 4; i++, p++) {  in parse_extended()
   184  if (i >= 2) {  in parse_extended()
   209  for (i = 0; i < 4; i++, p++)  in parse_extended()
   212  if (i == 4)  in parse_extended()
   256  int i;  in parse_solaris_x86() local
   283  for (i = 0; i < max_nparts && state->next < state->limit; i++) {  in parse_solaris_x86()
   284  struct solaris_x86_slice *s = &v->v_slice[i];  in parse_solaris_x86()
   289  snprintf(tmp, sizeof(tmp), " [s%d]", i);  in parse_solaris_x86()
   536  int i;  in parse_minix() local
    [all …]
|