/block/
blk-mq-tag.c
      28  if (blk_mq_is_sbitmap_shared(hctx->flags)) {  in __blk_mq_tag_busy()
      64  if (blk_mq_is_sbitmap_shared(hctx->flags)) {  in __blk_mq_tag_idle()
      81  if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&  in __blk_mq_get_tag()
     100  if (data->flags & BLK_MQ_REQ_RESERVED) {  in blk_mq_get_tag()
     116  if (data->flags & BLK_MQ_REQ_NOWAIT)  in blk_mq_get_tag()
     153  if (data->flags & BLK_MQ_REQ_RESERVED)  in blk_mq_get_tag()
     208  unsigned long flags;  in blk_mq_find_and_get_req() local
     210  spin_lock_irqsave(&tags->lock, flags);  in blk_mq_find_and_get_req()
     214  spin_unlock_irqrestore(&tags->lock, flags);  in blk_mq_find_and_get_req()
     274  unsigned int flags;  member
          [all …]
blk-mq.h
      59  void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
      64  unsigned int flags);
     108  unsigned int flags,  in blk_mq_map_queue() argument
     116  if (flags & REQ_HIPRI)  in blk_mq_map_queue()
     118  else if ((flags & REQ_OP_MASK) == REQ_OP_READ)  in blk_mq_map_queue()
     156  blk_mq_req_flags_t flags;  member
     165  static inline bool blk_mq_is_sbitmap_shared(unsigned int flags)  in blk_mq_is_sbitmap_shared() argument
     167  return flags & BLK_MQ_F_TAG_HCTX_SHARED;  in blk_mq_is_sbitmap_shared()
     207  if (blk_mq_is_sbitmap_shared(hctx->flags))  in __blk_mq_inc_active_requests()
     215  if (blk_mq_is_sbitmap_shared(hctx->flags))  in __blk_mq_dec_active_requests()
          [all …]
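The hits at lines 165-167 and 207/215 show this file's main idiom: the BLK_MQ_F_TAG_HCTX_SHARED test is wrapped in an inline predicate, and callers branch on the helper instead of open-coding the mask. A kernel-style sketch; the helper is quoted from the hits above, while the caller's body is reconstructed rather than quoted, so treat its field names as illustrative:

    static inline bool blk_mq_is_sbitmap_shared(unsigned int flags)
    {
            return flags & BLK_MQ_F_TAG_HCTX_SHARED;
    }

    static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
    {
            /* shared sbitmap: account against the queue, not this hctx */
            if (blk_mq_is_sbitmap_shared(hctx->flags))
                    atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
            else
                    atomic_inc(&hctx->nr_active);
    }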
blk-flush.c
      97  struct blk_flush_queue *fq, unsigned int flags);
     220  unsigned long flags = 0;  in flush_end_io() local
     224  spin_lock_irqsave(&fq->mq_flush_lock, flags);  in flush_end_io()
     228  spin_unlock_irqrestore(&fq->mq_flush_lock, flags);  in flush_end_io()
     265  spin_unlock_irqrestore(&fq->mq_flush_lock, flags);  in flush_end_io()
     287  unsigned int flags)  in blk_kick_flush() argument
     336  flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);  in blk_kick_flush()
     357  unsigned long flags;  in mq_flush_data_end_io() local
     369  spin_lock_irqsave(&fq->mq_flush_lock, flags);  in mq_flush_data_end_io()
     371  spin_unlock_irqrestore(&fq->mq_flush_lock, flags);  in mq_flush_data_end_io()
          [all …]
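The hit at line 336 is the substantive one: when blk_kick_flush() builds the flush request, only the driver-private and failfast bits of the original request's flags survive onto flush_rq. A trimmed sketch of that step; the first line is reconstructed context, not one of the hits:

    flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
    /* inherit only driver-private and failfast bits from the parent */
    flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);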
blk-ioc.c
      47  if (icq->flags & ICQ_EXITED)  in ioc_exit_icq()
      53  icq->flags |= ICQ_EXITED;  in ioc_exit_icq()
      87  icq->flags |= ICQ_DESTROYED;  in ioc_destroy_icq()
     122  if (!(icq->flags & ICQ_DESTROYED))  in ioc_release_fn()
     144  unsigned long flags;  in put_io_context() local
     157  spin_lock_irqsave(&ioc->lock, flags);  in put_io_context()
     163  spin_unlock_irqrestore(&ioc->lock, flags);  in put_io_context()
     188  if (icq->flags & ICQ_EXITED)  in put_io_context_active()
     214  unsigned long flags;  in __ioc_clear_queue() local
     222  spin_lock_irqsave(&ioc->lock, flags);  in __ioc_clear_queue()
          [all …]
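Besides the usual irqsave locals, blk-ioc.c uses icq->flags as lifecycle state: ICQ_EXITED guards against double exit and ICQ_DESTROYED against double release. A sketch of ioc_exit_icq()'s guard-then-mark shape, built around the hits at lines 47 and 53; the elevator-callback plumbing in the middle is reconstructed, not quoted:

    static void ioc_exit_icq(struct io_cq *icq)
    {
            struct elevator_type *et = icq->q->elevator->type;

            if (icq->flags & ICQ_EXITED)    /* already torn down */
                    return;

            if (et->ops.exit_icq)
                    et->ops.exit_icq(icq);

            icq->flags |= ICQ_EXITED;       /* make repeat calls no-ops */
    }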
blk-lib.c
      26  sector_t nr_sects, gfp_t gfp_mask, int flags,  in __blkdev_issue_discard() argument
      40  if (flags & BLKDEV_DISCARD_SECURE) {  in __blkdev_issue_discard()
     132  sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)  in blkdev_issue_discard() argument
     139  ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,  in blkdev_issue_discard()
     248  struct bio **biop, unsigned flags)  in __blkdev_issue_write_zeroes() argument
     271  if (flags & BLKDEV_ZERO_NOUNMAP)  in __blkdev_issue_write_zeroes()
     360  unsigned flags)  in __blkdev_issue_zeroout() argument
     370  biop, flags);  in __blkdev_issue_zeroout()
     371  if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))  in __blkdev_issue_zeroout()
     393  sector_t nr_sects, gfp_t gfp_mask, unsigned flags)  in blkdev_issue_zeroout() argument
          [all …]
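Here flags are caller-visible behavior switches rather than lock state: BLKDEV_DISCARD_SECURE upgrades the operation, while BLKDEV_ZERO_NOUNMAP and BLKDEV_ZERO_NOFALLBACK steer the zeroout path. A sketch of the discard branch behind the hit at line 40; the capability check is reconstructed context:

    unsigned int op = REQ_OP_DISCARD;

    if (flags & BLKDEV_DISCARD_SECURE) {
            if (!blk_queue_secure_erase(q))
                    return -EOPNOTSUPP;     /* device can't do secure erase */
            op = REQ_OP_SECURE_ERASE;
    }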
blk-stat.c
     140  unsigned long flags;  in blk_stat_add_callback() local
     151  spin_lock_irqsave(&q->stats->lock, flags);  in blk_stat_add_callback()
     154  spin_unlock_irqrestore(&q->stats->lock, flags);  in blk_stat_add_callback()
     160  unsigned long flags;  in blk_stat_remove_callback() local
     162  spin_lock_irqsave(&q->stats->lock, flags);  in blk_stat_remove_callback()
     166  spin_unlock_irqrestore(&q->stats->lock, flags);  in blk_stat_remove_callback()
     189  unsigned long flags;  in blk_stat_enable_accounting() local
     191  spin_lock_irqsave(&q->stats->lock, flags);  in blk_stat_enable_accounting()
     194  spin_unlock_irqrestore(&q->stats->lock, flags);  in blk_stat_enable_accounting()
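Every hit in this file is one idiom, and it is the most common use of `flags` in the whole listing: a local unsigned long holds the saved interrupt state across a spin_lock_irqsave()/spin_unlock_irqrestore() pair. A minimal kernel-style sketch; the list operation in the middle is illustrative of blk_stat_add_callback(), not quoted from it:

    unsigned long flags;

    spin_lock_irqsave(&q->stats->lock, flags);      /* IRQ state saved into 'flags' */
    list_add_tail_rcu(&cb->list, &q->stats->callbacks);
    spin_unlock_irqrestore(&q->stats->lock, flags); /* restores exactly that state */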
blk-mq-tag.h
      35  int node, unsigned int flags);
      36  extern void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags);
      39  unsigned int flags);
      76  if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))  in blk_mq_tag_busy()
      84  if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))  in blk_mq_tag_idle()
blk-mq.c
     233  if (hctx->flags & BLK_MQ_F_BLOCKING)  in blk_mq_quiesce_queue()
     298  if (data->flags & BLK_MQ_REQ_PM)  in blk_mq_rq_ctx_init()
     363  data->flags |= BLK_MQ_REQ_NOWAIT;  in __blk_mq_alloc_request()
     373  !(data->flags & BLK_MQ_REQ_RESERVED))  in __blk_mq_alloc_request()
     392  if (data->flags & BLK_MQ_REQ_NOWAIT)  in __blk_mq_alloc_request()
     407  blk_mq_req_flags_t flags)  in blk_mq_alloc_request() argument
     411  .flags = flags,  in blk_mq_alloc_request()
     417  ret = blk_queue_enter(q, flags);  in blk_mq_alloc_request()
     435  unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)  in blk_mq_alloc_request_hctx() argument
     439  .flags = flags,  in blk_mq_alloc_request_hctx()
          [all …]
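blk-mq.c threads a second kind of flags, blk_mq_req_flags_t, through struct blk_mq_alloc_data; BLK_MQ_REQ_NOWAIT, _RESERVED and _PM then steer allocation, as the hits at 363-392 show. A trimmed sketch of blk_mq_alloc_request()'s entry, assembled from the hits at 407-417 with everything past queue entry omitted:

    struct blk_mq_alloc_data data = {
            .q         = q,
            .flags     = flags,     /* BLK_MQ_REQ_NOWAIT / _RESERVED / _PM */
            .cmd_flags = op,
    };
    int ret;

    ret = blk_queue_enter(q, flags);   /* NOWAIT also applies to queue entry */
    if (ret)
            return ERR_PTR(ret);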
blk-integrity.c
     274  bi->flags |= BLK_INTEGRITY_VERIFY;  in integrity_verify_store()
     276  bi->flags &= ~BLK_INTEGRITY_VERIFY;  in integrity_verify_store()
     283  return sprintf(page, "%d\n", (bi->flags & BLK_INTEGRITY_VERIFY) != 0);  in integrity_verify_show()
     293  bi->flags |= BLK_INTEGRITY_GENERATE;  in integrity_generate_store()
     295  bi->flags &= ~BLK_INTEGRITY_GENERATE;  in integrity_generate_store()
     302  return sprintf(page, "%d\n", (bi->flags & BLK_INTEGRITY_GENERATE) != 0);  in integrity_generate_show()
     308  (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) != 0);  in integrity_device_show()
     401  bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |  in blk_integrity_register()
     402  template->flags;  in blk_integrity_register()
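This file shows the canonical set/clear/test triple for feature bits, driven from sysfs: |= to set, &= ~ to clear, and a masked compare to report. Condensed almost verbatim from the hits at 274-283; the `value` local stands in for the parsed sysfs input:

    if (value)
            bi->flags |= BLK_INTEGRITY_VERIFY;      /* enable verification */
    else
            bi->flags &= ~BLK_INTEGRITY_VERIFY;     /* disable it */

    /* the matching _show() reports the bit as 0/1 */
    return sprintf(page, "%d\n", (bi->flags & BLK_INTEGRITY_VERIFY) != 0);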
genhd.c
     194  unsigned int flags)  in disk_part_iter_init() argument
     204  if (flags & DISK_PITER_REVERSE)  in disk_part_iter_init()
     206  else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))  in disk_part_iter_init()
     211  piter->flags = flags;  in disk_part_iter_init()
     240  if (piter->flags & DISK_PITER_REVERSE) {  in disk_part_iter_next()
     242  if (piter->flags & (DISK_PITER_INCL_PART0 |  in disk_part_iter_next()
     262  !(piter->flags & DISK_PITER_INCL_EMPTY) &&  in disk_part_iter_next()
     263  !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&  in disk_part_iter_next()
     735  if (disk->flags & GENHD_FL_HIDDEN)  in register_disk()
     792  !(disk->flags & (GENHD_FL_EXT_DEVT | GENHD_FL_HIDDEN)));  in __device_add_disk()
          [all …]
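disk_part_iter_init() uses DISK_PITER_* bits to pick the starting slot and then stashes the whole word for disk_part_iter_next() to consult on every step. A sketch of the selection logic behind the hits at 204-211; the reverse-start index is reconstructed, not quoted:

    if (flags & DISK_PITER_REVERSE)
            piter->idx = ptbl->len - 1;     /* walk the table backwards */
    else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))
            piter->idx = 0;                 /* include the whole-disk slot 0 */
    else
            piter->idx = 1;                 /* default: real partitions only */

    piter->flags = flags;                   /* _next() re-checks these bits */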
blk-wbt.c
     542  enum wbt_flags flags = 0;  in bio_to_wbt_flags() local
     548  flags = WBT_READ;  in bio_to_wbt_flags()
     551  flags |= WBT_KSWAPD;  in bio_to_wbt_flags()
     553  flags |= WBT_DISCARD;  in bio_to_wbt_flags()
     554  flags |= WBT_TRACKED;  in bio_to_wbt_flags()
     556  return flags;  in bio_to_wbt_flags()
     562  enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);  in wbt_cleanup() local
     563  __wbt_done(rqos, flags);  in wbt_cleanup()
     575  enum wbt_flags flags;  in wbt_wait() local
     577  flags = bio_to_wbt_flags(rwb, bio);  in wbt_wait()
          [all …]
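bio_to_wbt_flags() is a small flags builder: it derives a wbt_flags word from the bio and the calling context, and both wbt_wait() and wbt_cleanup() recompute it the same way. A reconstructed shape assembled from the hits; the upstream rwb_enabled() early-out is omitted and the branch conditions are assumptions consistent with the flag names:

    static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
    {
            enum wbt_flags flags = 0;

            if (bio_op(bio) == REQ_OP_READ) {
                    flags = WBT_READ;
            } else if (wbt_should_throttle(rwb, bio)) {
                    if (current_is_kswapd())
                            flags |= WBT_KSWAPD;    /* reclaim writeback */
                    if (bio_op(bio) == REQ_OP_DISCARD)
                            flags |= WBT_DISCARD;
                    flags |= WBT_TRACKED;           /* this bio is throttled */
            }
            return flags;
    }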
blk-mq-sched.c
     368  if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||  in __blk_mq_sched_bio_merge()
     522  unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;  in blk_mq_sched_free_tags() local
     526  blk_mq_free_rq_map(hctx->sched_tags, flags);  in blk_mq_sched_free_tags()
     537  unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;  in blk_mq_sched_alloc_tags() local
     541  set->reserved_tags, flags);  in blk_mq_sched_alloc_tags()
     560  unsigned int flags = hctx->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;  in blk_mq_sched_tags_teardown() local
     563  blk_mq_free_rq_map(hctx->sched_tags, flags);  in blk_mq_sched_tags_teardown()
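Three functions here derive a local flags word by masking a bit off: scheduler tag maps are always per-hctx, so BLK_MQ_F_TAG_HCTX_SHARED is stripped before allocating or freeing them. Near-verbatim from the hits at 537-541, with the allocator call's first arguments filled in as an assumption:

    unsigned int flags = set->flags & ~BLK_MQ_F_TAG_HCTX_SHARED;

    hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
                                           set->reserved_tags, flags);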
ioctl.c
      75  compat_int_t flags;  member
     119  unsigned long arg, unsigned long flags)  in blk_ioctl_discard() argument
     151  GFP_KERNEL, flags);  in blk_ioctl_discard()
     281  if (reg.flags & ~PR_FL_IGNORE_KEY)  in blkdev_pr_register()
     283  return ops->pr_register(bdev, reg.old_key, reg.new_key, reg.flags);  in blkdev_pr_register()
     299  if (rsv.flags & ~PR_FL_IGNORE_KEY)  in blkdev_pr_reserve()
     301  return ops->pr_reserve(bdev, rsv.key, rsv.type, rsv.flags);  in blkdev_pr_reserve()
     317  if (rsv.flags)  in blkdev_pr_release()
     335  if (p.flags)  in blkdev_pr_preempt()
     353  if (c.flags)  in blkdev_pr_clear()
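The persistent-reservation handlers all validate userspace-supplied flags before acting: bits outside the recognized mask mean the request is refused rather than silently reinterpreted. A sketch of blkdev_pr_reserve()'s tail, based on the hits at 299-301; the copy_from_user() step is reconstructed context:

    struct pr_reservation rsv;

    if (copy_from_user(&rsv, arg, sizeof(rsv)))
            return -EFAULT;
    if (rsv.flags & ~PR_FL_IGNORE_KEY)      /* reject unknown flag bits */
            return -EOPNOTSUPP;
    return ops->pr_reserve(bdev, rsv.key, rsv.type, rsv.flags);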
bio.c
     304  unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);  in bio_reset() local
     309  bio->bi_flags = flags;  in bio_reset()
     536  unsigned long flags;  in zero_fill_bio_iter() local
     541  char *data = bvec_kmap_irq(&bv, &flags);  in zero_fill_bio_iter()
     544  bvec_kunmap_irq(data, &flags);  in zero_fill_bio_iter()
    1375  unsigned long flags;  in bio_check_pages_dirty() local
    1387  spin_lock_irqsave(&bio_dirty_lock, flags);  in bio_check_pages_dirty()
    1390  spin_unlock_irqrestore(&bio_dirty_lock, flags);  in bio_check_pages_dirty()
    1590  int flags)  in bioset_init() argument
    1607  if ((flags & BIOSET_NEED_BVECS) &&  in bioset_init()
          [all …]
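The bio_reset() hits show a partial-preservation trick: flag bits at or above BIO_RESET_BITS describe long-lived state, so they are saved, the bio is wiped, and they are written back. A sketch built around the hits at 304/309; the bio_uninit() and __bi_remaining lines are reconstructed, not quoted:

    void bio_reset(struct bio *bio)
    {
            /* keep only the bits that must survive a reset */
            unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

            bio_uninit(bio);
            memset(bio, 0, BIO_RESET_BYTES);
            bio->bi_flags = flags;                  /* restore the preserved bits */
            atomic_set(&bio->__bi_remaining, 1);
    }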
keyslot-manager.c
     174  unsigned long flags;  in blk_ksm_remove_slot_from_lru_list() local
     176  spin_lock_irqsave(&ksm->idle_slots_lock, flags);  in blk_ksm_remove_slot_from_lru_list()
     178  spin_unlock_irqrestore(&ksm->idle_slots_lock, flags);  in blk_ksm_remove_slot_from_lru_list()
     308  unsigned long flags;  in blk_ksm_put_slot() local
     316  &ksm->idle_slots_lock, flags)) {  in blk_ksm_put_slot()
     318  spin_unlock_irqrestore(&ksm->idle_slots_lock, flags);  in blk_ksm_put_slot()
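The hit at line 316 looks like the tail of an atomic_dec_and_lock_irqsave() call: drop a refcount and, only if it hit zero, take the lock with the IRQ state saved into flags. A sketch of that idiom as blk_ksm_put_slot() appears to use it; the list and wakeup body is an assumption, and the idle_slot_node/idle_slots names are illustrative:

    if (atomic_dec_and_lock_irqsave(&slot->slot_refs,
                                    &ksm->idle_slots_lock, flags)) {
            /* last reference dropped: park the keyslot on the idle list */
            list_add_tail(&slot->idle_slot_node, &ksm->idle_slots);
            spin_unlock_irqrestore(&ksm->idle_slots_lock, flags);
            wake_up(&ksm->idle_slots_wait_queue);
    }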
blk-iocost.c
     197  unsigned long flags; \
     199  spin_lock_irqsave(&trace_iocg_path_lock, flags); \
     204  spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
     738  static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)  in iocg_lock() argument
     741  spin_lock_irqsave(&iocg->ioc->lock, *flags);  in iocg_lock()
     744  spin_lock_irqsave(&iocg->waitq.lock, *flags);  in iocg_lock()
     748  static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)  in iocg_unlock() argument
     752  spin_unlock_irqrestore(&iocg->ioc->lock, *flags);  in iocg_unlock()
     754  spin_unlock_irqrestore(&iocg->waitq.lock, *flags);  in iocg_unlock()
    1401  int flags, void *key)  in iocg_wake_fn() argument
          [all …]
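iocost wraps conditional locking in helper pairs that take the saved IRQ state by pointer, so the spin_lock_irqsave() inside the helper can write through to the caller's local. Near-verbatim from the hits at 738-744; the nested waitq lock in the first branch is reconstructed:

    static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
    {
            if (lock_ioc) {
                    spin_lock_irqsave(&iocg->ioc->lock, *flags);
                    spin_lock(&iocg->waitq.lock);   /* nested, no second save */
            } else {
                    spin_lock_irqsave(&iocg->waitq.lock, *flags);
            }
    }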
cmdline-parser.c
      60  new_subpart->flags = 0;  in parse_subpart()
      63  new_subpart->flags |= PF_RDONLY;  in parse_subpart()
      68  new_subpart->flags |= PF_POWERUP_LOCK;  in parse_subpart()
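parse_subpart() turns textual blkdevparts= suffixes into flag bits. A sketch around the three hits; the "ro"/"lk" suffix matching follows the documented blkdevparts= format and is reconstructed, not quoted:

    new_subpart->flags = 0;
    if (!strncmp(partdef, "ro", 2)) {
            new_subpart->flags |= PF_RDONLY;        /* read-only partition */
            partdef += 2;
    }
    if (!strncmp(partdef, "lk", 2)) {
            new_subpart->flags |= PF_POWERUP_LOCK;  /* locked at power-up */
            partdef += 2;
    }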
mq-deadline-main.c
     308  unsigned long flags;  in deadline_fifo_request() local
     321  spin_lock_irqsave(&dd->zone_lock, flags);  in deadline_fifo_request()
     328  spin_unlock_irqrestore(&dd->zone_lock, flags);  in deadline_fifo_request()
     342  unsigned long flags;  in deadline_next_request() local
     355  spin_lock_irqsave(&dd->zone_lock, flags);  in deadline_next_request()
     361  spin_unlock_irqrestore(&dd->zone_lock, flags);  in deadline_next_request()
     837  unsigned long flags;  in dd_finish_request() local
     839  spin_lock_irqsave(&dd->zone_lock, flags);  in dd_finish_request()
     841  spin_unlock_irqrestore(&dd->zone_lock, flags);  in dd_finish_request()
blk-iolatency.c
     531  unsigned long flags;  in iolatency_check_latencies() local
     558  spin_lock_irqsave(&lat_info->lock, flags);  in iolatency_check_latencies()
     590  spin_unlock_irqrestore(&lat_info->lock, flags);  in iolatency_check_latencies()
     673  unsigned long flags;  in blkiolatency_timer_fn() local
     693  spin_lock_irqsave(&lat_info->lock, flags);  in blkiolatency_timer_fn()
     715  spin_unlock_irqrestore(&lat_info->lock, flags);  in blkiolatency_timer_fn()
    1036  .flags = CFTYPE_NOT_ON_ROOT,
bfq-cgroup.c
     106  stats->flags |= (1 << BFQG_stats_##name); \
     110  stats->flags &= ~(1 << BFQG_stats_##name); \
     114  return (stats->flags & (1 << BFQG_stats_##name)) != 0; \
     913  unsigned long flags;  in bfq_pd_offline() local
     916  spin_lock_irqsave(&bfqd->lock, flags);  in bfq_pd_offline()
     962  spin_unlock_irqrestore(&bfqd->lock, flags);  in bfq_pd_offline()
    1292  .flags = CFTYPE_NOT_ON_ROOT,
    1298  .flags = CFTYPE_NOT_ON_ROOT,
    1418  .flags = CFTYPE_NOT_ON_ROOT,
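The hits at 106/110/114 (note the trailing backslashes) are the body of a token-pasting macro that stamps out a set/clear/test helper triple per stats flag. A sketch of the generator; the helper names are reconstructed, while the three statements are the hits themselves:

    #define BFQG_FLAG_FNS(name)                                         \
    static void bfqg_stats_mark_##name(struct bfqg_stats *stats)        \
    {                                                                   \
            stats->flags |= (1 << BFQG_stats_##name);                   \
    }                                                                   \
    static void bfqg_stats_clear_##name(struct bfqg_stats *stats)       \
    {                                                                   \
            stats->flags &= ~(1 << BFQG_stats_##name);                  \
    }                                                                   \
    static int bfqg_stats_##name(struct bfqg_stats *stats)              \
    {                                                                   \
            return (stats->flags & (1 << BFQG_stats_##name)) != 0;      \
    }

    BFQG_FLAG_FNS(waiting)  /* one invocation per tracked state bit */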
bio-integrity.c
     235  !(bi->flags & BLK_INTEGRITY_VERIFY))  in bio_integrity_prep()
     239  !(bi->flags & BLK_INTEGRITY_GENERATE))  in bio_integrity_prep()
     270  if (bi->flags & BLK_INTEGRITY_IP_CHECKSUM)  in bio_integrity_prep()
scsi_ioctl.c
     310  if (hdr->flags & SG_FLAG_Q_AT_HEAD)  in sg_io()
     563  .flags = hdr->flags,  in put_sg_io_hdr()
     611  .flags = hdr32.flags,  in get_sg_io_hdr()
badblocks.c
     169  unsigned long flags;  in badblocks_set() local
     185  write_seqlock_irqsave(&bb->lock, flags);  in badblocks_set()
     311  write_sequnlock_irqrestore(&bb->lock, flags);  in badblocks_set()
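Same save/restore shape as the spinlock sites above, but with the seqlock write-side variant, so readers can run locklessly against the seqcount. A sketch around the hits at 185/311; the body is elided:

    unsigned long flags;

    write_seqlock_irqsave(&bb->lock, flags);
    /* ... insert or merge the new bad-block range ... */
    write_sequnlock_irqrestore(&bb->lock, flags);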
/block/partitions/
sun.c
      36  __be16 flags;  in sun_partition() member
     117  state->parts[slot].flags = 0;  in sun_partition()
     120  state->parts[slot].flags |= ADDPART_FLAG_RAID;  in sun_partition()
     122  state->parts[slot].flags |= ADDPART_FLAG_WHOLEDISK;  in sun_partition()
core.c
     371  sector_t start, sector_t len, int flags,  in add_partition() argument
     466  if (flags & ADDPART_FLAG_WHOLEDISK) {  in add_partition()
     474  if (flags & ADDPART_FLAG_WHOLEDISK)  in add_partition()
     623  !(disk->flags & GENHD_FL_NATIVE_CAPACITY)) {  in disk_unlock_native_capacity()
     626  disk->flags |= GENHD_FL_NATIVE_CAPACITY;  in disk_unlock_native_capacity()
     692  part = add_partition(disk, p, from, size, state->parts[p].flags,  in blk_add_partition()
     701  (state->parts[p].flags & ADDPART_FLAG_RAID))  in blk_add_partition()
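Read together with sun.c above, these hits show the hand-off: the table parser sets ADDPART_FLAG_* bits per slot, blk_add_partition() forwards them to add_partition(), and ADDPART_FLAG_RAID additionally triggers md autodetection in RAID-capable builds. A sketch of the consumer side; the md_autodetect_dev() call and the second argument to add_partition() are reconstructed, not quoted:

    part = add_partition(disk, p, from, size, state->parts[p].flags,
                         &state->parts[p].info);
    /* error handling omitted */

    if (IS_BUILTIN(CONFIG_BLK_DEV_MD) &&
        (state->parts[p].flags & ADDPART_FLAG_RAID))
            md_autodetect_dev(part_to_dev(part)->devt);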