/block/
blk-ioc.c
    48    if (icq->flags & ICQ_EXITED)    in ioc_exit_icq()
    56    icq->flags |= ICQ_EXITED;    in ioc_exit_icq()
    101   unsigned long flags;    in ioc_release_fn()  local
    109   spin_lock_irqsave_nested(&ioc->lock, flags, 1);    in ioc_release_fn()
    120   spin_unlock_irqrestore(&ioc->lock, flags);    in ioc_release_fn()
    122   spin_lock_irqsave_nested(&ioc->lock, flags, 1);    in ioc_release_fn()
    126   spin_unlock_irqrestore(&ioc->lock, flags);    in ioc_release_fn()
    140   unsigned long flags;    in put_io_context()  local
    153   spin_lock_irqsave(&ioc->lock, flags);    in put_io_context()
    159   spin_unlock_irqrestore(&ioc->lock, flags);    in put_io_context()
    [all …]
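The repeated `unsigned long flags` locals above are the standard IRQ-saving lock idiom: spin_lock_irqsave() stashes the caller's interrupt state in the local `flags` word and spin_unlock_irqrestore() puts it back exactly as it was. A minimal sketch of that pattern, with a hypothetical lock and counter standing in for the real io_context fields:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);   /* stand-in for ioc->lock */
    static int demo_refcount;            /* stand-in for the protected state */

    static void demo_put(void)
    {
        unsigned long flags;             /* holds the caller's IRQ state */

        spin_lock_irqsave(&demo_lock, flags);      /* disable IRQs, remember old state */
        demo_refcount--;
        spin_unlock_irqrestore(&demo_lock, flags); /* restore exactly what was saved */
    }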
blk-lib.c
    27    sector_t nr_sects, gfp_t gfp_mask, int flags,    in __blkdev_issue_discard()  argument
    40    if (flags & BLKDEV_DISCARD_SECURE) {    in __blkdev_issue_discard()
    130   sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)    in blkdev_issue_discard()  argument
    137   ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,    in blkdev_issue_discard()
    243   struct bio **biop, unsigned flags)    in __blkdev_issue_write_zeroes()  argument
    263   if (flags & BLKDEV_ZERO_NOUNMAP)    in __blkdev_issue_write_zeroes()
    349   unsigned flags)    in __blkdev_issue_zeroout()  argument
    359   biop, flags);    in __blkdev_issue_zeroout()
    360   if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))    in __blkdev_issue_zeroout()
    382   sector_t nr_sects, gfp_t gfp_mask, unsigned flags)    in blkdev_issue_zeroout()  argument
    [all …]
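These helpers take a flags word from the caller and test individual bits such as BLKDEV_DISCARD_SECURE, BLKDEV_ZERO_NOUNMAP and BLKDEV_ZERO_NOFALLBACK. A hedged caller-side sketch; the block device, start sector and length are made-up values for illustration:

    #include <linux/blkdev.h>

    /* Discard a hypothetical 1 MiB range, asking for a secure discard. */
    static int demo_discard(struct block_device *bdev)
    {
        sector_t start = 2048;   /* illustrative values only */
        sector_t nr = 2048;      /* 1 MiB in 512-byte sectors */

        return blkdev_issue_discard(bdev, start, nr, GFP_KERNEL,
                                    BLKDEV_DISCARD_SECURE);
    }

    /* Zero the same range, but only if the device can do it natively. */
    static int demo_zeroout(struct block_device *bdev)
    {
        return blkdev_issue_zeroout(bdev, 2048, 2048, GFP_KERNEL,
                                    BLKDEV_ZERO_NOUNMAP |
                                    BLKDEV_ZERO_NOFALLBACK);
    }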
blk-softirq.c
    45    unsigned long flags;    in trigger_softirq()  local
    48    local_irq_save(flags);    in trigger_softirq()
    55    local_irq_restore(flags);    in trigger_softirq()
    68    data->flags = 0;    in raise_blk_irq()
    102   unsigned long flags;    in __blk_complete_request()  local
    107   local_irq_save(flags);    in __blk_complete_request()
    145   local_irq_restore(flags);    in __blk_complete_request()
blk-integrity.c
    298   bi->flags |= BLK_INTEGRITY_VERIFY;    in integrity_verify_store()
    300   bi->flags &= ~BLK_INTEGRITY_VERIFY;    in integrity_verify_store()
    307   return sprintf(page, "%d\n", (bi->flags & BLK_INTEGRITY_VERIFY) != 0);    in integrity_verify_show()
    317   bi->flags |= BLK_INTEGRITY_GENERATE;    in integrity_generate_store()
    319   bi->flags &= ~BLK_INTEGRITY_GENERATE;    in integrity_generate_store()
    326   return sprintf(page, "%d\n", (bi->flags & BLK_INTEGRITY_GENERATE) != 0);    in integrity_generate_show()
    332   (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) != 0);    in integrity_device_show()
    413   bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |    in blk_integrity_register()
    414   template->flags;    in blk_integrity_register()
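The store/show pairs above are a common sysfs pattern: a store handler sets or clears one bit in a flags word, and the matching show handler reports whether that bit is set. A simplified sketch of just the bit handling, with the attribute plumbing omitted and the struct and flag names invented:

    #include <linux/kernel.h>

    #define DEMO_VERIFY  (1 << 0)   /* stand-in for BLK_INTEGRITY_VERIFY */

    struct demo_integrity {
        unsigned short flags;
    };

    static void demo_verify_store(struct demo_integrity *bi, bool enable)
    {
        if (enable)
            bi->flags |= DEMO_VERIFY;    /* turn the feature on */
        else
            bi->flags &= ~DEMO_VERIFY;   /* turn it off, leave other bits alone */
    }

    static int demo_verify_show(struct demo_integrity *bi, char *page)
    {
        /* "!= 0" collapses the masked value to 0 or 1 for the sysfs file */
        return sprintf(page, "%d\n", (bi->flags & DEMO_VERIFY) != 0);
    }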
blk-mq.c
    215   unsigned long flags;    in blk_mq_quiesce_queue_nowait()  local
    217   spin_lock_irqsave(q->queue_lock, flags);    in blk_mq_quiesce_queue_nowait()
    219   spin_unlock_irqrestore(q->queue_lock, flags);    in blk_mq_quiesce_queue_nowait()
    241   if (hctx->flags & BLK_MQ_F_BLOCKING)    in blk_mq_quiesce_queue()
    260   unsigned long flags;    in blk_mq_unquiesce_queue()  local
    262   spin_lock_irqsave(q->queue_lock, flags);    in blk_mq_unquiesce_queue()
    264   spin_unlock_irqrestore(q->queue_lock, flags);    in blk_mq_unquiesce_queue()
    302   if (data->flags & BLK_MQ_REQ_INTERNAL) {    in blk_mq_rq_ctx_init()
    369   data->flags |= BLK_MQ_REQ_NOWAIT;    in blk_mq_get_request()
    372   data->flags |= BLK_MQ_REQ_INTERNAL;    in blk_mq_get_request()
    [all …]
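The hctx->flags bits tested here, such as BLK_MQ_F_BLOCKING, originate from the driver's blk_mq_tag_set: a driver that may sleep in its ->queue_rq() sets that bit when registering the tag set, which is why blk_mq_quiesce_queue() checks it. A sketch for a hypothetical driver; the names, depth and queue count are illustrative only:

    #include <linux/blk-mq.h>

    static struct blk_mq_tag_set demo_tag_set;   /* hypothetical driver's tag set */

    static int demo_init_tags(const struct blk_mq_ops *ops)
    {
        demo_tag_set.ops = ops;
        demo_tag_set.nr_hw_queues = 1;
        demo_tag_set.queue_depth = 64;
        demo_tag_set.numa_node = NUMA_NO_NODE;
        /* BLK_MQ_F_BLOCKING: ->queue_rq() may sleep, so quiesce uses SRCU */
        demo_tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;

        return blk_mq_alloc_tag_set(&demo_tag_set);
    }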
genhd.c
    146   unsigned int flags)    in disk_part_iter_init()  argument
    156   if (flags & DISK_PITER_REVERSE)    in disk_part_iter_init()
    158   else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))    in disk_part_iter_init()
    163   piter->flags = flags;    in disk_part_iter_init()
    192   if (piter->flags & DISK_PITER_REVERSE) {    in disk_part_iter_next()
    194   if (piter->flags & (DISK_PITER_INCL_PART0 |    in disk_part_iter_next()
    212   !(piter->flags & DISK_PITER_INCL_EMPTY) &&    in disk_part_iter_next()
    213   !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&    in disk_part_iter_next()
    654   WARN_ON(!disk->minors && !(disk->flags & GENHD_FL_EXT_DEVT));    in device_add_disk()
    656   disk->flags |= GENHD_FL_UP;    in device_add_disk()
    [all …]
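The DISK_PITER_* bits passed to disk_part_iter_init() control which partitions the iterator visits: reverse order, inclusion of partition 0, and inclusion of empty partitions. A hedged usage sketch, assuming a struct gendisk pointer obtained elsewhere:

    #include <linux/genhd.h>
    #include <linux/printk.h>

    static void demo_walk_partitions(struct gendisk *disk)
    {
        struct disk_part_iter piter;
        struct hd_struct *part;

        /* include empty partitions; add DISK_PITER_REVERSE to walk backwards */
        disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
        while ((part = disk_part_iter_next(&piter)))
            pr_info("partition %d: %llu sectors\n", part->partno,
                    (unsigned long long)part->nr_sects);
        disk_part_iter_exit(&piter);
    }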
bsg.c
    48    unsigned long flags;    member
    93    unsigned long flags;    in bsg_free_command()  local
    97    spin_lock_irqsave(&bd->lock, flags);    in bsg_free_command()
    99    spin_unlock_irqrestore(&bd->lock, flags);    in bsg_free_command()
    300   unsigned long flags;    in bsg_rq_end_io()  local
    307   spin_lock_irqsave(&bd->lock, flags);    in bsg_rq_end_io()
    310   spin_unlock_irqrestore(&bd->lock, flags);    in bsg_rq_end_io()
    322   int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));    in bsg_add_command()
    370   if (!test_bit(BSG_F_BLOCK, &bd->flags)) {    in bsg_get_done_cmd()
    459   spin = !test_bit(BSG_F_BLOCK, &bd->flags);    in bsg_complete()
    [all …]
ioctl.c
    201   unsigned long arg, unsigned long flags)    in blk_ioctl_discard()  argument
    224   return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);    in blk_ioctl_discard()
    321   if (reg.flags & ~PR_FL_IGNORE_KEY)    in blkdev_pr_register()
    323   return ops->pr_register(bdev, reg.old_key, reg.new_key, reg.flags);    in blkdev_pr_register()
    339   if (rsv.flags & ~PR_FL_IGNORE_KEY)    in blkdev_pr_reserve()
    341   return ops->pr_reserve(bdev, rsv.key, rsv.type, rsv.flags);    in blkdev_pr_reserve()
    357   if (rsv.flags)    in blkdev_pr_release()
    375   if (p.flags)    in blkdev_pr_preempt()
    393   if (c.flags)    in blkdev_pr_clear()
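The persistent-reservation handlers show a defensive pattern for flag words copied in from user space: any bit the kernel does not understand is rejected up front, so the field can gain new meanings later without old kernels silently misinterpreting them. A generic sketch of that check; the struct and flag names here are hypothetical:

    #include <linux/errno.h>
    #include <linux/types.h>

    #define DEMO_FL_IGNORE_KEY  (1u << 0)   /* the only flag this ioctl knows */

    struct demo_pr_args {
        __u64 key;
        __u32 flags;
    };

    static int demo_pr_check(const struct demo_pr_args *arg)
    {
        /* reject any bits beyond the ones this kernel understands */
        if (arg->flags & ~DEMO_FL_IGNORE_KEY)
            return -EOPNOTSUPP;
        return 0;
    }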
blk-flush.c
    225   unsigned long flags = 0;    in flush_end_io()  local
    232   spin_lock_irqsave(&fq->mq_flush_lock, flags);    in flush_end_io()
    272   spin_unlock_irqrestore(&fq->mq_flush_lock, flags);    in flush_end_io()
    392   unsigned long flags;    in mq_flush_data_end_io()  local
    401   spin_lock_irqsave(&fq->mq_flush_lock, flags);    in mq_flush_data_end_io()
    403   spin_unlock_irqrestore(&fq->mq_flush_lock, flags);    in mq_flush_data_end_io()
blk-timeout.c
    133   unsigned long flags, next = 0;    in blk_timeout_work()  local
    137   spin_lock_irqsave(q->queue_lock, flags);    in blk_timeout_work()
    145   spin_unlock_irqrestore(q->queue_lock, flags);    in blk_timeout_work()
blk-mq-tag.c
    71    if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))    in hctx_may_queue()
    96    if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&    in __blk_mq_get_tag()
    115   if (data->flags & BLK_MQ_REQ_RESERVED) {    in blk_mq_get_tag()
    131   if (data->flags & BLK_MQ_REQ_NOWAIT)    in blk_mq_get_tag()
    166   if (data->flags & BLK_MQ_REQ_RESERVED)    in blk_mq_get_tag()
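BLK_MQ_REQ_RESERVED and BLK_MQ_REQ_NOWAIT are supplied by whoever asks for a tag: RESERVED draws from the per-tagset reserved pool (typically error-handling or admin commands), while NOWAIT makes the allocation fail instead of sleeping. A hedged driver-side sketch using blk_mq_alloc_request(); the error-handling context is assumed:

    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>
    #include <linux/err.h>

    /* Try to grab a reserved request without sleeping, e.g. from an EH path. */
    static struct request *demo_get_reserved_rq(struct request_queue *q)
    {
        struct request *rq;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN,
                                  BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(rq))
            return NULL;   /* no reserved tag free right now */
        return rq;
    }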
partition-generic.c
    302   sector_t start, sector_t len, int flags,    in add_partition()  argument
    380   if (flags & ADDPART_FLAG_WHOLEDISK) {    in add_partition()
    388   if (flags & ADDPART_FLAG_WHOLEDISK)    in add_partition()
    423   !(disk->flags & GENHD_FL_NATIVE_CAPACITY)) {    in disk_unlock_native_capacity()
    426   disk->flags |= GENHD_FL_NATIVE_CAPACITY;    in disk_unlock_native_capacity()
    618   state->parts[p].flags,    in rescan_partitions()
    626   if (state->parts[p].flags & ADDPART_FLAG_RAID)    in rescan_partitions()
blk-mq-tag.h
    63    if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))    in blk_mq_tag_busy()
    71    if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))    in blk_mq_tag_idle()
bio.c
    301   unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);    in bio_reset()  local
    306   bio->bi_flags = flags;    in bio_reset()
    534   unsigned long flags;    in zero_fill_bio()  local
    539   char *data = bvec_kmap_irq(&bv, &flags);    in zero_fill_bio()
    542   bvec_kunmap_irq(data, &flags);    in zero_fill_bio()
    1766  unsigned long flags;    in bio_dirty_fn()  local
    1769  spin_lock_irqsave(&bio_dirty_lock, flags);    in bio_dirty_fn()
    1772  spin_unlock_irqrestore(&bio_dirty_lock, flags);    in bio_dirty_fn()
    1802  unsigned long flags;    in bio_check_pages_dirty()  local
    1804  spin_lock_irqsave(&bio_dirty_lock, flags);    in bio_check_pages_dirty()
    [all …]
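The bio_reset() lines show a masking trick: flag bits at or above BIO_RESET_BITS survive the reset, everything below is cleared. A small stand-alone sketch of the same mask arithmetic; the bit layout here is invented purely for illustration:

    #include <stdio.h>

    #define RESET_BITS 4   /* pretend bits 0..3 are the resettable ones */

    int main(void)
    {
        unsigned long flags = 0xabUL;   /* 1010 1011: mixed low and high bits */

        /* keep only the bits that must survive a reset */
        unsigned long preserved = flags & (~0UL << RESET_BITS);

        printf("before: %#lx, preserved: %#lx\n", flags, preserved);
        return 0;   /* prints 0xab and 0xa0 */
    }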
cmdline-parser.c
    60    new_subpart->flags = 0;    in parse_subpart()
    63    new_subpart->flags |= PF_RDONLY;    in parse_subpart()
    68    new_subpart->flags |= PF_POWERUP_LOCK;    in parse_subpart()
bounce.c
    66    unsigned long flags;    in bounce_copy_vec()  local
    69    local_irq_save(flags);    in bounce_copy_vec()
    73    local_irq_restore(flags);    in bounce_copy_vec()
blk-mq.h
    112   unsigned int flags;    member
    122   if (data->flags & BLK_MQ_REQ_INTERNAL)    in blk_mq_tags_from_data()
bfq-cgroup.c
    39    stats->flags |= (1 << BFQG_stats_##name); \
    43    stats->flags &= ~(1 << BFQG_stats_##name); \
    47    return (stats->flags & (1 << BFQG_stats_##name)) != 0; \
    750   unsigned long flags;    in bfq_pd_offline()  local
    753   spin_lock_irqsave(&bfqd->lock, flags);    in bfq_pd_offline()
    792   spin_unlock_irqrestore(&bfqd->lock, flags);    in bfq_pd_offline()
    1030  .flags = CFTYPE_NOT_ON_ROOT,
    1146  .flags = CFTYPE_NOT_ON_ROOT,
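Lines 39-47 above are the body of a helper-generating macro: one macro invocation per flag name expands into a mark/clear/test triplet operating on stats->flags via token pasting. A simplified sketch of that pattern; the struct, enum and flag names are invented for illustration:

    struct demo_stats {
        unsigned int flags;
    };

    enum demo_stats_flags {
        DEMO_stats_waiting,   /* each enum value is one bit position */
        DEMO_stats_idling,
    };

    /* expands to demo_stats_mark_<name>, demo_stats_clear_<name>, demo_stats_<name> */
    #define DEMO_FLAG_FNS(name)                                          \
    static void demo_stats_mark_##name(struct demo_stats *stats)        \
    {                                                                    \
        stats->flags |= (1 << DEMO_stats_##name);                        \
    }                                                                    \
    static void demo_stats_clear_##name(struct demo_stats *stats)       \
    {                                                                    \
        stats->flags &= ~(1 << DEMO_stats_##name);                       \
    }                                                                    \
    static int demo_stats_##name(struct demo_stats *stats)              \
    {                                                                    \
        return (stats->flags & (1 << DEMO_stats_##name)) != 0;           \
    }

    DEMO_FLAG_FNS(waiting)
    DEMO_FLAG_FNS(idling)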
blk-core.c
    434   unsigned long flags;    in blk_run_queue()  local
    438   spin_lock_irqsave(q->queue_lock, flags);    in blk_run_queue()
    440   spin_unlock_irqrestore(q->queue_lock, flags);    in blk_run_queue()
    1607  unsigned long flags;    in blk_put_request()  local
    1609  spin_lock_irqsave(q->queue_lock, flags);    in blk_put_request()
    1611  spin_unlock_irqrestore(q->queue_lock, flags);    in blk_put_request()
    2352  unsigned long flags;    in blk_insert_cloned_request()  local
    2374  spin_lock_irqsave(q->queue_lock, flags);    in blk_insert_cloned_request()
    2376  spin_unlock_irqrestore(q->queue_lock, flags);    in blk_insert_cloned_request()
    2392  spin_unlock_irqrestore(q->queue_lock, flags);    in blk_insert_cloned_request()
    [all …]
blk-mq-sched.c
    62    if (hctx->flags & BLK_MQ_F_TAG_SHARED) {    in blk_mq_sched_mark_restart_hctx()
    76    if (hctx->flags & BLK_MQ_F_TAG_SHARED) {    in blk_mq_sched_restart_hctx()
    239   if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&    in __blk_mq_sched_bio_merge()
    311   if (set->flags & BLK_MQ_F_TAG_SHARED) {    in blk_mq_sched_restart()
compat_ioctl.c
    179   compat_int_t flags;    member
    194   err |= get_user(n, &ua32->flags);    in compat_blkpg_ioctl()
    195   err |= put_user(n, &a->flags);    in compat_blkpg_ioctl()
bio-integrity.c
    263   !(bi->flags & BLK_INTEGRITY_VERIFY))    in bio_integrity_prep()
    267   !(bi->flags & BLK_INTEGRITY_GENERATE))    in bio_integrity_prep()
    298   if (bi->flags & BLK_INTEGRITY_IP_CHECKSUM)    in bio_integrity_prep()
bfq-iosched.c
    116   __set_bit(BFQQF_##name, &(bfqq)->flags); \
    120   __clear_bit(BFQQF_##name, &(bfqq)->flags); \
    124   return test_bit(BFQQF_##name, &(bfqq)->flags); \
    282   unsigned long flags;    in bfq_bic_lookup()  local
    285   spin_lock_irqsave(q->queue_lock, flags);    in bfq_bic_lookup()
    287   spin_unlock_irqrestore(q->queue_lock, flags);    in bfq_bic_lookup()
    3766  unsigned long flags;    in bfq_exit_icq_bfqq()  local
    3768  spin_lock_irqsave(&bfqd->lock, flags);    in bfq_exit_icq_bfqq()
    3772  spin_unlock_irqrestore(&bfqd->lock, flags);    in bfq_exit_icq_bfqq()
    4367  unsigned long flags;    in bfq_finish_request()  local
    [all …]
blk-throttle.c
    140   unsigned int flags;    member
    690   tg->flags |= THROTL_TG_PENDING;    in __throtl_enqueue_tg()
    696   if (!(tg->flags & THROTL_TG_PENDING))    in throtl_enqueue_tg()
    703   tg->flags &= ~THROTL_TG_PENDING;    in __throtl_dequeue_tg()
    708   if (tg->flags & THROTL_TG_PENDING)    in throtl_dequeue_tg()
    1088  tg->flags |= THROTL_TG_WAS_EMPTY;    in throtl_add_bio_tg()
    1119  tg->flags &= ~THROTL_TG_WAS_EMPTY;    in tg_update_disptime()
    1295  if (tg->flags & THROTL_TG_WAS_EMPTY) {    in throtl_pending_timer_fn()
    1433  if (tg->flags & THROTL_TG_PENDING) {    in tg_conf_updated()
    1712  .flags = CFTYPE_NOT_ON_ROOT,
    [all …]
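THROTL_TG_PENDING acts as a membership flag: the outer enqueue/dequeue helpers test it so the operation runs at most once, and the double-underscore helpers set or clear it when they actually add or remove the group. A generic sketch of that idempotent-wrapper pattern; the types and list details are invented and do not mirror the throttling code's rbtree:

    #include <linux/list.h>

    #define DEMO_PENDING  (1u << 0)

    struct demo_group {
        unsigned int flags;
        struct list_head node;
    };

    static void __demo_enqueue(struct demo_group *g, struct list_head *q)
    {
        list_add_tail(&g->node, q);
        g->flags |= DEMO_PENDING;        /* remember we are on the queue */
    }

    static void demo_enqueue(struct demo_group *g, struct list_head *q)
    {
        if (!(g->flags & DEMO_PENDING))  /* no-op if already queued */
            __demo_enqueue(g, q);
    }

    static void demo_dequeue(struct demo_group *g)
    {
        if (g->flags & DEMO_PENDING) {
            list_del_init(&g->node);
            g->flags &= ~DEMO_PENDING;
        }
    }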
/block/partitions/ |
sun.c
    29    __be16 flags;    in sun_partition()  member
    110   state->parts[slot].flags = 0;    in sun_partition()
    113   state->parts[slot].flags |= ADDPART_FLAG_RAID;    in sun_partition()
    115   state->parts[slot].flags |= ADDPART_FLAG_WHOLEDISK;    in sun_partition()