
Searched refs:opf (Results 1 – 11 of 11) sorted by relevance

/block/
blk-cgroup-rwstat.h
62 blk_opf_t opf, uint64_t val) in blkg_rwstat_add() argument
66 if (op_is_discard(opf)) in blkg_rwstat_add()
68 else if (op_is_write(opf)) in blkg_rwstat_add()
75 if (op_is_sync(opf)) in blkg_rwstat_add()
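
The hits above show that blkg_rwstat_add() looks only at the opf bits to decide which per-direction counter a sample lands in, with op_is_sync() selecting the sync/async pair separately. Below is a minimal sketch of that bucket selection; opf_to_rwstat_bucket() is an illustrative name, not a kernel helper, and the block/-internal header is assumed to be on the include path.

/* Pick the read/write/discard bucket from the opf bits alone. */
#include "blk-cgroup-rwstat.h"	/* block/-internal, defines enum blkg_rwstat_type */
#include <linux/blk_types.h>

static enum blkg_rwstat_type opf_to_rwstat_bucket(blk_opf_t opf)
{
	if (op_is_discard(opf))
		return BLKG_RWSTAT_DISCARD;
	if (op_is_write(opf))
		return BLKG_RWSTAT_WRITE;
	return BLKG_RWSTAT_READ;	/* not a write or discard: count as a read */
}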

blk-wbt.c
534 static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf) in get_limit() argument
538 if ((opf & REQ_OP_MASK) == REQ_OP_DISCARD) in get_limit()
549 if ((opf & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd()) in get_limit()
551 else if ((opf & REQ_BACKGROUND) || close_io(rwb)) { in get_limit()
566 blk_opf_t opf; member
572 return rq_wait_inc_below(rqw, get_limit(data->rwb, data->opf)); in wbt_inflight_cb()
586 blk_opf_t opf) in __wbt_wait() argument
592 .opf = opf, in __wbt_wait()
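
In the writeback-throttling hits above, get_limit() derives a queue-depth limit purely from the opf bits: discards get their own limit, high-priority/sync writeback gets the maximum depth, and background writeback is throttled hardest. A minimal classification sketch follows; the enum and helper name are illustrative, and REQ_SYNC is used as a simplified stand-in for the REQ_HIPRIO test shown above.

#include <linux/blk_types.h>

enum wbt_class_sketch { WBT_DISCARD, WBT_HIGH_PRIO, WBT_BACKGROUND, WBT_NORMAL };

static enum wbt_class_sketch classify_opf_sketch(blk_opf_t opf)
{
	if ((opf & REQ_OP_MASK) == REQ_OP_DISCARD)
		return WBT_DISCARD;	/* discards get a separate, lower limit */
	if (opf & REQ_SYNC)
		return WBT_HIGH_PRIO;	/* sync writeback may use the full depth */
	if (opf & REQ_BACKGROUND)
		return WBT_BACKGROUND;	/* background writeback is throttled hardest */
	return WBT_NORMAL;
}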

bio.c
247 unsigned short max_vecs, blk_opf_t opf) in bio_init() argument
251 bio->bi_opf = opf; in bio_init()
304 void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf) in bio_reset() argument
312 bio->bi_opf = opf; in bio_reset()
353 unsigned int nr_pages, blk_opf_t opf, gfp_t gfp) in blk_next_bio() argument
355 struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp); in blk_next_bio()
437 unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp, in bio_alloc_percpu_cache() argument
457 bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf); in bio_alloc_percpu_cache()
497 blk_opf_t opf, gfp_t gfp_mask, in bio_alloc_bioset() argument
508 if (opf & REQ_ALLOC_CACHE) { in bio_alloc_bioset()
[all …]
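
These bio.c hits are where a blk_opf_t actually becomes I/O: bio_init() and bio_reset() store it in bio->bi_opf, and the allocation helpers pass it straight through. A minimal sketch of a caller follows, assuming kernel context; write_page_sync_sketch() is an illustrative helper, not a kernel function.

#include <linux/bio.h>
#include <linux/mm.h>

static int write_page_sync_sketch(struct block_device *bdev,
				  struct page *page, sector_t sector)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC;	/* op plus hint flags */
	struct bio *bio = bio_alloc(bdev, 1, opf, GFP_NOIO);
	int ret;

	if (!bio)
		return -ENOMEM;
	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	ret = submit_bio_wait(bio);	/* bi_opf carries opf down to the driver */
	bio_put(bio);
	return ret;
}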

blk-mq.h
85 static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf) in blk_mq_get_hctx_type() argument
92 if (opf & REQ_POLLED) in blk_mq_get_hctx_type()
94 else if ((opf & REQ_OP_MASK) == REQ_OP_READ) in blk_mq_get_hctx_type()
106 blk_opf_t opf, in blk_mq_map_queue() argument
109 return ctx->hctxs[blk_mq_get_hctx_type(opf)]; in blk_mq_map_queue()
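
blk_mq_get_hctx_type() shows that the hardware-queue type is a pure function of the opf bits, and blk_mq_map_queue() then indexes the per-ctx hctx table with it. The same mapping, restated as a stand-alone sketch (the helper name is illustrative):

#include <linux/blk-mq.h>
#include <linux/blk_types.h>

static enum hctx_type opf_to_hctx_type_sketch(blk_opf_t opf)
{
	if (opf & REQ_POLLED)
		return HCTX_TYPE_POLL;		/* caller guarantees polling is enabled */
	if ((opf & REQ_OP_MASK) == REQ_OP_READ)
		return HCTX_TYPE_READ;		/* reads may have a dedicated map */
	return HCTX_TYPE_DEFAULT;
}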

fops.c
29 blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE; in dio_bio_write_op() local
33 opf |= REQ_FUA; in dio_bio_write_op()
34 return opf; in dio_bio_write_op()
171 blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb); in __blkdev_direct_IO() local
179 opf |= REQ_ALLOC_CACHE; in __blkdev_direct_IO()
180 bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL, in __blkdev_direct_IO()
253 bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL); in __blkdev_direct_IO()
309 blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb); in __blkdev_direct_IO_async() local
319 opf |= REQ_ALLOC_CACHE; in __blkdev_direct_IO_async()
320 bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL, in __blkdev_direct_IO_async()
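
The fops.c hits build the opf for block-device direct I/O: writes start from REQ_OP_WRITE | REQ_SYNC | REQ_IDLE, REQ_FUA is added for O_DSYNC-style writes, and REQ_ALLOC_CACHE opts into the per-cpu bio cache. A simplified sketch of the write-flag assembly, with the dsync test reduced to a plain IOCB_DSYNC check (the in-tree code uses a richer predicate):

#include <linux/blk_types.h>
#include <linux/fs.h>

static blk_opf_t dio_write_opf_sketch(const struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	if (iocb->ki_flags & IOCB_DSYNC)	/* simplified O_DSYNC test */
		opf |= REQ_FUA;			/* ask the device to commit it durably */
	return opf;
}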

bfq-cgroup.c
223 blk_opf_t opf) in bfqg_stats_update_io_add() argument
225 blkg_rwstat_add(&bfqg->stats.queued, opf, 1); in bfqg_stats_update_io_add()
231 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) in bfqg_stats_update_io_remove() argument
233 blkg_rwstat_add(&bfqg->stats.queued, opf, -1); in bfqg_stats_update_io_remove()
236 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) in bfqg_stats_update_io_merged() argument
238 blkg_rwstat_add(&bfqg->stats.merged, opf, 1); in bfqg_stats_update_io_merged()
242 u64 io_start_time_ns, blk_opf_t opf) in bfqg_stats_update_completion() argument
248 blkg_rwstat_add(&stats->service_time, opf, in bfqg_stats_update_completion()
251 blkg_rwstat_add(&stats->wait_time, opf, in bfqg_stats_update_completion()
257 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { } in bfqg_stats_update_io_remove() argument
[all …]
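
Here the opf is only forwarded: each bfqg stats helper passes it to blkg_rwstat_add() with a +1 or -1 delta so the per-direction counters stay balanced. Below is a sketch of an illustrative merge-accounting caller, using only the two helpers whose full signatures appear above; the function and its calling convention are assumptions, not the bfq code itself.

#include <linux/blk-mq.h>
#include "bfq-iosched.h"	/* block/-internal, declares the stats helpers */

static void merge_accounting_sketch(struct bfq_group *bfqg,
				    struct request *req, struct request *next)
{
	/* the request being absorbed leaves the queued counters ... */
	bfqg_stats_update_io_remove(bfqg, next->cmd_flags);
	/* ... and the surviving request is counted as a merge */
	bfqg_stats_update_io_merged(bfqg, req->cmd_flags);
}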

bfq-iosched.h
1071 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf);
1072 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf);
1074 u64 io_start_time_ns, blk_opf_t opf);
1082 blk_opf_t opf);

kyber-iosched.c
196 static unsigned int kyber_sched_domain(blk_opf_t opf) in kyber_sched_domain() argument
198 switch (opf & REQ_OP_MASK) { in kyber_sched_domain()
554 static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data) in kyber_limit_depth() argument
560 if (!op_is_sync(opf)) { in kyber_limit_depth()
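
Kyber masks off everything but the op bits to pick a scheduling domain, and limits queue depth only for async requests. A simplified restatement of the domain split; the enum is an illustrative stand-in for Kyber's internal domains, and the in-tree switch covers a few more ops.

#include <linux/blk_types.h>

enum sched_domain_sketch { DOM_READ, DOM_WRITE, DOM_DISCARD, DOM_OTHER };

static enum sched_domain_sketch opf_to_domain_sketch(blk_opf_t opf)
{
	switch (opf & REQ_OP_MASK) {	/* only the op bits matter here */
	case REQ_OP_READ:
		return DOM_READ;
	case REQ_OP_WRITE:
		return DOM_WRITE;
	case REQ_OP_DISCARD:
		return DOM_DISCARD;
	default:
		return DOM_OTHER;
	}
}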

blk-mq.c
519 blk_opf_t opf, in blk_mq_rq_cache_fill() argument
525 .cmd_flags = opf, in blk_mq_rq_cache_fill()
543 blk_opf_t opf, in blk_mq_alloc_cached_request() argument
555 rq = blk_mq_rq_cache_fill(q, plug, opf, flags); in blk_mq_alloc_cached_request()
563 if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type) in blk_mq_alloc_cached_request()
565 if (op_is_flush(rq->cmd_flags) != op_is_flush(opf)) in blk_mq_alloc_cached_request()
572 rq->cmd_flags = opf; in blk_mq_alloc_cached_request()
577 struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf, in blk_mq_alloc_request() argument
582 rq = blk_mq_alloc_cached_request(q, opf, flags); in blk_mq_alloc_request()
587 .cmd_flags = opf, in blk_mq_alloc_request()
[all …]
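
blk_mq_alloc_request() takes the blk_opf_t directly and records it as rq->cmd_flags; the cached-request path above only reuses a plugged request if blk_mq_get_hctx_type() and op_is_flush() agree for the new opf. A minimal sketch of a driver-style caller, assuming kernel context; alloc_probe_rq_sketch() is illustrative, not a kernel function.

#include <linux/blk-mq.h>
#include <linux/err.h>

static int alloc_probe_rq_sketch(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* rq->cmd_flags now holds REQ_OP_DRV_IN */
	blk_mq_free_request(rq);
	return 0;
}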

mq-deadline.c
642 static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data) in dd_limit_depth() argument
647 if (op_is_sync(opf) && !op_is_write(opf)) in dd_limit_depth()
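
mq-deadline leaves synchronous reads alone and only depth-limits everything else, using the same two-helper test that bfq builds below. The predicate, spelled out (the helper name is illustrative):

#include <linux/blk_types.h>

static bool is_sync_read_sketch(blk_opf_t opf)
{
	/* op_is_sync() is true for all reads and for writes carrying
	 * sync-type flags; excluding writes leaves exactly the synchronous
	 * reads, which the schedulers do not throttle. */
	return op_is_sync(opf) && !op_is_write(opf);
}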

bfq-iosched.c
690 static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data) in bfq_limit_depth() argument
699 if (op_is_sync(opf) && !op_is_write(opf)) { in bfq_limit_depth()
702 depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)]; in bfq_limit_depth()
708 bic_to_bfqq(bic, op_is_sync(opf), act_idx); in bfq_limit_depth()
722 __func__, bfqd->wr_busy_queues, op_is_sync(opf), depth); in bfq_limit_depth()
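
bfq uses op_is_sync(opf) twice: once in the sync-read guard and once as a 0/1 column index into its word_depths[][] table, with the row chosen by whether weight-raised queues are busy. A sketch of that indexing pattern; the 2x2 table and helper are illustrative stand-ins for bfqd->word_depths.

#include <linux/blk_types.h>

static unsigned int pick_depth_sketch(const unsigned int depths[2][2],
				      bool wr_busy, blk_opf_t opf)
{
	/* row: any weight-raised queues busy?  column: sync vs. async */
	return depths[!!wr_busy][op_is_sync(opf)];
}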