
Searched refs:rw (Results 1 – 7 of 7) sorted by relevance

/block/
blk-throttle.c
295 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw) in tg_bps_limit() argument
305 ret = tg->bps[rw][td->limit_index]; in tg_bps_limit()
309 tg->iops[rw][td->limit_index]) in tg_bps_limit()
315 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] && in tg_bps_limit()
316 tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) { in tg_bps_limit()
319 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td); in tg_bps_limit()
320 ret = min(tg->bps[rw][LIMIT_MAX], adjusted); in tg_bps_limit()
325 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw) in tg_iops_limit() argument
335 ret = tg->iops[rw][td->limit_index]; in tg_iops_limit()
339 tg->bps[rw][td->limit_index]) in tg_iops_limit()
[all …]
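
Note: the pattern above stores throttle limits in arrays indexed by I/O direction (rw) and by limit level, and at LIMIT_MAX a configured low limit may be "adjusted" upward but is clamped to the hard max. A minimal userspace sketch of that lookup, where struct grp, bps_limit(), and adjusted_low are illustrative stand-ins for throtl_grp, tg_bps_limit(), and throtl_adjusted_limit():

    #include <stdint.h>

    enum { LIMIT_LOW = 0, LIMIT_MAX = 1, LIMIT_CNT };

    struct grp {
        uint64_t bps[2][LIMIT_CNT];     /* bytes/s, [direction][level] */
    };

    /* Effective bps limit: at LIMIT_MAX, a configured low limit may be
     * scaled up ("adjusted") but never past the hard max. */
    static uint64_t bps_limit(const struct grp *g, int rw, int level,
                              uint64_t adjusted_low)
    {
        uint64_t ret = g->bps[rw][level];

        if (level == LIMIT_MAX && g->bps[rw][LIMIT_LOW] &&
            g->bps[rw][LIMIT_LOW] != g->bps[rw][LIMIT_MAX])
            ret = adjusted_low < g->bps[rw][LIMIT_MAX]
                    ? adjusted_low : g->bps[rw][LIMIT_MAX];
        return ret;
    }
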
blk-wbt.c
479 static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw) in get_limit() argument
491 if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd()) in get_limit()
493 else if ((rw & REQ_BACKGROUND) || close_io(rwb)) { in get_limit()
506 wait_queue_entry_t *wait, unsigned long rw) in may_queue() argument
526 return atomic_inc_below(&rqw->inflight, get_limit(rwb, rw)); in may_queue()
533 static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock) in __wbt_wait() argument
540 if (may_queue(rwb, rqw, &wait, rw)) in __wbt_wait()
547 if (may_queue(rwb, rqw, &wait, rw)) in __wbt_wait()
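
Note: get_limit() above derives a queue depth from the request flags (REQ_HIPRIO, REQ_BACKGROUND), and may_queue() admits a request only if the in-flight count can be raised without reaching that depth. A sketch of the admit-if-below step in C11 atomics; inc_below() is an illustrative re-implementation of the atomic_inc_below() call visible above, not the kernel's code:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Atomically increment *v, but only while it stays below 'below'. */
    static bool inc_below(atomic_int *v, int below)
    {
        int cur = atomic_load(v);

        do {
            if (cur >= below)
                return false;   /* over the limit: caller must wait */
        } while (!atomic_compare_exchange_weak(v, &cur, cur + 1));

        return true;            /* slot claimed */
    }
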
bounce.c
197 int rw = bio_data_dir(*bio_orig); in __blk_queue_bounce() local
232 if (rw == WRITE) { in __blk_queue_bounce()
250 if (rw == READ) in __blk_queue_bounce()
254 if (rw == READ) in __blk_queue_bounce()
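
Note: bio_data_dir() yields the transfer direction, and the bounce path copies through a low-memory page: before submission for a WRITE, after completion for a READ. A minimal sketch of that direction-dependent copy (bounce_copy() is illustrative, not a kernel helper):

    #include <string.h>

    static void bounce_copy(void *bounce, void *orig, size_t len,
                            int rw /* 0 = READ, 1 = WRITE */)
    {
        if (rw)                 /* WRITE: device will read the bounce page */
            memcpy(bounce, orig, len);
        else                    /* READ: copy the device's data back */
            memcpy(orig, bounce, len);
    }
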
bio.c
1814 void generic_start_io_acct(struct request_queue *q, int rw, in generic_start_io_acct() argument
1820 part_stat_inc(cpu, part, ios[rw]); in generic_start_io_acct()
1821 part_stat_add(cpu, part, sectors[rw], sectors); in generic_start_io_acct()
1822 part_inc_in_flight(q, part, rw); in generic_start_io_acct()
1828 void generic_end_io_acct(struct request_queue *q, int rw, in generic_end_io_acct() argument
1834 part_stat_add(cpu, part, ticks[rw], duration); in generic_end_io_acct()
1836 part_dec_in_flight(q, part, rw); in generic_end_io_acct()
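
Note: generic_start_io_acct() and generic_end_io_acct() form a pair: start bumps the per-direction ios/sectors counters and the in-flight gauge, end adds the service time and drops the gauge. A sketch of that pairing with illustrative names (part_stats, start_io_acct, end_io_acct are not the kernel's):

    struct part_stats {
        unsigned long ios[2];       /* requests started, per direction */
        unsigned long sectors[2];   /* sectors transferred, per direction */
        unsigned long ticks[2];     /* total service time, per direction */
        int in_flight[2];           /* currently outstanding, per direction */
    };

    static void start_io_acct(struct part_stats *s, int rw,
                              unsigned long sectors)
    {
        s->ios[rw]++;
        s->sectors[rw] += sectors;
        s->in_flight[rw]++;
    }

    static void end_io_acct(struct part_stats *s, int rw,
                            unsigned long duration)
    {
        s->ticks[rw] += duration;
        s->in_flight[rw]--;
    }
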
genhd.c
48 void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw) in part_inc_in_flight() argument
53 atomic_inc(&part->in_flight[rw]); in part_inc_in_flight()
55 atomic_inc(&part_to_disk(part)->part0.in_flight[rw]); in part_inc_in_flight()
58 void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw) in part_dec_in_flight() argument
63 atomic_dec(&part->in_flight[rw]); in part_dec_in_flight()
65 atomic_dec(&part_to_disk(part)->part0.in_flight[rw]); in part_dec_in_flight()
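
Note: part_inc_in_flight()/part_dec_in_flight() keep the counter twice: on the partition itself and, when the partition is not part0, on the whole-disk part0 as well, so disk-wide totals stay cheap to read. A C11 sketch of that dual bookkeeping (struct part and inc_in_flight() are illustrative):

    #include <stdatomic.h>

    struct part {
        atomic_int in_flight[2];
        struct part *disk_part0;    /* NULL if this is part0 itself */
    };

    static void inc_in_flight(struct part *p, int rw)
    {
        atomic_fetch_add(&p->in_flight[rw], 1);
        if (p->disk_part0)          /* mirror into the whole-disk counter */
            atomic_fetch_add(&p->disk_part0->in_flight[rw], 1);
    }
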
blk-core.c
2442 const int rw = rq_data_dir(req); in blk_account_io_completion() local
2448 part_stat_add(cpu, part, sectors[rw], bytes >> 9); in blk_account_io_completion()
2462 const int rw = rq_data_dir(req); in blk_account_io_done() local
2469 part_stat_inc(cpu, part, ios[rw]); in blk_account_io_done()
2470 part_stat_add(cpu, part, ticks[rw], duration); in blk_account_io_done()
2472 part_dec_in_flight(req->q, part, rw); in blk_account_io_done()
2504 int rw = rq_data_dir(rq); in blk_account_io_start() local
2514 part_stat_inc(cpu, part, merges[rw]); in blk_account_io_start()
2530 part_inc_in_flight(rq->q, part, rw); in blk_account_io_start()
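
Note: blk_account_io_start() above distinguishes merged requests from new ones: a merge only bumps merges[rw], while a new request is counted in flight; completion later adds sectors/ticks and decrements in-flight. A sketch of the start-time branch (struct acct and account_start() are illustrative names):

    struct acct {
        unsigned long merges[2];    /* requests merged, per direction */
        int in_flight[2];           /* outstanding requests, per direction */
    };

    static void account_start(struct acct *a, int rw, int new_io)
    {
        if (!new_io)
            a->merges[rw]++;        /* folded into an existing request */
        else
            a->in_flight[rw]++;     /* fresh request now outstanding */
    }
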
cfq-iosched.c
4434 const int rw = rq_data_dir(rq); in cfq_put_request() local
4436 BUG_ON(!cfqq->allocated[rw]); in cfq_put_request()
4437 cfqq->allocated[rw]--; in cfq_put_request()
4489 const int rw = rq_data_dir(rq); in cfq_set_request() local
4525 cfqq->allocated[rw]++; in cfq_set_request()
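
Note: cfq_set_request()/cfq_put_request() bracket a request's lifetime with a per-direction allocated[rw] count, and the BUG_ON() guards against an unbalanced put. A userspace sketch of that get/put pairing (queue_ctx, ctx_get, and ctx_put are illustrative):

    #include <assert.h>

    struct queue_ctx {
        int allocated[2];           /* outstanding requests per direction */
    };

    static void ctx_get(struct queue_ctx *q, int rw)
    {
        q->allocated[rw]++;
    }

    static void ctx_put(struct queue_ctx *q, int rw)
    {
        assert(q->allocated[rw]);   /* mirrors the BUG_ON() above */
        q->allocated[rw]--;
    }
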