References to the identifier "data" in block/blk-mq.c, grouped by enclosing
function. The number on each line is the blk-mq.c source line; each group
heading notes whether data is declared as a function argument or a local
variable there. (The trace_android_* hooks identify this as an Android
common kernel tree, circa v5.10.)

blk_mq_rq_ctx_init(), data is an argument:
  278  static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
  281  struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
  284  if (data->q->elevator) {
  293  rq->q = data->q;
  294  rq->mq_ctx = data->ctx;
  295  rq->mq_hctx = data->hctx;
  297  rq->cmd_flags = data->cmd_flags;
  298  if (data->flags & BLK_MQ_REQ_PM)
  300  if (blk_queue_io_stat(data->q))
  329  data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++;
  332  if (!op_is_flush(data->cmd_flags)) {
  333  struct elevator_queue *e = data->q->elevator;
  345  data->hctx->queued++;
  346  trace_android_vh_blk_rq_ctx_init(rq, tags, data, alloc_time_ns);
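
Every group in this listing passes state through the same small cursor
structure. The sketch below reconstructs it from the fields the matches
dereference (q, flags, cmd_flags, ctx, hctx); the shallow_depth member and
the exact layout are assumptions based on mainline trees of this era (it
backs the elevator limit_depth() hook seen at line 374), not something the
listing itself shows.

    /* Reconstruction, not verbatim source; lives in block/blk-mq.h. */
    struct blk_mq_alloc_data {
            /* input parameters */
            struct request_queue *q;
            blk_mq_req_flags_t flags;
            unsigned int shallow_depth;     /* assumed: set via limit_depth() */
            unsigned int cmd_flags;         /* REQ_* flags for the new request */

            /* output parameters, filled in during allocation */
            struct blk_mq_ctx *ctx;         /* software (per-CPU) context */
            struct blk_mq_hw_ctx *hctx;     /* mapped hardware queue context */
    };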

__blk_mq_alloc_request(), data is an argument:
  350  static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
  352  struct request_queue *q = data->q;
  362  if (data->cmd_flags & REQ_NOWAIT)
  363  data->flags |= BLK_MQ_REQ_NOWAIT;
  371  if (!op_is_flush(data->cmd_flags) &&
  373  !(data->flags & BLK_MQ_REQ_RESERVED))
  374  e->type->ops.limit_depth(data->cmd_flags, data);
  378  data->ctx = blk_mq_get_ctx(q);
  379  data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
  381  blk_mq_tag_busy(data->hctx);
  388  trace_android_rvh_internal_blk_mq_alloc_request(&skip, &tag, data);
  390  tag = blk_mq_get_tag(data);
  392  if (data->flags & BLK_MQ_REQ_NOWAIT)
  403  return blk_mq_rq_ctx_init(data, tag, alloc_time_ns);
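
Condensed control flow of the allocator, stitched together from the matches
above; the BLK_MQ_NO_TAG failure check is an assumption filled in from
mainline trees of this era, and the error paths and Android hooks are left
out.

    if (data->cmd_flags & REQ_NOWAIT)
            data->flags |= BLK_MQ_REQ_NOWAIT;       /* propagate bio-level NOWAIT */

    /* pick this CPU's software context, then map it to a hardware queue */
    data->ctx = blk_mq_get_ctx(q);
    data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);

    tag = blk_mq_get_tag(data);                     /* may sleep unless NOWAIT */
    if (tag == BLK_MQ_NO_TAG)                       /* assumed failure check */
            return NULL;

    return blk_mq_rq_ctx_init(data, tag, alloc_time_ns);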

blk_mq_alloc_request(), data is a local:
  409  struct blk_mq_alloc_data data = {
  421  rq = __blk_mq_alloc_request(&data);
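
blk_mq_alloc_request() is the exported entry point wrapped around the
internal helper above. A hypothetical caller, sketched with the usual
in-tree error handling (the function name and REQ_OP_DRV_OUT choice are
illustrative, not from the listing):

    /* Hypothetical driver-side caller; not from the listing. */
    static int demo_send_passthrough(struct request_queue *q)
    {
            struct request *rq;

            rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_NOWAIT);
            if (IS_ERR(rq))
                    return PTR_ERR(rq);     /* e.g. no tag free with NOWAIT */

            /* a real caller would set up and execute the command here */
            blk_mq_free_request(rq);
            return 0;
    }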

blk_mq_alloc_request_hctx(), data is a local:
  437  struct blk_mq_alloc_data data = {
  473  data.hctx = q->queue_hw_ctx[hctx_idx];
  474  if (!blk_mq_hw_queue_mapped(data.hctx))
  476  cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
  479  data.ctx = __blk_mq_get_ctx(q, cpu);
  482  blk_mq_tag_busy(data.hctx);
  485  tag = blk_mq_get_tag(&data);
  488  return blk_mq_rq_ctx_init(&data, tag, alloc_time_ns);
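
Unlike the plain allocator, this variant never calls blk_mq_map_queue():
the caller names the hardware queue by index, and the software context is
derived from an online CPU served by that queue. The listing skips the
failure branches between those lines; a condensed reconstruction (the
out_queue_exit label and the nr_cpu_ids check are assumptions from
mainline trees):

    data.hctx = q->queue_hw_ctx[hctx_idx];
    if (!blk_mq_hw_queue_mapped(data.hctx))
            goto out_queue_exit;            /* label assumed */
    cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
    if (cpu >= nr_cpu_ids)                  /* no online CPU maps here */
            goto out_queue_exit;
    data.ctx = __blk_mq_get_ctx(q, cpu);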

__blk_mq_complete_request_remote(), data is an argument:
  640  static void __blk_mq_complete_request_remote(void *data)
  642  struct request *rq = data;
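
The completion callback only receives a void *, so the request pointer has
to make a round trip through the IPI machinery. A sketch of both ends; the
csd field names follow mainline around v5.10 (newer trees wrap the sender
side in INIT_CSD()), and the ->complete() dispatch is an assumption about
the callback body:

    /* Sender: queue an IPI back to the CPU that submitted the request. */
    rq->csd.func = __blk_mq_complete_request_remote;
    rq->csd.info = rq;                      /* becomes the callback's data */
    rq->csd.flags = 0;
    smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);

    /* Receiver (line 640): recover the typed pointer, complete the rq. */
    static void __blk_mq_complete_request_remote(void *data)
    {
            struct request *rq = data;

            rq->q->mq_ops->complete(rq);    /* assumed body */
    }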

flush_busy_ctx(), data is an argument:
  1028  static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
  1030  struct flush_busy_ctx_data *flush_data = data;

blk_mq_flush_busy_ctxs(), data is a local:
  1048  struct flush_busy_ctx_data data = {
  1053  sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data);
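
flush_busy_ctx() matches the sb_for_each_fn callback type from
include/linux/sbitmap.h: one call per set bit, the opaque pointer handed
through unchanged, and a false return stopping the walk. A minimal
self-contained example of the same contract (count_data and both functions
are illustrative, not from blk-mq):

    #include <linux/sbitmap.h>

    struct count_data {
            unsigned int count;
    };

    /* Same shape as flush_busy_ctx(): recover the typed context from the
     * void *, do the per-bit work, return true to keep iterating. */
    static bool count_bit(struct sbitmap *sb, unsigned int bitnr, void *data)
    {
            struct count_data *cd = data;

            cd->count++;
            return true;
    }

    static unsigned int count_set_bits(struct sbitmap *sb)
    {
            struct count_data cd = { .count = 0 };

            sbitmap_for_each_set(sb, count_bit, &cd);
            return cd.count;
    }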

dispatch_rq_from_ctx(), data is an argument (1063 is the continuation line
of the multi-line signature):
  1063  void *data)
  1065  struct dispatch_rq_data *dispatch_data = data;

blk_mq_dequeue_from_ctx(), data is a local:
  1086  struct dispatch_rq_data data = {
  1092  dispatch_rq_from_ctx, &data);
  1094  return data.rq;
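
Here the cursor also carries the result out: dispatch_rq_from_ctx() stores
the dequeued request in data.rq and returns false to end the walk, which is
why line 1094 can simply return data.rq. The same early-termination idiom
in isolation (find_data and find_first_cb are illustrative names):

    struct find_data {
            unsigned int bitnr;     /* output, valid when hit is set */
            bool hit;
    };

    /* Returning false stops sbitmap_for_each_set() at the first match,
     * mirroring how dispatch_rq_from_ctx() stops once it has a request. */
    static bool find_first_cb(struct sbitmap *sb, unsigned int bitnr, void *data)
    {
            struct find_data *fd = data;

            fd->bitnr = bitnr;
            fd->hit = true;
            return false;
    }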

blk_mq_submit_bio(), data is a local:
  2255  struct blk_mq_alloc_data data = {
  2280  data.cmd_flags = bio->bi_opf;
  2281  rq = __blk_mq_alloc_request(&data);
  2293  cookie = request_to_qc_t(data.hctx, rq);
  2309  blk_mq_run_hw_queue(data.hctx, true);
  2356  data.hctx = same_queue_rq->mq_hctx;
  2358  blk_mq_try_issue_directly(data.hctx, same_queue_rq,
  2362  !data.hctx->dispatch_busy) {
  2367  blk_mq_try_issue_directly(data.hctx, rq, &cookie);
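
The hot path builds the cursor on the stack, lifts the request flags
straight off the bio, and keeps reusing data.hctx (filled in by
__blk_mq_alloc_request()) for the polling cookie and the direct-issue
decisions. Condensed from the matches above; the .q initializer, the NULL
check, and the label are assumptions:

    struct blk_mq_alloc_data data = {
            .q = q,                         /* initializer assumed */
    };
    struct request *rq;

    data.cmd_flags = bio->bi_opf;           /* request inherits bio flags */
    rq = __blk_mq_alloc_request(&data);
    if (unlikely(!rq))                      /* assumed failure handling */
            goto queue_exit;                /* label assumed */

    cookie = request_to_qc_t(data.hctx, rq); /* poll cookie from hctx + tag */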

blk_mq_has_request(), data is an argument:
  2598  static bool blk_mq_has_request(struct request *rq, void *data, bool reserved)
  2600  struct rq_iter_data *iter_data = data;

blk_mq_hctx_has_requests(), data is a local:
  2612  struct rq_iter_data data = {
  2616  blk_mq_all_tag_iter(tags, blk_mq_has_request, &data);
  2617  return data.has_rq;
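
Same cursor idiom against a different iterator: blk_mq_all_tag_iter() walks
every tagged request, and blk_mq_has_request() matches the busy_tag_iter_fn
type (bool fn(struct request *, void *, bool reserved)). A reconstruction
of the callback consistent with the matches above; the hctx field and the
exact body are assumptions:

    struct rq_iter_data {
            struct blk_mq_hw_ctx *hctx;     /* assumed input field */
            bool has_rq;                    /* output, see line 2617 */
    };

    static bool blk_mq_has_request(struct request *rq, void *data, bool reserved)
    {
            struct rq_iter_data *iter_data = data;

            if (rq->mq_hctx != iter_data->hctx)
                    return true;            /* other hctx: keep scanning */
            iter_data->has_rq = true;
            return false;                   /* found one: stop the walk */
    }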

blk_mq_poll_stats_fn(), data referenced as a struct member (cb->data):
  3900  struct request_queue *q = cb->data;
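
This last match is the blk-stat flavor of the same round trip: the queue
pointer handed to blk_stat_alloc_callback() at init time comes back as
cb->data when the stats timer fires. The registration as it looks in
mainline trees of this era (the constant and bucket function come from
blk-mq.c, not from this listing):

    /* In blk_mq_init_allocated_queue(): q rides along in cb->data. */
    q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn,
                                         blk_mq_poll_stats_bkt,
                                         BLK_MQ_POLL_STATS_BKTS, q);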