Searched refs:rq_depth (Results 1 – 21 of 21) sorted by relevance

/kernel/linux/linux-5.10/block/
blk-wbt.c
  238  struct rq_depth *rqd = &rwb->rq_depth;  in latency_exceeded()
  291  struct rq_depth *rqd = &rwb->rq_depth;  in rwb_trace_step()
  301  } else if (rwb->rq_depth.max_depth <= 2) {  in calc_wb_limits()
  302  rwb->wb_normal = rwb->rq_depth.max_depth;  in calc_wb_limits()
  305  rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;  in calc_wb_limits()
  306  rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;  in calc_wb_limits()
  312  if (!rq_depth_scale_up(&rwb->rq_depth))  in scale_up()
  322  if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))  in scale_down()
  331  struct rq_depth *rqd = &rwb->rq_depth;  in rwb_arm_timer()
  356  struct rq_depth *rqd = &rwb->rq_depth;  in wb_timer_fn()
  [all …]
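
The calc_wb_limits() hits above show how wbt derives its two throttling thresholds from rq_depth.max_depth: roughly half the depth for normal writeback, a quarter for background, both rounded up. A minimal userspace sketch of that arithmetic; the wb_background value in the max_depth <= 2 branch is an assumption, since that line is truncated in the hits.

#include <stdio.h>

/* Stand-in for the two wbt limits derived from rq_depth.max_depth.
 * Mirrors the integer math visible in calc_wb_limits() above. */
struct wb_limits {
	unsigned int wb_normal;
	unsigned int wb_background;
};

static void calc_wb_limits(struct wb_limits *w, unsigned int max_depth)
{
	if (max_depth <= 2) {
		/* Tiny queues: normal writeback may use the whole depth;
		 * background pinned to 1 is an assumption here. */
		w->wb_normal = max_depth;
		w->wb_background = 1;
	} else {
		/* Round up so neither limit can reach zero. */
		w->wb_normal = (max_depth + 1) / 2;
		w->wb_background = (max_depth + 3) / 4;
	}
}

int main(void)
{
	struct wb_limits w;

	calc_wb_limits(&w, 64);
	printf("normal=%u background=%u\n", w.wb_normal, w.wb_background);
	return 0;
}
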
blk-iolatency.c
  138  struct rq_depth rq_depth;  member
  277  return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);  in iolat_acquire_inflight()
  369  unsigned long old = iolat->rq_depth.max_depth;  in scale_change()
  381  iolat->rq_depth.max_depth = old;  in scale_change()
  386  iolat->rq_depth.max_depth = max(old, 1UL);  in scale_change()
  444  if (iolat->rq_depth.max_depth == 1 && direction < 0) {  in check_scale_change()
  452  iolat->rq_depth.max_depth = UINT_MAX;  in check_scale_change()
  507  if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {  in iolatency_record_time()
  908  if (iolat->rq_depth.max_depth == UINT_MAX)  in iolatency_ssd_stat()
  915  iolat->rq_depth.max_depth);  in iolatency_ssd_stat()
  [all …]
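
blk-iolatency treats rq_depth.max_depth == UINT_MAX as "unthrottled" (see the check_scale_change() and iolatency_ssd_stat() hits) and, per the scale_change() hits, never lets a scale-down push the depth below 1. A hedged sketch of that clamping; the halving step is an assumption, since only the max(old, 1UL) line is visible here.

#include <limits.h>
#include <stdio.h>

/* Scale an iolatency-style depth limit down, never below 1.
 * UINT_MAX is the sentinel for "no throttling in effect". */
static unsigned int scale_depth_down(unsigned int max_depth)
{
	unsigned long old = max_depth;

	/* Assumed: each scale-down halves the current depth. */
	old >>= 1;
	return old > 1 ? (unsigned int)old : 1;	/* max(old, 1UL) */
}

int main(void)
{
	unsigned int d = 16;

	while (d > 1) {
		d = scale_depth_down(d);
		printf("depth -> %u\n", d);
	}
	/* Lifting all throttling restores the sentinel. */
	d = UINT_MAX;
	printf("unthrottled: %u\n", d);
	return 0;
}
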
blk-rq-qos.h
   51  struct rq_depth {  struct
  154  bool rq_depth_scale_up(struct rq_depth *rqd);
  155  bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
  156  bool rq_depth_calc_max_depth(struct rq_depth *rqd);
blk-wbt.h
   70  struct rq_depth rq_depth;  member
blk-rq-qos.c
  116  bool rq_depth_calc_max_depth(struct rq_depth *rqd)  in rq_depth_calc_max_depth()
  164  bool rq_depth_scale_up(struct rq_depth *rqd)  in rq_depth_scale_up()
  183  bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)  in rq_depth_scale_down()
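
These blk-rq-qos hits are the shared depth-scaling core that both wbt and iolatency lean on. A sketch of the scale_step idea, assuming the 5.10 field layout (queue_depth, default_depth, scale_step, scaled_max alongside max_depth; only the struct name is visible above) and omitting the real function's cap on scaled-up depths.

#include <stdbool.h>
#include <stdio.h>

/* Assumed layout of the shared depth-scaling state from blk-rq-qos.h:51. */
struct rq_depth {
	unsigned int max_depth;		/* current inflight limit */
	int scale_step;			/* >0 throttles, <0 opens up */
	bool scaled_max;
	unsigned int queue_depth;	/* device queue depth */
	unsigned int default_depth;
};

/* Sketch of the core idea in rq_depth_calc_max_depth(): a positive
 * scale_step shifts the depth down, a negative one shifts it up. */
static void calc_max_depth(struct rq_depth *rqd)
{
	unsigned int depth = rqd->default_depth < rqd->queue_depth ?
			     rqd->default_depth : rqd->queue_depth;

	if (rqd->scale_step > 0)
		depth = 1 + ((depth - 1) >> rqd->scale_step);
	else if (rqd->scale_step < 0)
		depth = 1 + ((depth - 1) << -rqd->scale_step);

	rqd->max_depth = depth;
}

int main(void)
{
	struct rq_depth rqd = {
		.queue_depth = 128, .default_depth = 64, .scale_step = 2,
	};

	calc_max_depth(&rqd);
	printf("max_depth=%u\n", rqd.max_depth);	/* 16 */
	return 0;
}
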
/kernel/linux/linux-5.10/net/9p/
trans_rdma.c
   88  int rq_depth;  member
  127  int rq_depth;  member
  159  if (rdma->rq_depth != P9_RDMA_RQ_DEPTH)  in p9_rdma_show_options()
  160  seq_printf(m, ",rq=%u", rdma->rq_depth);  in p9_rdma_show_options()
  184  opts->rq_depth = P9_RDMA_RQ_DEPTH;  in parse_opts()
  221  opts->rq_depth = option;  in parse_opts()
  234  opts->rq_depth = max(opts->rq_depth, opts->sq_depth);  in parse_opts()
  574  rdma->rq_depth = opts->rq_depth;  in alloc_rdma()
  579  sema_init(&rdma->rq_sem, rdma->rq_depth);  in alloc_rdma()
  690  opts.sq_depth + opts.rq_depth + 1,  in rdma_create_trans()
  [all …]
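
The trans_rdma.c hits trace rq_depth as a 9p mount option: it defaults to P9_RDMA_RQ_DEPTH, can be overridden with rq=<n>, is raised to at least sq_depth (line 234), then sizes the receive semaphore and, together with sq_depth, the completion queue (line 690). A small sketch of that normalization; the default values of 32 are assumptions, only the macro names appear above.

#include <stdio.h>

/* Assumed defaults; only the macro names appear in the hits above. */
#define P9_RDMA_SQ_DEPTH 32
#define P9_RDMA_RQ_DEPTH 32

struct p9_rdma_opts {
	int sq_depth;
	int rq_depth;
};

/* Mirrors parse_opts(): start from defaults, apply an rq= override,
 * then make the receive queue at least as deep as the send queue. */
static void parse_opts(struct p9_rdma_opts *opts, int rq_option)
{
	opts->sq_depth = P9_RDMA_SQ_DEPTH;
	opts->rq_depth = P9_RDMA_RQ_DEPTH;

	if (rq_option > 0)
		opts->rq_depth = rq_option;

	if (opts->rq_depth < opts->sq_depth)
		opts->rq_depth = opts->sq_depth;	/* max(rq, sq) */
}

int main(void)
{
	struct p9_rdma_opts opts;

	parse_opts(&opts, 8);	/* rq=8 is below sq_depth, so it is raised */
	printf("sq=%d rq=%d cq=%d\n", opts.sq_depth, opts.rq_depth,
	       opts.sq_depth + opts.rq_depth + 1);	/* CQ sizing, line 690 */
	return 0;
}
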
/kernel/linux/linux-5.10/net/sunrpc/xprtrdma/
svc_rdma_transport.c
  373  unsigned int ctxts, rq_depth;  in svc_rdma_accept()  local
  407  rq_depth = newxprt->sc_max_requests + newxprt->sc_max_bc_requests;  in svc_rdma_accept()
  408  if (rq_depth > dev->attrs.max_qp_wr) {  in svc_rdma_accept()
  411  rq_depth = dev->attrs.max_qp_wr;  in svc_rdma_accept()
  412  newxprt->sc_max_requests = rq_depth - 2;  in svc_rdma_accept()
  418  newxprt->sc_sq_depth = rq_depth + ctxts;  in svc_rdma_accept()
  436  ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);  in svc_rdma_accept()
  446  qp_attr.cap.max_recv_wr = rq_depth;  in svc_rdma_accept()
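
Here rq_depth is the sum of forward- and backchannel request credits, clamped to what the device can actually queue (dev->attrs.max_qp_wr), and the send queue is sized on top of it. A sketch of that clamping; resetting sc_max_bc_requests to 2 after the clamp is an assumption implied by the "rq_depth - 2" at line 412.

#include <stdio.h>

struct svc_rdma_sizing {
	unsigned int sc_max_requests;
	unsigned int sc_max_bc_requests;
	unsigned int sc_sq_depth;
	unsigned int rq_depth;
};

/* Mirrors the svc_rdma_accept() sizing visible above. */
static void size_queues(struct svc_rdma_sizing *x, unsigned int max_qp_wr,
			unsigned int ctxts)
{
	x->rq_depth = x->sc_max_requests + x->sc_max_bc_requests;
	if (x->rq_depth > max_qp_wr) {
		x->rq_depth = max_qp_wr;
		x->sc_max_requests = x->rq_depth - 2;
		x->sc_max_bc_requests = 2;	/* assumed remainder */
	}
	/* Send side must also cover the per-request context WRs. */
	x->sc_sq_depth = x->rq_depth + ctxts;
}

int main(void)
{
	struct svc_rdma_sizing x = {
		.sc_max_requests = 64, .sc_max_bc_requests = 4,
	};

	size_queues(&x, 50, 8);	/* device caps WRs below the request sum */
	printf("rq_depth=%u sq_depth=%u max_requests=%u\n",
	       x.rq_depth, x.sc_sq_depth, x.sc_max_requests);
	return 0;
}
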
/kernel/linux/linux-5.10/drivers/net/ethernet/huawei/hinic/
hinic_hw_dev.c
  278  unsigned int rq_depth)  in set_hw_ioctxt()  argument
  292  hw_ioctxt.rq_depth = ilog2(rq_depth);  in set_hw_ioctxt()
  445  int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth)  in hinic_hwdev_ifup()  argument
  469  func_to_io->rq_depth = rq_depth;  in hinic_hwdev_ifup()
  496  err = set_hw_ioctxt(hwdev, sq_depth, rq_depth);  in hinic_hwdev_ifup()
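
Note that set_hw_ioctxt() stores ilog2(rq_depth): the HiNIC hardware context carries the log2 of the queue depth rather than the raw count, which presumes power-of-two depths. A userspace sketch of that encoding; ilog2() is reimplemented here, in the kernel it comes from <linux/log2.h>.

#include <stdio.h>

/* Userspace stand-in for the kernel's ilog2(): floor(log2(v)). */
static unsigned int ilog2(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int rq_depth = 4096;

	/* As in set_hw_ioctxt(): the context stores the exponent. */
	printf("rq_depth=%u encoded=%u\n", rq_depth, ilog2(rq_depth));
	return 0;
}
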
hinic_hw_io.h
   74  u16 rq_depth;  member
hinic_dev.h
   96  u16 rq_depth;  member
hinic_hw_dev.h
  220  u16 rq_depth;  member
  561  int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth);
hinic_ethtool.c
  563  ring->rx_pending = nic_dev->rq_depth;  in hinic_get_ringparam()
  604  new_rq_depth == nic_dev->rq_depth)  in hinic_set_ringparam()
  609  nic_dev->sq_depth, nic_dev->rq_depth,  in hinic_set_ringparam()
  613  nic_dev->rq_depth = new_rq_depth;  in hinic_set_ringparam()
hinic_hw_mbox.c
  1294  ((hw_ctxt)->rq_depth >= HINIC_QUEUE_MIN_DEPTH && \
  1295  (hw_ctxt)->rq_depth <= HINIC_QUEUE_MAX_DEPTH && \
  1305  if (!hw_ctxt->rq_depth && !hw_ctxt->sq_depth &&  in hw_ctxt_qps_param_valid()
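
The mailbox side validates the depths a VF hands over in hw_ctxt_qps_param_valid(): all-zero depths appear to mean "no queues configured", otherwise both must fall in [HINIC_QUEUE_MIN_DEPTH, HINIC_QUEUE_MAX_DEPTH]. A sketch of that check; the bound values and the log2 encoding of the stored depths are assumptions, only the macro names appear above.

#include <stdbool.h>
#include <stdio.h>

/* Placeholder bounds; the real values live in the hinic headers,
 * and are assumed here to constrain log2-encoded depths. */
#define HINIC_QUEUE_MIN_DEPTH 6
#define HINIC_QUEUE_MAX_DEPTH 12

struct hw_ctxt {
	unsigned int sq_depth;
	unsigned int rq_depth;
};

/* Mirrors the shape of hw_ctxt_qps_param_valid(): all-zero means
 * "no queues", otherwise both depths must sit inside the range. */
static bool qps_param_valid(const struct hw_ctxt *c)
{
	if (!c->rq_depth && !c->sq_depth)
		return true;

	return c->rq_depth >= HINIC_QUEUE_MIN_DEPTH &&
	       c->rq_depth <= HINIC_QUEUE_MAX_DEPTH &&
	       c->sq_depth >= HINIC_QUEUE_MIN_DEPTH &&
	       c->sq_depth <= HINIC_QUEUE_MAX_DEPTH;
}

int main(void)
{
	struct hw_ctxt ok = { .sq_depth = 10, .rq_depth = 10 };
	struct hw_ctxt bad = { .sq_depth = 10, .rq_depth = 20 };

	printf("ok=%d bad=%d\n", qps_param_valid(&ok), qps_param_valid(&bad));
	return 0;
}
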
hinic_hw_io.c
  293  func_to_io->rq_depth, HINIC_RQ_WQE_SIZE);  in init_qp()
hinic_main.c
  446  nic_dev->rq_depth);  in hinic_open()
  1237  nic_dev->rq_depth = HINIC_RQ_DEPTH;  in nic_dev_init()
hinic_port.c
  476  rq_num.rq_depth = ilog2(nic_dev->rq_depth);  in hinic_set_max_qnum()
hinic_port.h
  316  u32 rq_depth;  member
/kernel/linux/linux-5.10/drivers/infiniband/hw/efa/
efa_com_cmd.h
   26  u32 rq_depth;  member
efa_com_cmd.c
   38  params->rq_depth;  in efa_com_create_qp()
efa_verbs.c
  685  create_qp_params.rq_depth = init_attr->cap.max_recv_wr;  in efa_create_qp()
/kernel/linux/linux-5.10/drivers/infiniband/hw/hns/
hns_roce_hw_v2.h
  1627  __le16 rq_depth;  member