/drivers/net/ethernet/mellanox/mlx5/core/ |
D | en_tx.c |
    58   static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,   in mlx5e_dma_push() argument
    63   u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;   in mlx5e_dma_push()
    65   sq->db.dma_fifo[i].addr = addr;   in mlx5e_dma_push()
    66   sq->db.dma_fifo[i].size = size;   in mlx5e_dma_push()
    67   sq->db.dma_fifo[i].type = map_type;   in mlx5e_dma_push()
    68   sq->dma_fifo_pc++;   in mlx5e_dma_push()
    71   static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)   in mlx5e_dma_get() argument
    73   return &sq->db.dma_fifo[i & sq->dma_fifo_mask];   in mlx5e_dma_get()
    76   static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)   in mlx5e_dma_unmap_wqe_err() argument
    82   mlx5e_dma_get(sq, --sq->dma_fifo_pc);   in mlx5e_dma_unmap_wqe_err()
    [all …]
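The en_tx.c lines above show the usual power-of-two DMA fifo: the producer counter dma_fifo_pc runs freely and is masked on every access, so the error path can unwind entries by simply pre-decrementing it. Below is a minimal userspace sketch of that pattern; the struct and function names are invented for the sketch and do not come from the driver.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the driver's per-slot DMA bookkeeping. */
struct dma_record {
    uint64_t addr;
    uint32_t size;
};

struct dma_fifo {
    struct dma_record *slots;
    uint32_t mask;   /* size - 1, size must be a power of two */
    uint32_t pc;     /* free-running producer counter, wraps naturally */
};

static void fifo_push(struct dma_fifo *f, uint64_t addr, uint32_t size)
{
    uint32_t i = f->pc & f->mask;   /* cheap modulo via the mask */

    f->slots[i].addr = addr;
    f->slots[i].size = size;
    f->pc++;
}

static struct dma_record *fifo_get(struct dma_fifo *f, uint32_t i)
{
    return &f->slots[i & f->mask];
}

int main(void)
{
    struct dma_fifo f = { .mask = 7, .pc = 0 };

    f.slots = calloc(f.mask + 1, sizeof(*f.slots));
    if (!f.slots)
        return 1;

    for (int n = 0; n < 10; n++)        /* push more than 8 entries to show the wrap */
        fifo_push(&f, 0x1000 + n, 64);

    /* Unwind the last two entries the way the error path does: pre-decrement. */
    for (int n = 0; n < 2; n++) {
        struct dma_record *r = fifo_get(&f, --f.pc);
        printf("unwound addr=0x%llx\n", (unsigned long long)r->addr);
    }

    free(f.slots);
    return 0;
}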
|
D | en_main.c |
    66    struct mlx5e_sq_param sq;   member
    214   sq_stats = &c->sq[j].stats;   in mlx5e_update_sw_counters()
    417   struct mlx5e_icosq *sq,   in mlx5e_build_umr_wqe() argument
    428   cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |   in mlx5e_build_umr_wqe()
    440   dseg->lkey = sq->mkey_be;   in mlx5e_build_umr_wqe()
    945   struct mlx5e_icosq *sq = &rq->channel->icosq;   in mlx5e_activate_rq() local
    946   u16 pi = sq->pc & sq->wq.sz_m1;   in mlx5e_activate_rq()
    950   sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;   in mlx5e_activate_rq()
    951   nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);   in mlx5e_activate_rq()
    952   mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);   in mlx5e_activate_rq()
    [all …]
|
D | en_rx.c |
    352   struct mlx5e_icosq *sq = &rq->channel->icosq;   in mlx5e_post_umr_wqe() local
    353   struct mlx5_wq_cyc *wq = &sq->wq;   in mlx5e_post_umr_wqe()
    359   while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {   in mlx5e_post_umr_wqe()
    360   sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;   in mlx5e_post_umr_wqe()
    361   mlx5e_post_nop(wq, sq->sqn, &sq->pc);   in mlx5e_post_umr_wqe()
    367   cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |   in mlx5e_post_umr_wqe()
    370   sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;   in mlx5e_post_umr_wqe()
    371   sq->pc += num_wqebbs;   in mlx5e_post_umr_wqe()
    372   mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);   in mlx5e_post_umr_wqe()
    487   struct mlx5e_icosq *sq,   in mlx5e_poll_ico_single_cqe() argument
    [all …]
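mlx5e_post_umr_wqe() keeps a multi-WQEBB work request contiguous in the ring: while the masked producer index sits past the queue's edge, the remaining slots are padded with NOP WQEs so the real WQE starts back at the ring base. A simplified sketch of that fill-to-the-edge loop, with made-up names and opcodes reduced to plain tags:

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 16                 /* power of two */
#define RING_MASK (RING_SIZE - 1)

enum { OP_NOP = 0, OP_UMR = 1 };

struct ico_ring {
    uint8_t  opcode[RING_SIZE];
    uint16_t pc;                     /* free-running producer counter */
    uint16_t edge;                   /* last slot a multi-slot WQE may start at */
};

/* Post a WQE that occupies num_slots consecutive ring entries. */
static uint16_t post_multi_slot(struct ico_ring *r, int num_slots)
{
    uint16_t pi;

    /* Pad with NOPs until the producer wraps to a slot with enough room. */
    while ((pi = (r->pc & RING_MASK)) > r->edge) {
        r->opcode[pi] = OP_NOP;
        r->pc++;
    }

    r->opcode[pi] = OP_UMR;
    r->pc += num_slots;              /* account for every slot the WQE occupies */
    return pi;
}

int main(void)
{
    struct ico_ring r = { .pc = 14, .edge = RING_SIZE - 4 };
    uint16_t start = post_multi_slot(&r, 4);

    printf("WQE starts at slot %u, pc is now %u\n", (unsigned)start, (unsigned)r.pc);
    return 0;
}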
|
/drivers/net/ethernet/cavium/thunder/ |
D | nicvf_queues.c |
    22    static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
    507   struct snd_queue *sq, int q_len, int qidx)   in nicvf_init_snd_queue() argument
    511   err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,   in nicvf_init_snd_queue()
    516   sq->desc = sq->dmem.base;   in nicvf_init_snd_queue()
    517   sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);   in nicvf_init_snd_queue()
    518   if (!sq->skbuff)   in nicvf_init_snd_queue()
    521   sq->head = 0;   in nicvf_init_snd_queue()
    522   sq->tail = 0;   in nicvf_init_snd_queue()
    523   sq->thresh = SND_QUEUE_THRESH;   in nicvf_init_snd_queue()
    530   sq->xdp_page = kcalloc(q_len, sizeof(u64), GFP_KERNEL);   in nicvf_init_snd_queue()
    [all …]
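nicvf_init_snd_queue() allocates the DMA descriptor memory and a same-length kcalloc'd array of u64 cookies (skb pointers, or page addresses when the queue is used for XDP), unwinding the first allocation if the second one fails. A userspace sketch of that descriptor-ring-plus-cookie-array setup, with plain calloc standing in for the DMA allocator and all names invented:

#include <stdint.h>
#include <stdlib.h>

#define DESC_SIZE 16   /* assumed per-descriptor size for the sketch */

struct snd_queue_sketch {
    void     *desc;     /* descriptor ring (DMA memory in the real driver) */
    uint64_t *cookies;  /* one cookie (skb/page pointer) per descriptor */
    int       q_len;
    int       head, tail, thresh;
};

static int init_snd_queue_sketch(struct snd_queue_sketch *sq, int q_len)
{
    sq->desc = calloc(q_len, DESC_SIZE);
    if (!sq->desc)
        return -1;

    sq->cookies = calloc(q_len, sizeof(*sq->cookies));
    if (!sq->cookies) {
        free(sq->desc);             /* unwind the first allocation on failure */
        sq->desc = NULL;
        return -1;
    }

    sq->q_len = q_len;
    sq->head = sq->tail = 0;
    sq->thresh = q_len / 4;         /* arbitrary threshold for the sketch */
    return 0;
}

int main(void)
{
    struct snd_queue_sketch sq;

    if (init_snd_queue_sketch(&sq, 1024))
        return 1;

    free(sq.cookies);
    free(sq.desc);
    return 0;
}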
|
D | nicvf_queues.h |
    302   struct snd_queue sq[MAX_SND_QUEUES_PER_QS];   member
    329   void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
    339   void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
    341   void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
    343   struct snd_queue *sq, int qidx);
    344   int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
    346   int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
    348   void nicvf_xdp_sq_doorbell(struct nicvf *nic, struct snd_queue *sq, int sq_num);
|
D | nicvf_main.c |
    522   struct cqe_rx_t *cqe_rx, struct snd_queue *sq,   in nicvf_xdp_rx() argument
    582   nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);   in nicvf_xdp_rx()
    617   struct snd_queue *sq;   in nicvf_snd_pkt_handler() local
    621   sq = &nic->qs->sq[cqe_tx->sq_idx];   in nicvf_snd_pkt_handler()
    623   hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);   in nicvf_snd_pkt_handler()
    632   if (sq->is_xdp) {   in nicvf_snd_pkt_handler()
    633   page = (struct page *)sq->xdp_page[cqe_tx->sqe_ptr];   in nicvf_snd_pkt_handler()
    636   nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,   in nicvf_snd_pkt_handler()
    642   sq->xdp_page[cqe_tx->sqe_ptr] = (u64)NULL;   in nicvf_snd_pkt_handler()
    647   skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];   in nicvf_snd_pkt_handler()
    [all …]
|
/drivers/net/ethernet/huawei/hinic/ |
D | hinic_tx.c |
    48    #define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))   argument
    190   qp = container_of(txq->sq, struct hinic_qp, sq);   in hinic_xmit_frame()
    213   sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);   in hinic_xmit_frame()
    226   hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);   in hinic_xmit_frame()
    228   hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);   in hinic_xmit_frame()
    233   hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);   in hinic_xmit_frame()
    268   struct hinic_sq *sq = txq->sq;   in free_all_tx_skbs() local
    275   while ((sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &ci))) {   in free_all_tx_skbs()
    280   hinic_sq_put_wqe(sq, wqe_size);   in free_all_tx_skbs()
    296   struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);   in free_tx_poll()
    [all …]
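Both hinic_xmit_frame() and free_tx_poll() recover the enclosing hinic_qp from the embedded sq member via container_of(). A standalone illustration of that idiom with a simplified container_of (no type checking, unlike the kernel macro) and made-up structures:

#include <stddef.h>
#include <stdio.h>

/* Simplified version of the kernel macro: member pointer back to enclosing struct. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct send_queue { int depth; };

struct queue_pair {
    int id;
    struct send_queue sq;   /* embedded, not a pointer */
};

int main(void)
{
    struct queue_pair qp = { .id = 3, .sq = { .depth = 256 } };
    struct send_queue *sq = &qp.sq;

    /* Given only sq, recover the queue_pair that contains it. */
    struct queue_pair *owner = container_of(sq, struct queue_pair, sq);

    printf("owner id = %d\n", owner->id);
    return 0;
}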
|
D | hinic_hw_qp.c |
    68    #define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi))   argument
    70    #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)   argument
    104   struct hinic_sq *sq, u16 global_qid)   in hinic_sq_prepare_ctxt() argument
    111   wq = sq->wq;   in hinic_sq_prepare_ctxt()
    225   static int alloc_sq_skb_arr(struct hinic_sq *sq)   in alloc_sq_skb_arr() argument
    227   struct hinic_wq *wq = sq->wq;   in alloc_sq_skb_arr()
    230   skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb);   in alloc_sq_skb_arr()
    231   sq->saved_skb = vzalloc(skb_arr_size);   in alloc_sq_skb_arr()
    232   if (!sq->saved_skb)   in alloc_sq_skb_arr()
    242   static void free_sq_skb_arr(struct hinic_sq *sq)   in free_sq_skb_arr() argument
    [all …]
|
D | hinic_hw_qp.h |
    63    #define HINIC_MIN_TX_NUM_WQEBBS(sq) \   argument
    64    (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size)
    121   struct hinic_sq sq;   member
    132   struct hinic_sq *sq, u16 global_qid);
    137   int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
    141   void hinic_clean_sq(struct hinic_sq *sq);
    148   int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
    152   void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
    156   void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
    159   struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
    [all …]
|
/drivers/infiniband/hw/bnxt_re/ |
D | qplib_fp.c |
    58    qp->sq.condition = false;   in bnxt_qplib_cancel_phantom_processing()
    59    qp->sq.send_phantom = false;   in bnxt_qplib_cancel_phantom_processing()
    60    qp->sq.single = false;   in bnxt_qplib_cancel_phantom_processing()
    71    if (!qp->sq.flushed) {   in __bnxt_qplib_add_flush_qp()
    77    qp->sq.flushed = true;   in __bnxt_qplib_add_flush_qp()
    168   if (qp->sq.flushed) {   in __bnxt_qplib_del_flush_qp()
    169   qp->sq.flushed = false;   in __bnxt_qplib_del_flush_qp()
    186   qp->sq.hwq.prod = 0;   in bnxt_qplib_del_flush_qp()
    187   qp->sq.hwq.cons = 0;   in bnxt_qplib_del_flush_qp()
    221   struct bnxt_qplib_q *sq = &qp->sq;   in bnxt_qplib_free_qp_hdr_buf() local
    [all …]
|
/drivers/infiniband/hw/cxgb4/ |
D | cq.c |
    188   CQE_QPID_V(wq->sq.qid));   in insert_recv_cqe()
    221   CQE_QPID_V(wq->sq.qid));   in insert_sq_cqe()
    239   if (wq->sq.flush_cidx == -1)   in c4iw_flush_sq()
    240   wq->sq.flush_cidx = wq->sq.cidx;   in c4iw_flush_sq()
    241   idx = wq->sq.flush_cidx;   in c4iw_flush_sq()
    242   BUG_ON(idx >= wq->sq.size);   in c4iw_flush_sq()
    243   while (idx != wq->sq.pidx) {   in c4iw_flush_sq()
    244   swsqe = &wq->sq.sw_sq[idx];   in c4iw_flush_sq()
    248   if (wq->sq.oldest_read == swsqe) {   in c4iw_flush_sq()
    253   if (++idx == wq->sq.size)   in c4iw_flush_sq()
    [all …]
|
D | qp.c |
    94    static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)   in dealloc_oc_sq() argument
    96    c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);   in dealloc_oc_sq()
    99    static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)   in dealloc_host_sq() argument
    101   dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,   in dealloc_host_sq()
    102   pci_unmap_addr(sq, mapping));   in dealloc_host_sq()
    105   static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)   in dealloc_sq() argument
    107   if (t4_sq_onchip(sq))   in dealloc_sq()
    108   dealloc_oc_sq(rdev, sq);   in dealloc_sq()
    110   dealloc_host_sq(rdev, sq);   in dealloc_sq()
    113   static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)   in alloc_oc_sq() argument
    [all …]
|
D | t4.h |
    347   struct t4_sq sq;   member
    402   static inline int t4_sq_onchip(struct t4_sq *sq)   in t4_sq_onchip() argument
    404   return sq->flags & T4_SQ_ONCHIP;   in t4_sq_onchip()
    409   return wq->sq.in_use == 0;   in t4_sq_empty()
    414   return wq->sq.in_use == (wq->sq.size - 1);   in t4_sq_full()
    419   return wq->sq.size - 1 - wq->sq.in_use;   in t4_sq_avail()
    424   wq->sq.in_use++;   in t4_sq_produce()
    425   if (++wq->sq.pidx == wq->sq.size)   in t4_sq_produce()
    426   wq->sq.pidx = 0;   in t4_sq_produce()
    427   wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);   in t4_sq_produce()
    [all …]
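The t4.h helpers track SQ occupancy with an explicit in_use counter and declare the ring full at size - 1 entries, the usual convention that keeps full and empty states distinguishable. A small sketch of equivalent helpers, using hypothetical names:

#include <stdio.h>

struct sq_ring {
    int size;    /* number of slots */
    int in_use;  /* currently posted entries */
    int pidx;    /* producer index */
};

static int sq_empty(const struct sq_ring *sq) { return sq->in_use == 0; }
static int sq_full(const struct sq_ring *sq)  { return sq->in_use == sq->size - 1; }
static int sq_avail(const struct sq_ring *sq) { return sq->size - 1 - sq->in_use; }

static void sq_produce(struct sq_ring *sq)
{
    sq->in_use++;
    if (++sq->pidx == sq->size)     /* wrap the producer index explicitly */
        sq->pidx = 0;
}

int main(void)
{
    struct sq_ring sq = { .size = 8 };

    while (!sq_full(&sq))
        sq_produce(&sq);

    printf("posted %d entries, %d available, empty=%d\n",
           sq.in_use, sq_avail(&sq), sq_empty(&sq));
    return 0;
}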
|
/drivers/nvme/target/ |
D | core.c |
    396   if (req->sq->size) {   in __nvmet_req_complete()
    398   old_sqhd = req->sq->sqhd;   in __nvmet_req_complete()
    399   new_sqhd = (old_sqhd + 1) % req->sq->size;   in __nvmet_req_complete()
    400   } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=   in __nvmet_req_complete()
    403   sqhd = req->sq->sqhd & 0x0000FFFF;   in __nvmet_req_complete()
    405   req->rsp->sq_id = cpu_to_le16(req->sq->qid);   in __nvmet_req_complete()
    416   percpu_ref_put(&req->sq->ref);   in nvmet_req_complete()
    429   void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,   in nvmet_sq_setup() argument
    432   sq->sqhd = 0;   in nvmet_sq_setup()
    433   sq->qid = qid;   in nvmet_sq_setup()
    [all …]
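__nvmet_req_complete() advances the submission-queue head with a compare-and-swap retry loop so concurrent completions never lose an increment, wrapping the head modulo the queue size. A C11-atomics sketch of the same loop; the 16-bit width and field names are assumptions of the sketch:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct sq_state {
    _Atomic uint16_t sqhd;  /* submission queue head, reported back to the host */
    uint16_t size;          /* queue depth; 0 means "do not track sqhd" */
};

static uint16_t sq_advance_head(struct sq_state *sq)
{
    uint16_t old_sqhd, new_sqhd;

    do {
        old_sqhd = atomic_load(&sq->sqhd);
        new_sqhd = (uint16_t)((old_sqhd + 1) % sq->size);
        /* Retry if another completion raced in and moved sqhd first. */
    } while (!atomic_compare_exchange_weak(&sq->sqhd, &old_sqhd, new_sqhd));

    return new_sqhd;
}

int main(void)
{
    struct sq_state sq = { .size = 64 };

    atomic_store(&sq.sqhd, 63);
    printf("sqhd after completion: %u\n", (unsigned)sq_advance_head(&sq)); /* wraps to 0 */
    return 0;
}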
|
D | admin-cmd.c |
    43    ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);   in nvmet_get_smart_log_nsid()
    77    ctrl = req->sq->ctrl;   in nvmet_get_smart_log_all()
    175   struct nvmet_ctrl *ctrl = req->sq->ctrl;   in nvmet_execute_identify_ctrl()
    292   ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);   in nvmet_execute_identify_ns()
    340   struct nvmet_ctrl *ctrl = req->sq->ctrl;   in nvmet_execute_identify_nslist()
    398   ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);   in nvmet_execute_identify_desclist()
    443   struct nvmet_subsys *subsys = req->sq->ctrl->subsys;   in nvmet_execute_set_features()
    455   req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);   in nvmet_execute_set_features()
    456   nvmet_set_result(req, req->sq->ctrl->kato);   in nvmet_execute_set_features()
    471   struct nvmet_subsys *subsys = req->sq->ctrl->subsys;   in nvmet_execute_get_features()
    [all …]
|
/drivers/infiniband/hw/mthca/ |
D | mthca_qp.c |
    220   (n << qp->sq.wqe_shift);   in get_send_wqe()
    223   (n << qp->sq.wqe_shift)) >>   in get_send_wqe()
    225   ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &   in get_send_wqe()
    504   qp_attr->cap.max_send_wr = qp->sq.max;   in mthca_query_qp()
    506   qp_attr->cap.max_send_sge = qp->sq.max_gs;   in mthca_query_qp()
    614   if (qp->sq.max)   in __mthca_modify_qp()
    615   qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;   in __mthca_modify_qp()
    616   qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;   in __mthca_modify_qp()
    735   qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index);   in __mthca_modify_qp()
    836   mthca_wq_reset(&qp->sq);   in __mthca_modify_qp()
    [all …]
|
/drivers/infiniband/sw/rxe/ |
D | rxe_qp.c |
    239   qp->sq.max_wr = init->cap.max_send_wr;   in rxe_qp_init_req()
    240   qp->sq.max_sge = init->cap.max_send_sge;   in rxe_qp_init_req()
    241   qp->sq.max_inline = init->cap.max_inline_data;   in rxe_qp_init_req()
    244   qp->sq.max_sge * sizeof(struct ib_sge),   in rxe_qp_init_req()
    246   qp->sq.max_inline);   in rxe_qp_init_req()
    248   qp->sq.queue = rxe_queue_init(rxe,   in rxe_qp_init_req()
    249   &qp->sq.max_wr,   in rxe_qp_init_req()
    251   if (!qp->sq.queue)   in rxe_qp_init_req()
    255   context, qp->sq.queue->buf,   in rxe_qp_init_req()
    256   qp->sq.queue->buf_size, &qp->sq.queue->ip);   in rxe_qp_init_req()
    [all …]
|
/drivers/infiniband/hw/mlx5/ |
D | qp.c |
    118   return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));   in mlx5_get_send_wqe()
    144   struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;   in mlx5_ib_read_user_wqe()
    412   qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;   in calc_sq_size()
    413   if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {   in calc_sq_size()
    416   qp->sq.wqe_cnt,   in calc_sq_size()
    420   qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);   in calc_sq_size()
    421   qp->sq.max_gs = get_send_sge(attr, wqe_size);   in calc_sq_size()
    422   if (qp->sq.max_gs < attr->cap.max_send_sge)   in calc_sq_size()
    425   attr->cap.max_send_sge = qp->sq.max_gs;   in calc_sq_size()
    426   qp->sq.max_post = wq_size / wqe_size;   in calc_sq_size()
    [all …]
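calc_sq_size() turns the requested capabilities into a count of 64-byte WQE basic blocks, rejects the request if the count exceeds the device's log_max_qp_sz capability, and derives the stride shift with ilog2(). A sketch of that sizing arithmetic; the constants and the capability value below are placeholders, not real firmware values:

#include <stdint.h>
#include <stdio.h>

#define SEND_WQE_BB 64u   /* basic block size in bytes, matching MLX5_SEND_WQE_BB */

/* Integer log2 of a power of two (tiny stand-in for the kernel's ilog2()). */
static unsigned int ilog2_u32(uint32_t v)
{
    unsigned int r = 0;

    while (v >>= 1)
        r++;
    return r;
}

/*
 * wq_size: total send-queue buffer size in bytes (already a multiple of the BB size).
 * wqe_size: size in bytes of the largest WQE the caller will post.
 * log_max_qp_sz: device capability, log2 of the maximum WQE count (assumed value).
 */
static int calc_sq_size_sketch(uint32_t wq_size, uint32_t wqe_size,
                               unsigned int log_max_qp_sz)
{
    uint32_t wqe_cnt = wq_size / SEND_WQE_BB;

    if (wqe_cnt > (1u << log_max_qp_sz)) {
        fprintf(stderr, "wqe count %u exceeds device max %u\n",
                wqe_cnt, 1u << log_max_qp_sz);
        return -1;
    }

    printf("wqe_cnt=%u wqe_shift=%u max_post=%u\n",
           wqe_cnt, ilog2_u32(SEND_WQE_BB), wq_size / wqe_size);
    return 0;
}

int main(void)
{
    return calc_sq_size_sketch(64 * 1024, 192, 16) ? 1 : 0;
}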
|
/drivers/infiniband/hw/hns/ |
D | hns_roce_qp.c |
    324   hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;   in hns_roce_set_user_sq_size()
    325   hr_qp->sq.wqe_shift = ucmd->log_sq_stride;   in hns_roce_set_user_sq_size()
    330   HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<   in hns_roce_set_user_sq_size()
    331   hr_qp->sq.wqe_shift), PAGE_SIZE);   in hns_roce_set_user_sq_size()
    333   hr_qp->sq.offset = 0;   in hns_roce_set_user_sq_size()
    334   hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<   in hns_roce_set_user_sq_size()
    335   hr_qp->sq.wqe_shift), PAGE_SIZE);   in hns_roce_set_user_sq_size()
    354   hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);   in hns_roce_set_kernel_sq_size()
    361   hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);   in hns_roce_set_kernel_sq_size()
    362   if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {   in hns_roce_set_kernel_sq_size()
    [all …]
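hns_roce_set_user_sq_size() lays the SQ and RQ out in a single buffer: the SQ starts at offset 0 and the RQ starts at the SQ's byte size rounded up to a page boundary. A sketch of that layout computation, with PAGE_SIZE and the align-up helper defined locally:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Round v up to the next multiple of align (align must be a power of two). */
static uint32_t align_up(uint32_t v, uint32_t align)
{
    return (v + align - 1) & ~(align - 1);
}

int main(void)
{
    uint32_t log_sq_bb_count = 10;     /* requested: 1024 WQEs */
    uint32_t sq_wqe_shift    = 6;      /* 64-byte WQE stride */

    uint32_t sq_wqe_cnt = 1u << log_sq_bb_count;
    uint32_t sq_bytes   = sq_wqe_cnt << sq_wqe_shift;

    uint32_t sq_offset = 0;
    uint32_t rq_offset = align_up(sq_bytes, PAGE_SIZE);   /* RQ begins on a page boundary */

    printf("sq at %u, rq at %u (sq size %u bytes)\n",
           sq_offset, rq_offset, sq_bytes);
    return 0;
}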
|
/drivers/net/ethernet/mellanox/mlx5/core/fpga/ |
D | conn.c |
    135   *conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc);   in mlx5_fpga_conn_notify_hw()
    149   ix = conn->qp.sq.pc & (conn->qp.sq.size - 1);   in mlx5_fpga_conn_post_send()
    151   ctrl = mlx5_wq_cyc_get_wqe(&conn->qp.wq.sq, ix);   in mlx5_fpga_conn_post_send()
    166   ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) |   in mlx5_fpga_conn_post_send()
    170   conn->qp.sq.pc++;   in mlx5_fpga_conn_post_send()
    171   conn->qp.sq.bufs[ix] = buf;   in mlx5_fpga_conn_post_send()
    188   spin_lock_irqsave(&conn->qp.sq.lock, flags);   in mlx5_fpga_conn_send()
    190   if (conn->qp.sq.pc - conn->qp.sq.cc >= conn->qp.sq.size) {   in mlx5_fpga_conn_send()
    191   list_add_tail(&buf->list, &conn->qp.sq.backlog);   in mlx5_fpga_conn_send()
    198   spin_unlock_irqrestore(&conn->qp.sq.lock, flags);   in mlx5_fpga_conn_send()
    [all …]
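mlx5_fpga_conn_send() decides whether the SQ has room by comparing two free-running counters: when pc - cc reaches the ring size, the buffer is queued on a backlog list instead of being posted. Because both counters are unsigned and only ever increment, the subtraction stays correct even after they wrap. A sketch of that credit check with invented names:

#include <stdint.h>
#include <stdio.h>

struct sq_counters {
    uint32_t pc;    /* producer counter, bumped on every post */
    uint32_t cc;    /* consumer counter, bumped on every completion */
    uint32_t size;  /* ring size, power of two */
};

/* True if there is room to post one more entry. */
static int sq_has_room(const struct sq_counters *sq)
{
    /* Unsigned subtraction yields the in-flight count even across wraparound. */
    return (uint32_t)(sq->pc - sq->cc) < sq->size;
}

int main(void)
{
    struct sq_counters sq = { .size = 64 };

    /* Force the counters to straddle the UINT32_MAX wrap point. */
    sq.pc = 10;
    sq.cc = UINT32_MAX - 20;

    printf("in flight: %u, has room: %d\n",
           (uint32_t)(sq.pc - sq.cc), sq_has_room(&sq));
    return 0;
}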
|
/drivers/net/ |
D | virtio_net.c |
    135    struct send_queue *sq;   member
    279    struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;   in skb_xmit_done()
    391    struct send_queue *sq;   in virtnet_xdp_xmit() local
    397    sq = &vi->sq[qp];   in virtnet_xdp_xmit()
    400    while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {   in virtnet_xdp_xmit()
    411    sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);   in virtnet_xdp_xmit()
    413    err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);   in virtnet_xdp_xmit()
    421    virtqueue_kick(sq->vq);   in virtnet_xdp_xmit()
    1122   static void free_old_xmit_skbs(struct send_queue *sq)   in free_old_xmit_skbs() argument
    1126   struct virtnet_info *vi = sq->vq->vdev->priv;   in free_old_xmit_skbs()
    [all …]
|
/drivers/infiniband/hw/vmw_pvrdma/ |
D | pvrdma_qp.c |
    121   if (qp->sq.ring) {   in pvrdma_reset_qp()
    122   atomic_set(&qp->sq.ring->cons_head, 0);   in pvrdma_reset_qp()
    123   atomic_set(&qp->sq.ring->prod_tail, 0);   in pvrdma_reset_qp()
    162   qp->sq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_send_wr));   in pvrdma_set_sq_size()
    163   qp->sq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_send_sge));   in pvrdma_set_sq_size()
    166   req_cap->max_send_wr = qp->sq.wqe_cnt;   in pvrdma_set_sq_size()
    167   req_cap->max_send_sge = qp->sq.max_sg;   in pvrdma_set_sq_size()
    169   qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) +   in pvrdma_set_sq_size()
    171   qp->sq.max_sg);   in pvrdma_set_sq_size()
    174   (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) /   in pvrdma_set_sq_size()
    [all …]
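pvrdma_set_sq_size() rounds the requested WR and SGE counts up to powers of two, reports the rounded values back through the capability struct, and sizes a fixed-stride WQE from the header plus per-SGE space. A sketch of that rounding and sizing; the structure sizes are placeholders rather than the real pvrdma layouts:

#include <stdint.h>
#include <stdio.h>

#define WQE_HDR_SIZE 64u   /* placeholder for sizeof(struct pvrdma_sq_wqe_hdr) */
#define SGE_SIZE     16u   /* placeholder for the per-SGE size */
#define PAGE_SIZE    4096u

/* Round v up to the next power of two (v > 0). */
static uint32_t roundup_pow_of_two_sketch(uint32_t v)
{
    uint32_t p = 1;

    while (p < v)
        p <<= 1;
    return p;
}

int main(void)
{
    uint32_t req_send_wr  = 300;   /* what the caller asked for */
    uint32_t req_send_sge = 5;

    uint32_t wqe_cnt  = roundup_pow_of_two_sketch(req_send_wr  ? req_send_wr  : 1);
    uint32_t max_sg   = roundup_pow_of_two_sketch(req_send_sge ? req_send_sge : 1);
    uint32_t wqe_size = roundup_pow_of_two_sketch(WQE_HDR_SIZE + SGE_SIZE * max_sg);
    uint32_t npages   = (wqe_cnt * wqe_size + PAGE_SIZE - 1) / PAGE_SIZE;

    /* The rounded-up values are what gets reported back as the granted caps. */
    printf("granted wr=%u sge=%u, wqe_size=%u, ring pages=%u\n",
           wqe_cnt, max_sg, wqe_size, npages);
    return 0;
}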
|
/drivers/infiniband/hw/qedr/ |
D | verbs.c |
    1263   qp->sq.max_sges = attrs->cap.max_send_sge;   in qedr_set_common_qp_params()
    1280   qp->sq.max_sges, qp->sq_cq->icid);   in qedr_set_common_qp_params()
    1285   qp->sq.db = dev->db_addr +   in qedr_set_roce_db_info()
    1287   qp->sq.db_data.data.icid = qp->icid + 1;   in qedr_set_roce_db_info()
    1438   &qp->sq.pbl, NULL);   in qedr_roce_create_kernel_qp()
    1443   in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);   in qedr_roce_create_kernel_qp()
    1444   in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);   in qedr_roce_create_kernel_qp()
    1475   dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);   in qedr_cleanup_kernel()
    1506   qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier,   in qedr_create_kernel_qp()
    1509   qp->wqe_wr_id = kzalloc(qp->sq.max_wr * sizeof(*qp->wqe_wr_id),   in qedr_create_kernel_qp()
    [all …]
|
/drivers/infiniband/hw/mlx4/ |
D | qp.c | 202 return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift)); in get_send_wqe() 225 s = roundup(size, 1U << qp->sq.wqe_shift); in stamp_send_wqe() 227 ind = (i >> qp->sq.wqe_shift) + n; in stamp_send_wqe() 228 stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) : in stamp_send_wqe() 230 buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); in stamp_send_wqe() 231 wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1)); in stamp_send_wqe() 235 ctrl = buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); in stamp_send_wqe() 251 ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); in post_nop_wqe() 276 (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); in post_nop_wqe() 284 unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1)); in pad_wraparound() [all …]
|
/drivers/net/wireless/realtek/rtlwifi/rtl8192se/ |
D | trx.c |
    149   u8 sq;   in _rtl92se_query_rxphystatus() local
    151   sq = 100;   in _rtl92se_query_rxphystatus()
    153   sq = cck_buf->sq_rpt;   in _rtl92se_query_rxphystatus()
    154   if (sq > 64)   in _rtl92se_query_rxphystatus()
    155   sq = 0;   in _rtl92se_query_rxphystatus()
    156   else if (sq < 20)   in _rtl92se_query_rxphystatus()
    157   sq = 100;   in _rtl92se_query_rxphystatus()
    159   sq = ((64 - sq) * 100) / 44;   in _rtl92se_query_rxphystatus()
    162   pstats->signalquality = sq;   in _rtl92se_query_rxphystatus()
    163   pstats->rx_mimo_sig_qual[0] = sq;   in _rtl92se_query_rxphystatus()
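_rtl92se_query_rxphystatus() maps the CCK sq_rpt value from the hardware onto a 0-100 signal-quality scale: reports above 64 count as 0, reports below 20 count as 100, and the band in between is scaled linearly. The same mapping as a standalone function (only the mapping is reproduced; the surrounding per-packet bookkeeping is omitted):

#include <stdint.h>
#include <stdio.h>

/* Map a CCK sq_rpt hardware value to a 0-100 signal-quality percentage. */
static uint8_t cck_signal_quality(uint8_t sq_rpt)
{
    uint8_t sq;

    if (sq_rpt > 64)
        sq = 0;                             /* very poor link */
    else if (sq_rpt < 20)
        sq = 100;                           /* excellent link */
    else
        sq = ((64 - sq_rpt) * 100) / 44;    /* linear scaling in between */

    return sq;
}

int main(void)
{
    for (int r = 0; r <= 80; r += 20)
        printf("sq_rpt=%2d -> quality=%u\n", r, (unsigned)cck_signal_quality((uint8_t)r));
    return 0;
}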
|