Lines matching refs:cqe64 (all hits are in drivers/infiniband/hw/mlx5/cq.c; each entry gives the source line number, the matching line, and the enclosing function, with "local" or "argument" noting how cqe64 is declared there)
82 struct mlx5_cqe64 *cqe64; in get_sw_cqe() local
84 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
86 if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) && in get_sw_cqe()
87 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { in get_sw_cqe()
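
The two conditions at lines 86-87 are the owner-bit handshake that makes polling work: a CQE is consumable only if its opcode is not MLX5_CQE_INVALID and its owner bit matches the parity of the consumer's current lap around the ring (n & (cq->ibcq.cqe + 1) isolates the bit just above the index mask, which flips on every wrap). A minimal userspace sketch of that check; cqe_is_sw_owned, ring_size and ci are illustrative names, not driver identifiers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define OWNER_MASK  0x1   /* stands in for MLX5_CQE_OWNER_MASK */
    #define OP_INVALID  0xf   /* stands in for MLX5_CQE_INVALID    */

    struct cqe {
        uint8_t op_own;   /* opcode in the high nibble, owner bit in bit 0 */
    };

    /*
     * ring_size must be a power of two and ci a free-running consumer
     * index.  "ci & ring_size" extracts the bit just above the index
     * mask, i.e. the parity of how many laps the consumer has made;
     * hardware toggles the owner bit it writes on every lap, so equal
     * parity means the entry was produced in the current lap.
     */
    static bool cqe_is_sw_owned(const struct cqe *c, uint32_t ci,
                                uint32_t ring_size)
    {
        if ((c->op_own >> 4) == OP_INVALID)   /* never written by HW */
            return false;
        return !((c->op_own & OWNER_MASK) ^ !!(ci & ring_size));
    }

    int main(void)
    {
        struct cqe e = { .op_own = 0x2 << 4 };  /* owner bit 0 */

        printf("lap 0: %d\n", cqe_is_sw_owned(&e, 1, 4)); /* 1: parities match */
        printf("lap 1: %d\n", cqe_is_sw_owned(&e, 5, 4)); /* 0: stale entry    */
        return 0;
    }

The point of the scheme is that the consumer never has to clear entries it has polled: a CQE left over from the previous lap self-identifies through its out-of-date owner parity.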
341 static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, in handle_atomics() argument
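
handle_atomics() (line 341) is invoked from the requester branch of mlx5_poll_one() (line 506) with the last-polled and just-completed send WQE indices, and copies fetched atomic results back for every WQE in that span. Only its signature appears above; the sketch below reconstructs the span walk as I understand it, so the linked-ring layout (a per-slot next index) and every name in it are assumptions:

    #include <stdint.h>
    #include <stdio.h>

    #define WQE_CNT 8           /* power-of-two send queue size (illustrative) */

    struct wqe_slot {
        uint16_t next;          /* index of the next posted WQE */
        int      is_atomic;     /* would be derived from the stored opcode */
    };

    /*
     * Walk the linked ring from 'tail' (last polled) to 'head' (the WQE
     * the current CQE completes), visiting every slot in posting order.
     * In the driver each atomic slot would have its fetched value copied
     * back to the caller's buffer at this point.
     */
    static void walk_span(struct wqe_slot *w, uint16_t tail, uint16_t head)
    {
        unsigned int idx;

        do {
            idx = tail & (WQE_CNT - 1);
            if (w[idx].is_atomic)
                printf("atomic completion at slot %u\n", idx);
            if (idx == head)
                break;
            tail = w[idx].next;
        } while (1);
    }

    int main(void)
    {
        struct wqe_slot w[WQE_CNT] = {
            [0] = { .next = 2, .is_atomic = 1 },
            [2] = { .next = 5, .is_atomic = 0 },
            [5] = { .next = 6, .is_atomic = 1 },
        };

        walk_span(w, 0, 5);     /* visits slots 0, 2, 5 */
        return 0;
    }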
453 struct mlx5_cqe64 *cqe64; in mlx5_poll_one() local
467 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in mlx5_poll_one()
476 opcode = get_cqe_opcode(cqe64); in mlx5_poll_one()
489 qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff; in mlx5_poll_one()
503 wqe_ctr = be16_to_cpu(cqe64->wqe_counter); in mlx5_poll_one()
505 handle_good_req(wc, cqe64, wq, idx); in mlx5_poll_one()
506 handle_atomics(*cur_qp, cqe64, wq->last_poll, idx); in mlx5_poll_one()
515 handle_responder(wc, cqe64, *cur_qp); in mlx5_poll_one()
522 err_cqe = (struct mlx5_err_cqe *)cqe64; in mlx5_poll_one()
535 wqe_ctr = be16_to_cpu(cqe64->wqe_counter); in mlx5_poll_one()
544 wqe_ctr = be16_to_cpu(cqe64->wqe_counter); in mlx5_poll_one()
556 (struct mlx5_sig_err_cqe *)cqe64; in mlx5_poll_one()
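
Taken together, the mlx5_poll_one() hits show one poll iteration: locate a software-owned CQE (467), decode its opcode (476), recover the QP from the 24-bit number packed into the big-endian sop_drop_qpn (489), then branch to requester, responder, or error handling, using wqe_counter to index back into the work queue (503/535/544); error opcodes reinterpret the same 64 bytes as mlx5_err_cqe / mlx5_sig_err_cqe (522/556). A compact userspace model of the decode-and-dispatch step: the struct layout is not the real CQE layout, the opcode values echo the driver's MLX5_CQE_* constants as I recall them, and only the masks and byte-order conversions are taken directly from the listing:

    #include <arpa/inet.h>   /* ntohl, ntohs, htonl, htons */
    #include <stdint.h>
    #include <stdio.h>

    /* Only the fields the listing touches; field order is NOT the real layout. */
    struct cqe64_model {
        uint32_t sop_drop_qpn;  /* big-endian: flags in the top byte, qpn below */
        uint16_t wqe_counter;   /* big-endian WQE index */
        uint8_t  op_own;        /* opcode in the high nibble */
    };

    enum { OP_REQ = 0x0, OP_RESP_SEND = 0x2, OP_REQ_ERR = 0xd, OP_RESP_ERR = 0xe };

    static void poll_one(const struct cqe64_model *c)
    {
        uint32_t qpn = ntohl(c->sop_drop_qpn) & 0xffffff; /* 24-bit QP number */
        uint16_t wqe_ctr = ntohs(c->wqe_counter);         /* be16_to_cpu      */

        switch (c->op_own >> 4) {
        case OP_REQ:        /* requester completion: consume send WQE wqe_ctr */
            printf("qp 0x%x: send WQE %u completed\n", qpn, wqe_ctr);
            break;
        case OP_RESP_SEND:  /* responder completion: a receive finished */
            printf("qp 0x%x: recv completed\n", qpn);
            break;
        case OP_REQ_ERR:
        case OP_RESP_ERR:   /* error CQE: reinterpret as an error layout */
            printf("qp 0x%x: error CQE\n", qpn);
            break;
        }
    }

    int main(void)
    {
        struct cqe64_model c = {
            .sop_drop_qpn = htonl(0xAA00002A),  /* flags 0xAA, qpn 0x2A */
            .wqe_counter  = htons(7),
            .op_own       = OP_REQ << 4,
        };
        poll_one(&c);   /* prints: qp 0x2a: send WQE 7 completed */
        return 0;
    }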
870 struct mlx5_cqe64 *cqe64; in init_cq_frag_buf() local
874 cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64; in init_cq_frag_buf()
875 cqe64->op_own = MLX5_CQE_INVALID << 4; in init_cq_frag_buf()
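
Lines 870-875 explain why the opcode guard in get_sw_cqe() is safe on the very first lap: init_cq_frag_buf() stamps MLX5_CQE_INVALID into the opcode nibble of every entry, so nothing is consumable until hardware overwrites it. (The recurring "cqe + 64" in the listing is the 128-byte-CQE case, where the 64-byte completion format occupies the second half of each entry.) A sketch of the same initialization over a plain array, dropping the fragmented-buffer indirection through mlx5_frag_buf_get_wqe(); all names and sizes here are illustrative:

    #include <stdint.h>

    #define OP_INVALID 0xf      /* stands in for MLX5_CQE_INVALID */
    #define NENT       64       /* number of CQ entries (illustrative) */

    struct cqe { uint8_t op_own; uint8_t pad[63]; };  /* 64-byte entry */

    /* Mark every entry invalid so the first-lap opcode check rejects it. */
    static void init_cq_buf(struct cqe *buf, int nent)
    {
        for (int i = 0; i < nent; i++)
            buf[i].op_own = OP_INVALID << 4;
    }

    int main(void)
    {
        static struct cqe buf[NENT];
        init_cq_buf(buf, NENT);
        return 0;
    }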
1066 static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn) in is_equal_rsn() argument
1068 return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff); in is_equal_rsn()
1073 struct mlx5_cqe64 *cqe64, *dest64; in __mlx5_ib_cq_clean() local
1097 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in __mlx5_ib_cq_clean()
1098 if (is_equal_rsn(cqe64, rsn)) { in __mlx5_ib_cq_clean()
1099 if (srq && (ntohl(cqe64->srqn) & 0xffffff)) in __mlx5_ib_cq_clean()
1100 mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter)); in __mlx5_ib_cq_clean()
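
__mlx5_ib_cq_clean() (1073-1100) runs when a QP is destroyed: it walks from the newest outstanding CQE back to the consumer index, discards every CQE whose 24-bit resource number matches (is_equal_rsn() masks sop_drop_qpn with 0xffffff because the top byte carries flags), frees the SRQ WQE first when the completion arrived through an SRQ (1099-1100), and slides the survivors so the unpolled region stays contiguous. A hedged model of that compaction over a plain array: is_target stands in for is_equal_rsn(), the walk direction and index bookkeeping are reconstructed from my reading of the driver, and the owner-bit fixup the real code performs on each copied CQE is noted but omitted:

    #include <stdint.h>
    #include <stdio.h>

    #define RING 8   /* power-of-two ring size (illustrative) */

    struct cqe { uint32_t rsn; };

    /* Plays the role of is_equal_rsn() and its 24-bit mask. */
    static int is_target(const struct cqe *c, uint32_t rsn)
    {
        return c->rsn == rsn;
    }

    /*
     * Walk from the newest outstanding CQE back to the consumer index
     * 'ci', drop every CQE for 'rsn', and slide survivors toward the
     * newest slots so the unpolled region stays contiguous.  The driver
     * additionally preserves each destination's owner bit when copying.
     */
    static uint32_t cq_clean(struct cqe *ring, uint32_t ci, uint32_t pi,
                             uint32_t rsn)
    {
        uint32_t nfreed = 0;

        while (pi-- > ci) {     /* pi, ci are free-running indices */
            struct cqe *c = &ring[pi & (RING - 1)];

            if (is_target(c, rsn))
                nfreed++;
            else if (nfreed)
                ring[(pi + nfreed) & (RING - 1)] = *c;
        }
        return ci + nfreed;     /* new consumer index, skipping freed slots */
    }

    int main(void)
    {
        struct cqe ring[RING] = {
            [0] = { .rsn = 1 }, [1] = { .rsn = 2 },
            [2] = { .rsn = 1 }, [3] = { .rsn = 3 },
        };
        uint32_t ci = cq_clean(ring, 0, 4, 1);  /* drop CQEs for rsn 1 */

        printf("new ci=%u\n", ci);              /* 2: two entries freed */
        for (uint32_t i = ci; i < 4; i++)       /* survivors: rsn 2, rsn 3 */
            printf("slot %u: rsn %u\n", i & (RING - 1),
                   ring[i & (RING - 1)].rsn);
        return 0;
    }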