/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iwch_provider.h"
#include "iwch.h"

/*
 * Get one cq entry from cxio and map it to openib.
 *
 * Returns:
 *	0			cq empty
 *	1			cqe returned
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
			    struct ib_wc *wc)
{
	struct iwch_qp *qhp = NULL;
	struct t3_cqe cqe, *rd_cqe;
	struct t3_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie;
	int ret = 1;

	rd_cqe = cxio_next_cqe(&chp->cq);

	if (!rd_cqe)
		return 0;

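	/*
	 * Look up the QP this CQE refers to and hold its lock while the
	 * entry is consumed.  If the QP is already gone, wq stays NULL
	 * and cxio_poll_cq() is expected to discard the CQE, which shows
	 * up here as -EAGAIN.
	 */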
	qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
			   &credit);
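	/*
	 * On T3A parts the driver must explicitly return CQ credits to
	 * the hardware; on later parts this is presumably handled without
	 * driver involvement, so the update is skipped.
	 */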
	if (t3a_device(chp->rhp) && credit) {
		pr_debug("%s updating %d cq credits on id %d\n", __func__,
			 credit, chp->cq.cqid);
		cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
	}

	if (ret) {
		ret = -EAGAIN;
		goto out;
	}
	ret = 1;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(cqe);
	wc->wc_flags = 0;

	pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
		 __func__,
		 CQE_QPID(cqe), CQE_TYPE(cqe),
		 CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
		 CQE_WRID_LOW(cqe), (unsigned long long)cookie);

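	/*
	 * A CQE of type 0 completes a receive; everything else is a
	 * send-queue completion, mapped by opcode below.
	 */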
	if (CQE_TYPE(cqe) == 0) {
		if (!CQE_STATUS(cqe))
			wc->byte_len = CQE_LEN(cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
		    CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		switch (CQE_OPCODE(cqe)) {
		case T3_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case T3_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(cqe);
			break;
		case T3_SEND:
		case T3_SEND_WITH_SE:
		case T3_SEND_WITH_INV:
		case T3_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			break;
		case T3_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case T3_FAST_REGISTER:
			wc->opcode = IB_WC_REG_MR;
			break;
		default:
			pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
			goto out;
		}
	}

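	/*
	 * Map the T3 completion status onto the ib_wc_status space.  A
	 * flushed CQE always reports IB_WC_WR_FLUSH_ERR, regardless of
	 * the hardware status carried in the entry.
	 */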
	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {
		switch (CQE_STATUS(cqe)) {
		case TPT_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case TPT_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case TPT_ERR_QPID:
		case TPT_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case TPT_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case TPT_ERR_INVALIDATE_SHARED_MR:
		case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case TPT_ERR_CRC:
		case TPT_ERR_MARKER:
		case TPT_ERR_PDU_LEN_ERR:
		case TPT_ERR_OUT_OF_RQE:
		case TPT_ERR_DDP_VERSION:
		case TPT_ERR_RDMA_VERSION:
		case TPT_ERR_DDP_QUEUE_NUM:
		case TPT_ERR_MSN:
		case TPT_ERR_TBIT:
		case TPT_ERR_MO:
		case TPT_ERR_MSN_RANGE:
		case TPT_ERR_IRD_OVERFLOW:
		case TPT_ERR_OPCODE:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case TPT_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			pr_err("Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}

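/*
 * Poll the CQ for up to num_entries completions.  This is the driver's
 * ib_poll_cq entry point: it returns the number of wc entries filled
 * in, or a negative errno on a fatal error.
 */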
int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;

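	/*
	 * Take the CQ lock with IRQs disabled for the whole batch so the
	 * poll cannot race with another context consuming entries from
	 * the same CQ.
	 */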
	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
#ifdef DEBUG
		int i = 0;
#endif

		/*
		 * Because T3 can post CQEs that are _not_ associated
		 * with a WR, we might have to poll again after removing
		 * one of these.
		 */
		do {
			err = iwch_poll_cq_one(rhp, chp, wc + npolled);
#ifdef DEBUG
			BUG_ON(++i > 1000);
#endif
		} while (err == -EAGAIN);
		if (err <= 0)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);

	if (err < 0)
		return err;

	return npolled;
}
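
/*
 * Usage sketch (illustrative, not part of this driver): a consumer
 * drains the CQ through the ib_poll_cq() verb, which dispatches to
 * iwch_poll_cq() for cxgb3 devices.  MY_BATCH, handle_completion()
 * and cq are hypothetical names assumed to exist in the caller.
 *
 *	struct ib_wc wc[MY_BATCH];
 *	int i, n;
 *
 *	do {
 *		n = ib_poll_cq(cq, MY_BATCH, wc);
 *		for (i = 0; i < n; i++)
 *			handle_completion(&wc[i]);
 *	} while (n > 0);
 *
 * A negative n is the fatal -errno propagated from iwch_poll_cq_one().
 */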