/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __T4_H__
#define __T4_H__

#include "t4_hw.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_ri_api.h"

#define T4_MAX_NUM_PD 65536
#define T4_MAX_MR_SIZE (~0ULL)
#define T4_PAGESIZE_MASK 0xffff000  /* 4KB-128MB */
#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
#define PCIE_MA_SYNC_A 0x30b4

struct t4_status_page {
	__be32 rsvd1;	/* flit 0 - hw owns */
	__be16 rsvd2;
	__be16 qid;
	__be16 cidx;
	__be16 pidx;
	u8 qp_err;	/* flit 1 - sw owns */
	u8 db_off;
	u8 pad;
	u16 host_wq_pidx;
	u16 host_cidx;
	u16 host_pidx;
};
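
/*
 * Editorial note: the status page occupies the slot one past the last WR
 * entry, i.e. queue[size]; the t4_wq_in_error(), t4_set_wq_in_error() and
 * doorbell on/off helpers below all index it that way.
 */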

#define T4_EQ_ENTRY_SIZE 64

#define T4_SQ_NUM_SLOTS 5
#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
			sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
			sizeof(struct fw_ri_rdma_write_wr) - \
			sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
			sizeof(struct fw_ri_rdma_write_wr) - \
			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
			sizeof(struct fw_ri_immd)) & ~31UL)
#define T4_MAX_FR_IMMD_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
#define T4_MAX_FR_DSGL 1024
#define T4_MAX_FR_DSGL_DEPTH (T4_MAX_FR_DSGL / sizeof(u64))
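
/*
 * Editorial note: with 8-byte page addresses, T4_MAX_FR_DSGL (1024 bytes)
 * yields a DSGL fast-register depth of 1024 / sizeof(u64) = 128 pages;
 * the immediate-PBL depth is bounded instead by what fits in the five
 * 64-byte SQ slots after the fr_nsmr WR and immd headers.
 */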

static inline int t4_max_fr_depth(int use_dsgl)
{
	return use_dsgl ? T4_MAX_FR_DSGL_DEPTH : T4_MAX_FR_IMMD_DEPTH;
}

#define T4_RQ_NUM_SLOTS 2
#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
#define T4_MAX_RECV_SGE 4

union t4_wr {
	struct fw_ri_res_wr res;
	struct fw_ri_wr ri;
	struct fw_ri_rdma_write_wr write;
	struct fw_ri_send_wr send;
	struct fw_ri_rdma_read_wr read;
	struct fw_ri_bind_mw_wr bind;
	struct fw_ri_fr_nsmr_wr fr;
	struct fw_ri_fr_nsmr_tpte_wr fr_tpte;
	struct fw_ri_inv_lstag_wr inv;
	struct t4_status_page status;
	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
};

union t4_recv_wr {
	struct fw_ri_recv_wr recv;
	struct t4_status_page status;
	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
};
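
/*
 * Editorial sketch (not part of the upstream header), assuming the usual
 * kernel DIV_ROUND_UP(): a WR's len16 field counts 16-byte units, so an
 * n-byte WR occupies DIV_ROUND_UP(n, 16) of them, and the produce helpers
 * below convert that into 64-byte queue slots via
 * DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE).
 */
static inline u8 t4_bytes_to_len16(size_t nbytes)
{
	return (u8)DIV_ROUND_UP(nbytes, 16);
}

/*
 * Editorial note: every fw_ri_*_wr variant in union t4_wr starts with the
 * same header words (opcode, flags, wrid, len16), so init_wr_hdr() can fill
 * them through the send overlay regardless of which WR type is being posted.
 */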
static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
			       enum fw_wr_opcodes opcode, u8 flags, u8 len16)
{
	wqe->send.opcode = (u8)opcode;
	wqe->send.flags = flags;
	wqe->send.wrid = wrid;
	wqe->send.r1[0] = 0;
	wqe->send.r1[1] = 0;
	wqe->send.r1[2] = 0;
	wqe->send.len16 = len16;
}

/* CQE/AE status codes */
#define T4_ERR_SUCCESS                     0x0
#define T4_ERR_STAG                        0x1	/* STAG invalid: either the */
						/* STAG is out of range, is 0, */
						/* or the STAG_key mismatches */
#define T4_ERR_PDID                        0x2	/* PDID mismatch */
#define T4_ERR_QPID                        0x3	/* QPID mismatch */
#define T4_ERR_ACCESS                      0x4	/* Invalid access right */
#define T4_ERR_WRAP                        0x5	/* Wrap error */
#define T4_ERR_BOUND                       0x6	/* base and bounds violation */
#define T4_ERR_INVALIDATE_SHARED_MR        0x7	/* attempt to invalidate a  */
						/* shared memory region */
#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8	/* attempt to invalidate a  */
						/* MR with a MW bound to it */
#define T4_ERR_ECC                         0x9	/* ECC error detected */
#define T4_ERR_ECC_PSTAG                   0xA	/* ECC error detected when  */
						/* reading PSTAG for a MW  */
						/* Invalidate */
#define T4_ERR_PBL_ADDR_BOUND              0xB	/* pbl addr out of bounds:  */
						/* software error */
#define T4_ERR_SWFLUSH			   0xC	/* SW FLUSHED */
#define T4_ERR_CRC                         0x10 /* CRC error */
#define T4_ERR_MARKER                      0x11 /* Marker error */
#define T4_ERR_PDU_LEN_ERR                 0x12 /* invalid PDU length */
#define T4_ERR_OUT_OF_RQE                  0x13 /* out of RQE */
#define T4_ERR_DDP_VERSION                 0x14 /* wrong DDP version */
#define T4_ERR_RDMA_VERSION                0x15 /* wrong RDMA version */
#define T4_ERR_OPCODE                      0x16 /* invalid rdma opcode */
#define T4_ERR_DDP_QUEUE_NUM               0x17 /* invalid ddp queue number */
#define T4_ERR_MSN                         0x18 /* MSN error */
#define T4_ERR_TBIT                        0x19 /* tag bit not set correctly */
#define T4_ERR_MO                          0x1A /* MO not 0 for TERMINATE  */
						/* or READ_REQ */
#define T4_ERR_MSN_GAP                     0x1B
#define T4_ERR_MSN_RANGE                   0x1C
#define T4_ERR_IRD_OVERFLOW                0x1D
#define T4_ERR_RQE_ADDR_BOUND              0x1E /* RQE addr out of bounds:  */
						/* software error */
#define T4_ERR_INTERNAL_ERR                0x1F /* internal error (opcode  */
						/* mismatch) */
/*
 * CQE defs
 */
struct t4_cqe {
	__be32 header;
	__be32 len;
	union {
		struct {
			__be32 stag;
			__be32 msn;
		} rcqe;
		struct {
			__be32 stag;
			u16 nada2;
			u16 cidx;
		} scqe;
		struct {
			__be32 wrid_hi;
			__be32 wrid_low;
		} gen;
		u64 drain_cookie;
	} u;
	__be64 reserved;
	__be64 bits_type_ts;
};
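
/*
 * Editorial note: a CQE is four 8-byte flits. Flit 0 holds header and len
 * (decoded by the macros below), flit 1 the rcqe/scqe/gen union, flit 2
 * is reserved, and flit 3 carries the gen/ovf/iqtype bits and timestamp.
 */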

/* macros for flit 0 of the cqe */

#define CQE_QPID_S        12
#define CQE_QPID_M        0xFFFFF
#define CQE_QPID_G(x)     ((((x) >> CQE_QPID_S)) & CQE_QPID_M)
#define CQE_QPID_V(x)	  ((x)<<CQE_QPID_S)

#define CQE_SWCQE_S       11
#define CQE_SWCQE_M       0x1
#define CQE_SWCQE_G(x)    ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
#define CQE_SWCQE_V(x)	  ((x)<<CQE_SWCQE_S)

#define CQE_DRAIN_S       10
#define CQE_DRAIN_M       0x1
#define CQE_DRAIN_G(x)    ((((x) >> CQE_DRAIN_S)) & CQE_DRAIN_M)
#define CQE_DRAIN_V(x)	  ((x)<<CQE_DRAIN_S)

#define CQE_STATUS_S      5
#define CQE_STATUS_M      0x1F
#define CQE_STATUS_G(x)   ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
#define CQE_STATUS_V(x)   ((x)<<CQE_STATUS_S)

#define CQE_TYPE_S        4
#define CQE_TYPE_M        0x1
#define CQE_TYPE_G(x)     ((((x) >> CQE_TYPE_S)) & CQE_TYPE_M)
#define CQE_TYPE_V(x)     ((x)<<CQE_TYPE_S)

#define CQE_OPCODE_S      0
#define CQE_OPCODE_M      0xF
#define CQE_OPCODE_G(x)   ((((x) >> CQE_OPCODE_S)) & CQE_OPCODE_M)
#define CQE_OPCODE_V(x)   ((x)<<CQE_OPCODE_S)

#define SW_CQE(x)         (CQE_SWCQE_G(be32_to_cpu((x)->header)))
#define DRAIN_CQE(x)      (CQE_DRAIN_G(be32_to_cpu((x)->header)))
#define CQE_QPID(x)       (CQE_QPID_G(be32_to_cpu((x)->header)))
#define CQE_TYPE(x)       (CQE_TYPE_G(be32_to_cpu((x)->header)))
#define SQ_TYPE(x)	  (CQE_TYPE((x)))
#define RQ_TYPE(x)	  (!CQE_TYPE((x)))
#define CQE_STATUS(x)     (CQE_STATUS_G(be32_to_cpu((x)->header)))
#define CQE_OPCODE(x)     (CQE_OPCODE_G(be32_to_cpu((x)->header)))

#define CQE_SEND_OPCODE(x)( \
	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
	(CQE_OPCODE_G(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))

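/*
 * Editorial sketch (not part of the upstream header): composing a CQE
 * header from its fields with the _V() setters above. Software-inserted
 * CQEs would store the result in cqe->header; the _G()-based getters
 * above recover each field after be32_to_cpu().
 */
static inline __be32 t4_compose_cqe_header(u32 qpid, u32 swcqe, u32 status,
					   u32 type, u32 opcode)
{
	return cpu_to_be32(CQE_QPID_V(qpid) | CQE_SWCQE_V(swcqe) |
			   CQE_STATUS_V(status) | CQE_TYPE_V(type) |
			   CQE_OPCODE_V(opcode));
}
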
#define CQE_LEN(x)        (be32_to_cpu((x)->len))

/* used for RQ completion processing */
#define CQE_WRID_STAG(x)  (be32_to_cpu((x)->u.rcqe.stag))
#define CQE_WRID_MSN(x)   (be32_to_cpu((x)->u.rcqe.msn))

/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x)	((x)->u.scqe.cidx)
#define CQE_WRID_FR_STAG(x)     (be32_to_cpu((x)->u.scqe.stag))

/* generic accessor macros */
#define CQE_WRID_HI(x)		(be32_to_cpu((x)->u.gen.wrid_hi))
#define CQE_WRID_LOW(x)		(be32_to_cpu((x)->u.gen.wrid_low))
#define CQE_DRAIN_COOKIE(x)	((x)->u.drain_cookie)

/* macros for flit 3 of the cqe */
#define CQE_GENBIT_S	63
#define CQE_GENBIT_M	0x1
#define CQE_GENBIT_G(x)	(((x) >> CQE_GENBIT_S) & CQE_GENBIT_M)
#define CQE_GENBIT_V(x) ((x)<<CQE_GENBIT_S)

#define CQE_OVFBIT_S	62
#define CQE_OVFBIT_M	0x1
#define CQE_OVFBIT_G(x)	((((x) >> CQE_OVFBIT_S)) & CQE_OVFBIT_M)

#define CQE_IQTYPE_S	60
#define CQE_IQTYPE_M	0x3
#define CQE_IQTYPE_G(x)	((((x) >> CQE_IQTYPE_S)) & CQE_IQTYPE_M)

#define CQE_TS_M	0x0fffffffffffffffULL
#define CQE_TS_G(x)	((x) & CQE_TS_M)

#define CQE_OVFBIT(x)	((unsigned)CQE_OVFBIT_G(be64_to_cpu((x)->bits_type_ts)))
#define CQE_GENBIT(x)	((unsigned)CQE_GENBIT_G(be64_to_cpu((x)->bits_type_ts)))
#define CQE_TS(x)	(CQE_TS_G(be64_to_cpu((x)->bits_type_ts)))

struct t4_swsqe {
	u64			wr_id;
	struct t4_cqe		cqe;
	int			read_len;
	int			opcode;
	int			complete;
	int			signaled;
	u16			idx;
	int                     flushed;
	struct timespec         host_ts;
	u64                     sge_ts;
};

static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
	return pgprot_writecombine(prot);
#else
	return pgprot_noncached(prot);
#endif
}

enum {
	T4_SQ_ONCHIP = (1<<0),
};

struct t4_sq {
	union t4_wr *queue;
	dma_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	unsigned long phys_addr;
	struct t4_swsqe *sw_sq;
	struct t4_swsqe *oldest_read;
	void __iomem *bar2_va;
	u64 bar2_pa;
	size_t memsize;
	u32 bar2_qid;
	u32 qid;
	u16 in_use;
	u16 size;
	u16 cidx;
	u16 pidx;
	u16 wq_pidx;
	u16 wq_pidx_inc;
	u16 flags;
	short flush_cidx;
};

struct t4_swrqe {
	u64 wr_id;
	struct timespec host_ts;
	u64 sge_ts;
};

struct t4_rq {
	union t4_recv_wr *queue;
	dma_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	struct t4_swrqe *sw_rq;
	void __iomem *bar2_va;
	u64 bar2_pa;
	size_t memsize;
	u32 bar2_qid;
	u32 qid;
	u32 msn;
	u32 rqt_hwaddr;
	u16 rqt_size;
	u16 in_use;
	u16 size;
	u16 cidx;
	u16 pidx;
	u16 wq_pidx;
	u16 wq_pidx_inc;
};

struct t4_wq {
	struct t4_sq sq;
	struct t4_rq rq;
	void __iomem *db;
	struct c4iw_rdev *rdev;
	int flushed;
};

static inline int t4_rqes_posted(struct t4_wq *wq)
{
	return wq->rq.in_use;
}

static inline int t4_rq_empty(struct t4_wq *wq)
{
	return wq->rq.in_use == 0;
}

static inline int t4_rq_full(struct t4_wq *wq)
{
	return wq->rq.in_use == (wq->rq.size - 1);
}

static inline u32 t4_rq_avail(struct t4_wq *wq)
{
	return wq->rq.size - 1 - wq->rq.in_use;
}

static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
{
	wq->rq.in_use++;
	if (++wq->rq.pidx == wq->rq.size)
		wq->rq.pidx = 0;
	wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
		wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
}

static inline void t4_rq_consume(struct t4_wq *wq)
{
	wq->rq.in_use--;
	wq->rq.msn++;
	if (++wq->rq.cidx == wq->rq.size)
		wq->rq.cidx = 0;
}

static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq)
{
	return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
}

static inline u16 t4_rq_wq_size(struct t4_wq *wq)
{
	return wq->rq.size * T4_RQ_NUM_SLOTS;
}

static inline int t4_sq_onchip(struct t4_sq *sq)
{
	return sq->flags & T4_SQ_ONCHIP;
}

static inline int t4_sq_empty(struct t4_wq *wq)
{
	return wq->sq.in_use == 0;
}

static inline int t4_sq_full(struct t4_wq *wq)
{
	return wq->sq.in_use == (wq->sq.size - 1);
}

static inline u32 t4_sq_avail(struct t4_wq *wq)
{
	return wq->sq.size - 1 - wq->sq.in_use;
}

static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
{
	wq->sq.in_use++;
	if (++wq->sq.pidx == wq->sq.size)
		wq->sq.pidx = 0;
	wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
		wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
}

static inline void t4_sq_consume(struct t4_wq *wq)
{
	BUG_ON(wq->sq.in_use < 1);
	if (wq->sq.cidx == wq->sq.flush_cidx)
		wq->sq.flush_cidx = -1;
	wq->sq.in_use--;
	if (++wq->sq.cidx == wq->sq.size)
		wq->sq.cidx = 0;
}

static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq)
{
	return wq->sq.queue[wq->sq.size].status.host_wq_pidx;
}

static inline u16 t4_sq_wq_size(struct t4_wq *wq)
{
	return wq->sq.size * T4_SQ_NUM_SLOTS;
}

/*
 * This function copies one 64-byte coalesced work request to memory-mapped
 * BAR2 space. For coalesced WRs, the SGE fetches the data from this FIFO
 * instead of from host memory.
 */
static inline void pio_copy(u64 __iomem *dst, u64 *src)
{
	int count = 8;

	while (count) {
		writeq(*src, dst);
		src++;
		dst++;
		count--;
	}
}

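/*
 * Editorial note: the ring helpers below take one of three doorbell paths:
 * a write-combined pio_copy() of a single coalesced WR through the BAR2
 * user doorbell, a BAR2 kernel doorbell write of the PIDX increment, or,
 * when no BAR2 mapping exists, the legacy doorbell register at wq->db.
 */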
static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
{
	/* Flush host queue memory writes. */
	wmb();
	if (wq->sq.bar2_va) {
		if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
			pr_debug("%s: WC wq->sq.pidx = %d\n",
				 __func__, wq->sq.pidx);
			pio_copy((u64 __iomem *)
				 (wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
				 (u64 *)wqe);
		} else {
			pr_debug("%s: DB wq->sq.pidx = %d\n",
				 __func__, wq->sq.pidx);
			writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
			       wq->sq.bar2_va + SGE_UDB_KDOORBELL);
		}

		/* Flush user doorbell area writes. */
		wmb();
		return;
	}
	writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
}

static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
				 union t4_recv_wr *wqe)
{
	/* Flush host queue memory writes. */
	wmb();
	if (wq->rq.bar2_va) {
		if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
			pr_debug("%s: WC wq->rq.pidx = %d\n",
				 __func__, wq->rq.pidx);
			pio_copy((u64 __iomem *)
				 (wq->rq.bar2_va + SGE_UDB_WCDOORBELL),
				 (u64 *)wqe);
		} else {
			pr_debug("%s: DB wq->rq.pidx = %d\n",
				 __func__, wq->rq.pidx);
			writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid),
			       wq->rq.bar2_va + SGE_UDB_KDOORBELL);
		}

		/* Flush user doorbell area writes. */
		wmb();
		return;
	}
	writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
}

static inline int t4_wq_in_error(struct t4_wq *wq)
{
	return wq->rq.queue[wq->rq.size].status.qp_err;
}

static inline void t4_set_wq_in_error(struct t4_wq *wq)
{
	wq->rq.queue[wq->rq.size].status.qp_err = 1;
}

static inline void t4_disable_wq_db(struct t4_wq *wq)
{
	wq->rq.queue[wq->rq.size].status.db_off = 1;
}

static inline void t4_enable_wq_db(struct t4_wq *wq)
{
	wq->rq.queue[wq->rq.size].status.db_off = 0;
}

static inline int t4_wq_db_enabled(struct t4_wq *wq)
{
	return !wq->rq.queue[wq->rq.size].status.db_off;
}

enum t4_cq_flags {
	CQ_ARMED	= 1,
};

struct t4_cq {
	struct t4_cqe *queue;
	dma_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	struct t4_cqe *sw_queue;
	void __iomem *gts;
	void __iomem *bar2_va;
	u64 bar2_pa;
	u32 bar2_qid;
	struct c4iw_rdev *rdev;
	size_t memsize;
	__be64 bits_type_ts;
	u32 cqid;
	u32 qid_mask;
	int vector;
	u16 size; /* including status page */
	u16 cidx;
	u16 sw_pidx;
	u16 sw_cidx;
	u16 sw_in_use;
	u16 cidx_inc;
	u8 gen;
	u8 error;
	unsigned long flags;
};

static inline void write_gts(struct t4_cq *cq, u32 val)
{
	if (cq->bar2_va)
		writel(val | INGRESSQID_V(cq->bar2_qid),
		       cq->bar2_va + SGE_UDB_GTS);
	else
		writel(val | INGRESSQID_V(cq->cqid), cq->gts);
}

static inline int t4_clear_cq_armed(struct t4_cq *cq)
{
	return test_and_clear_bit(CQ_ARMED, &cq->flags);
}

static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
	u32 val;

	set_bit(CQ_ARMED, &cq->flags);
	while (cq->cidx_inc > CIDXINC_M) {
		val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7);
		write_gts(cq, val);
		cq->cidx_inc -= CIDXINC_M;
	}
	val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6);
	write_gts(cq, val);
	cq->cidx_inc = 0;
	return 0;
}
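
/*
 * Editorial note: the GTS register's CIDXInc field is only CIDXINC_M wide,
 * so t4_arm_cq() above flushes larger pending index updates in CIDXINC_M
 * sized writes first; only the final write carries the caller's SEINTARM
 * value to actually (re)arm the CQ.
 */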

static inline void t4_swcq_produce(struct t4_cq *cq)
{
	cq->sw_in_use++;
	if (cq->sw_in_use == cq->size) {
		pr_debug("%s cxgb4 sw cq overflow cqid %u\n",
			 __func__, cq->cqid);
		cq->error = 1;
		BUG_ON(1);
	}
	if (++cq->sw_pidx == cq->size)
		cq->sw_pidx = 0;
}

static inline void t4_swcq_consume(struct t4_cq *cq)
{
	BUG_ON(cq->sw_in_use < 1);
	cq->sw_in_use--;
	if (++cq->sw_cidx == cq->size)
		cq->sw_cidx = 0;
}

static inline void t4_hwcq_consume(struct t4_cq *cq)
{
	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
	if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) {
		u32 val;

		val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7);
		write_gts(cq, val);
		cq->cidx_inc = 0;
	}
	if (++cq->cidx == cq->size) {
		cq->cidx = 0;
		cq->gen ^= 1;
	}
}

static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
{
	return (CQE_GENBIT(cqe) == cq->gen);
}

static inline int t4_cq_notempty(struct t4_cq *cq)
{
	return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
}

static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret;
	u16 prev_cidx;

	if (cq->cidx == 0)
		prev_cidx = cq->size - 1;
	else
		prev_cidx = cq->cidx - 1;

	if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
		ret = -EOVERFLOW;
		cq->error = 1;
		pr_err("cq overflow cqid %u\n", cq->cqid);
		BUG_ON(1);
	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {

		/* Ensure CQE is flushed to memory */
		rmb();
		*cqe = &cq->queue[cq->cidx];
		ret = 0;
	} else
		ret = -ENODATA;
	return ret;
}

static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
	if (cq->sw_in_use == cq->size) {
		pr_debug("%s cxgb4 sw cq overflow cqid %u\n",
			 __func__, cq->cqid);
		cq->error = 1;
		BUG_ON(1);
		return NULL;
	}
	if (cq->sw_in_use)
		return &cq->sw_queue[cq->sw_cidx];
	return NULL;
}

static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	int ret = 0;

	if (cq->error)
		ret = -ENODATA;
	else if (cq->sw_in_use)
		*cqe = &cq->sw_queue[cq->sw_cidx];
	else
		ret = t4_next_hw_cqe(cq, cqe);
	return ret;
}

static inline int t4_cq_in_error(struct t4_cq *cq)
{
	return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
}

static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
	((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
}

struct t4_dev_status_page {
	u8 db_off;
	u8 pad1;
	u16 pad2;
	u32 pad3;
	u64 qp_start;
	u64 qp_size;
	u64 cq_start;
	u64 cq_size;
};

#endif /* __T4_H__ */