/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IPATH_VERBS_H
#define IPATH_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

#include "ipath_kernel.h"

#define IPATH_MAX_RDMA_ATOMIC	4

#define QPN_MAX                 (1 << 24)
#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
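
/*
 * Worked example (illustrative, assuming the common 4 KiB PAGE_SIZE):
 * each map page holds PAGE_SIZE * BITS_PER_BYTE = 32768 QPN bits, so
 * covering all QPN_MAX = 2^24 QPNs takes 2^24 / 4096 / 8 = 512 entries.
 */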

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define IPATH_UVERBS_ABI_VERSION       2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE	(IB_CQ_NEXT_COMP + 1)

/* AETH NAK opcode values */
#define IB_RNR_NAK			0x20
#define IB_NAK_PSN_ERROR		0x60
#define IB_NAK_INVALID_REQUEST		0x61
#define IB_NAK_REMOTE_ACCESS_ERROR	0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST	0x64

/* Flags for checking QP state (see ib_ipath_state_ops[]) */
#define IPATH_POST_SEND_OK		0x01
#define IPATH_POST_RECV_OK		0x02
#define IPATH_PROCESS_RECV_OK		0x04
#define IPATH_PROCESS_SEND_OK		0x08
#define IPATH_PROCESS_NEXT_SEND_OK	0x10
#define IPATH_FLUSH_SEND		0x20
#define IPATH_FLUSH_RECV		0x40
#define IPATH_PROCESS_OR_FLUSH_SEND \
	(IPATH_PROCESS_SEND_OK | IPATH_FLUSH_SEND)

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE	0x00
#define IB_PMA_SAMPLE_STATUS_STARTED	0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING	0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA	cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA	cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS	cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS	cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT	cpu_to_be16(0x0005)

struct ib_reth {
	__be64 vaddr;
	__be32 rkey;
	__be32 length;
} __attribute__ ((packed));

struct ib_atomic_eth {
	__be32 vaddr[2];	/* unaligned so access as 2 32-bit words */
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __attribute__ ((packed));
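
/*
 * Illustrative sketch (not part of the original header): because the
 * atomic ETH vaddr is not 8-byte aligned on the wire, it is declared
 * above as two 32-bit words; the 64-bit address can be reassembled
 * from the big-endian halves, most significant word first (ateth being
 * a struct ib_atomic_eth pointer):
 *
 *	u64 vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
 *		    be32_to_cpu(ateth->vaddr[1]);
 */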

struct ipath_other_headers {
	__be32 bth[3];
	union {
		struct {
			__be32 deth[2];
			__be32 imm_data;
		} ud;
		struct {
			struct ib_reth reth;
			__be32 imm_data;
		} rc;
		struct {
			__be32 aeth;
			__be32 atomic_ack_eth[2];
		} at;
		__be32 imm_data;
		__be32 aeth;
		struct ib_atomic_eth atomic_eth;
	} u;
} __attribute__ ((packed));

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
struct ipath_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct ipath_other_headers oth;
		} l;
		struct ipath_other_headers oth;
	} u;
} __attribute__ ((packed));

struct ipath_pio_header {
	__le32 pbc[2];
	struct ipath_ib_header hdr;
} __attribute__ ((packed));

/*
 * There is one struct ipath_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct ipath_mcast_qp.
 */
struct ipath_mcast_qp {
	struct list_head list;
	struct ipath_qp *qp;
};

struct ipath_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/* Protection domain */
struct ipath_pd {
	struct ib_pd ibpd;
	int user;		/* non-zero if created from user space */
};

/* Address Handle */
struct ipath_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
};

/*
 * This structure is used by ipath_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct ipath_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct ipath_cq_wc {
	u32 head;		/* index of next entry to fill */
	u32 tail;		/* index of next ib_poll_cq() entry */
	union {
		/* these are actually size ibcq.cqe + 1 */
		struct ib_uverbs_wc uqueue[0];
		struct ib_wc kqueue[0];
	};
};
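
/*
 * Consumption sketch (illustrative, kernel-side kqueue assumed): head
 * is where new completions are written, tail is where the consumer
 * reads, and both indices wrap within the ibcq.cqe + 1 entries:
 *
 *	if (wc->tail != wc->head) {
 *		struct ib_wc entry = wc->kqueue[wc->tail];
 *
 *		if (wc->tail >= cq->ibcq.cqe)
 *			wc->tail = 0;
 *		else
 *			wc->tail++;
 *	}
 */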

/*
 * The completion queue structure.
 */
struct ipath_cq {
	struct ib_cq ibcq;
	struct tasklet_struct comptask;
	spinlock_t lock;
	u8 notify;
	u8 triggered;
	struct ipath_cq_wc *queue;
	struct ipath_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct ipath_seg {
	void *vaddr;
	size_t length;
};

/* The number of ipath_segs that fit in a page. */
#define IPATH_SEGSZ     (PAGE_SIZE / sizeof (struct ipath_seg))

struct ipath_segarray {
	struct ipath_seg segs[IPATH_SEGSZ];
};
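
/*
 * Worked example (illustrative, assuming a 64-bit kernel with 4 KiB
 * pages): struct ipath_seg is 16 bytes (an 8-byte pointer plus an
 * 8-byte size_t), so IPATH_SEGSZ = 4096 / 16 = 256 segments per page.
 */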

struct ipath_mregion {
	struct ib_pd *pd;	/* shares refcnt of ibmr.pd */
	u64 user_base;		/* User's address for this region */
	u64 iova;		/* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;		/* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;		/* number of ipath_segs in all the arrays */
	u32 mapsz;		/* size of the map array */
	struct ipath_segarray *map[0];	/* the segments */
};
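
/*
 * Lookup sketch (illustrative): segment i of a region lives at
 * map[i / IPATH_SEGSZ]->segs[i % IPATH_SEGSZ].  The (m, n) cursor
 * kept in struct ipath_sge below advances through the two-level
 * array without any division:
 *
 *	struct ipath_seg *seg = &mr->map[m]->segs[n];
 *
 *	if (++n >= IPATH_SEGSZ) {
 *		m++;
 *		n = 0;
 *	}
 */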

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct ipath_sge {
	struct ipath_mregion *mr;
	void *vaddr;		/* kernel virtual address of segment */
	u32 sge_length;		/* length of the SGE */
	u32 length;		/* remaining length of the segment */
	u16 m;			/* current index: mr->map[m] */
	u16 n;			/* current index: mr->map[m]->segs[n] */
};

/* Memory region */
struct ipath_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct ipath_mregion mr;	/* must be last */
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct ipath_swqe {
	union {
		struct ib_send_wr wr;   /* don't use wr.sg_list */
		struct ib_ud_wr ud_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};

	u32 psn;		/* first packet sequence number */
	u32 lpsn;		/* last packet sequence number */
	u32 ssn;		/* send sequence number */
	u32 length;		/* total length of data in sg_list */
	struct ipath_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct ipath_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct ipath_rwq {
	u32 head;		/* new work requests posted to the head */
	u32 tail;		/* receives pull requests from here. */
	struct ipath_rwqe wq[0];
};

struct ipath_rq {
	struct ipath_rwq *wq;
	spinlock_t lock;
	u32 size;		/* size of RWQE array */
	u8 max_sge;
};

struct ipath_srq {
	struct ib_srq ibsrq;
	struct ipath_rq rq;
	struct ipath_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

struct ipath_sge_state {
	struct ipath_sge *sg_list;      /* next SGE to be used if any */
	struct ipath_sge sge;   /* progress state for the current SGE */
	u8 num_sge;
	u8 static_rate;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send a RDMA read response or atomic operation.
 */
struct ipath_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	union {
		struct ipath_sge_state rdma_sge;
		u64 atomic_data;
	};
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock, taken in
 * that order; both locks are only held together in modify_qp() or when
 * changing the QP 'state'.
 */
struct ipath_qp {
	struct ib_qp ibqp;
	struct ipath_qp *next;		/* link list for QPN hash table */
	struct ipath_qp *timer_next;	/* link list for ipath_ib_timer() */
	struct ipath_qp *pio_next;	/* link for ipath_ib_piobufavail() */
	struct list_head piowait;	/* link while waiting for a PIO buffer */
	struct list_head timerwait;	/* link while waiting for timeouts */
	struct ib_ah_attr remote_ah_attr;
	struct ipath_ib_header s_hdr;	/* next packet header to send */
	atomic_t refcount;
	wait_queue_head_t wait;
	wait_queue_head_t wait_dma;
	struct tasklet_struct s_task;
	struct ipath_mmap_info *ip;
	struct ipath_sge_state *s_cur_sge;
	struct ipath_verbs_txreq *s_tx;
	struct ipath_sge_state s_sge;	/* current send request data */
	struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
	struct ipath_sge_state s_ack_rdma_sge;
	struct ipath_sge_state s_rdma_read_sge;
	struct ipath_sge_state r_sge;	/* current receive data */
	spinlock_t s_lock;
	atomic_t s_dma_busy;
	u16 s_pkt_delay;
	u16 s_hdrwords;		/* size of s_hdr in 32 bit words */
	u32 s_cur_size;		/* size of send packet in bytes */
	u32 s_len;		/* total length of s_sge */
	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
	u32 s_next_psn;		/* PSN for next request */
	u32 s_last_psn;		/* last response PSN processed */
	u32 s_psn;		/* current packet sequence number */
	u32 s_ack_rdma_psn;	/* PSN for sending RDMA read responses */
	u32 s_ack_psn;		/* PSN for acking sends and RDMA writes */
	u32 s_rnr_timeout;	/* number of milliseconds for RNR timeout */
	u32 r_ack_psn;		/* PSN for next ACK or atomic ACK */
	u64 r_wr_id;		/* ID for current receive WQE */
	unsigned long r_aflags;
	u32 r_len;		/* total length of r_sge */
	u32 r_rcv_len;		/* receive data len processed */
	u32 r_psn;		/* expected rcv packet sequence number */
	u32 r_msn;		/* message sequence number */
	u8 state;		/* QP state */
	u8 s_state;		/* opcode of last packet sent */
	u8 s_ack_state;		/* opcode of packet to ACK */
	u8 s_nak_state;		/* non-zero if NAK is pending */
	u8 r_state;		/* opcode of last packet received */
	u8 r_nak_state;		/* non-zero if NAK is pending */
	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
	u8 r_flags;
	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
	u8 r_head_ack_queue;	/* index into s_ack_queue[] */
	u8 qp_access_flags;
	u8 s_max_sge;		/* size of s_wq->sg_list */
	u8 s_retry_cnt;		/* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 s_retry;		/* requester retry counter */
	u8 s_rnr_retry;		/* requester RNR retry counter */
	u8 s_pkey_index;	/* PKEY index to use */
	u8 s_max_rd_atomic;	/* max number of RDMA read/atomic to send */
	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */
	u8 s_flags;
	u8 s_dmult;
	u8 s_draining;
	u8 timeout;		/* Timeout for this QP */
	enum ib_mtu path_mtu;
	u32 remote_qpn;
	u32 qkey;		/* QKEY for this QP (for UD or RD) */
	u32 s_size;		/* send work queue size */
	u32 s_head;		/* new entries added here */
	u32 s_tail;		/* next entry to process */
	u32 s_cur;		/* current work queue entry */
	u32 s_last;		/* last un-ACK'ed entry */
	u32 s_ssn;		/* SSN of tail entry */
	u32 s_lsn;		/* limit sequence number (credit) */
	struct ipath_swqe *s_wq;	/* send work queue */
	struct ipath_swqe *s_wqe;
	struct ipath_sge *r_ud_sg_list;
	struct ipath_rq r_rq;		/* receive work queue */
	struct ipath_sge r_sg_list[0];	/* verified SGEs */
};

/*
 * Atomic bit definitions for r_aflags.
 */
#define IPATH_R_WRID_VALID	0

/*
 * Bit definitions for r_flags.
 */
#define IPATH_R_REUSE_SGE	0x01
#define IPATH_R_RDMAR_SEQ	0x02

/*
 * Bit definitions for s_flags.
 *
 * IPATH_S_FENCE_PENDING - waiting for all prior RDMA read or atomic SWQEs
 *			   to complete before processing the next SWQE
 * IPATH_S_RDMAR_PENDING - waiting for any pending RDMA read or atomic SWQEs
 *			   to complete before processing the next SWQE
 * IPATH_S_WAITING - waiting for an RNR timeout or for a send buffer to
 *		     become available
 * IPATH_S_WAIT_SSN_CREDIT - waiting for RC credits to process the next SWQE
 * IPATH_S_WAIT_DMA - waiting for the send DMA queue to drain before
 *		      generating the next send completion entry not sent
 *		      via send DMA
 */
#define IPATH_S_SIGNAL_REQ_WR	0x01
#define IPATH_S_FENCE_PENDING	0x02
#define IPATH_S_RDMAR_PENDING	0x04
#define IPATH_S_ACK_PENDING	0x08
#define IPATH_S_BUSY		0x10
#define IPATH_S_WAITING		0x20
#define IPATH_S_WAIT_SSN_CREDIT	0x40
#define IPATH_S_WAIT_DMA	0x80

#define IPATH_S_ANY_WAIT (IPATH_S_FENCE_PENDING | IPATH_S_RDMAR_PENDING | \
	IPATH_S_WAITING | IPATH_S_WAIT_SSN_CREDIT | IPATH_S_WAIT_DMA)

#define IPATH_PSN_CREDIT	512

/*
 * Since struct ipath_swqe is not a fixed size, we can't simply index into
 * struct ipath_qp.s_wq.  This function does the array index computation.
 */
static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
					      unsigned n)
{
	return (struct ipath_swqe *)((char *)qp->s_wq +
				     (sizeof(struct ipath_swqe) +
				      qp->s_max_sge *
				      sizeof(struct ipath_sge)) * n);
}
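
/*
 * Usage sketch (illustrative): each entry occupies
 * sizeof(struct ipath_swqe) + s_max_sge * sizeof(struct ipath_sge)
 * bytes, so walking the ring goes through the helper rather than
 * plain array indexing:
 *
 *	u32 n = qp->s_last;
 *
 *	while (n != qp->s_head) {
 *		struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
 *
 *		if (++n >= qp->s_size)
 *			n = 0;
 *	}
 */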

/*
 * Since struct ipath_rwqe is not a fixed size, we can't simply index into
 * struct ipath_rwq.wq.  This function does the array index computation.
 */
static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
					      unsigned n)
{
	return (struct ipath_rwqe *)
		((char *) rq->wq->wq +
		 (sizeof(struct ipath_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
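
/*
 * Consumption sketch (illustrative): the receiver pulls the entry at
 * wq->tail and advances the index with wraparound, mirroring how
 * posted work requests advance wq->head:
 *
 *	struct ipath_rwqe *wqe = get_rwqe_ptr(rq, rq->wq->tail);
 *
 *	if (rq->wq->tail + 1 >= rq->size)
 *		rq->wq->tail = 0;
 *	else
 *		rq->wq->tail++;
 */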

/*
 * QPN-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated.  This way, large bitmaps
 * are not allocated unless large numbers of QPs are used.
 */
struct qpn_map {
	atomic_t n_free;
	void *page;
};
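
/*
 * Allocation sketch (illustrative): a map page is only materialized
 * the first time a QPN in its range is needed, e.g.:
 *
 *	if (unlikely(!map->page)) {
 *		unsigned long page = get_zeroed_page(GFP_KERNEL);
 *
 *		if (page)
 *			map->page = (void *) page;
 *		... otherwise fail, or retry if another CPU raced ...
 *	}
 */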

struct ipath_qp_table {
	spinlock_t lock;
	u32 last;		/* last QP number allocated */
	u32 max;		/* size of the hash table */
	u32 nmaps;		/* size of the map table */
	struct ipath_qp **table;
	/* bit map of free numbers */
	struct qpn_map map[QPNMAP_ENTRIES];
};

struct ipath_lkey_table {
	spinlock_t lock;
	u32 next;		/* next unused index (speeds search) */
	u32 gen;		/* generation count */
	u32 max;		/* size of the table */
	struct ipath_mregion **table;
};

struct ipath_opcode_stats {
	u64 n_packets;		/* number of packets */
	u64 n_bytes;		/* total number of bytes */
};

struct ipath_ibdev {
	struct ib_device ibdev;
	struct ipath_devdata *dd;
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock;
	u32 mmap_offset;
	int ib_unit;		/* This is the device number */
	u16 sm_lid;		/* in host order */
	u8 sm_sl;
	u8 mkeyprot;
	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;

	/* The following fields are really per port. */
	struct ipath_qp_table qp_table;
	struct ipath_lkey_table lk_table;
	struct list_head pending[3];	/* FIFO of QPs waiting for ACKs */
	struct list_head piowait;	/* list of QPs waiting for a PIO buffer */
	struct list_head txreq_free;
	void *txreq_bufs;
	/* list of QPs waiting for RNR timer */
	struct list_head rnrwait;
	spinlock_t pending_lock;
	__be64 sys_image_guid;	/* in network order */
	__be64 gid_prefix;	/* in network order */
	__be64 mkey;

	u32 n_pds_allocated;	/* number of PDs allocated for device */
	spinlock_t n_pds_lock;
	u32 n_ahs_allocated;	/* number of AHs allocated for device */
	spinlock_t n_ahs_lock;
	u32 n_cqs_allocated;	/* number of CQs allocated for device */
	spinlock_t n_cqs_lock;
	u32 n_qps_allocated;	/* number of QPs allocated for device */
	spinlock_t n_qps_lock;
	u32 n_srqs_allocated;	/* number of SRQs allocated for device */
	spinlock_t n_srqs_lock;
	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;

	u64 ipath_sword;	/* total dwords sent (sample result) */
	u64 ipath_rword;	/* total dwords received (sample result) */
	u64 ipath_spkts;	/* total packets sent (sample result) */
	u64 ipath_rpkts;	/* total packets received (sample result) */
	/* # of ticks no data sent (sample result) */
	u64 ipath_xmit_wait;
	u64 rcv_errors;		/* # of packets with SW detected rcv errs */
	u64 n_unicast_xmit;	/* total unicast packets sent */
	u64 n_unicast_rcv;	/* total unicast packets received */
	u64 n_multicast_xmit;	/* total multicast packets sent */
	u64 n_multicast_rcv;	/* total multicast packets received */
	u64 z_symbol_error_counter;		/* starting count for PMA */
	u64 z_link_error_recovery_counter;	/* starting count for PMA */
	u64 z_link_downed_counter;		/* starting count for PMA */
	u64 z_port_rcv_errors;			/* starting count for PMA */
	u64 z_port_rcv_remphys_errors;		/* starting count for PMA */
	u64 z_port_xmit_discards;		/* starting count for PMA */
	u64 z_port_xmit_data;			/* starting count for PMA */
	u64 z_port_rcv_data;			/* starting count for PMA */
	u64 z_port_xmit_packets;		/* starting count for PMA */
	u64 z_port_rcv_packets;			/* starting count for PMA */
	u32 z_pkey_violations;			/* starting count for PMA */
	u32 z_local_link_integrity_errors;	/* starting count for PMA */
	u32 z_excessive_buffer_overrun_errors;	/* starting count for PMA */
	u32 z_vl15_dropped;			/* starting count for PMA */
	u32 n_rc_resends;
	u32 n_rc_acks;
	u32 n_rc_qacks;
	u32 n_seq_naks;
	u32 n_rdma_seq;
	u32 n_rnr_naks;
	u32 n_other_naks;
	u32 n_timeouts;
	u32 n_pkt_drops;
	u32 n_vl15_dropped;
	u32 n_wqe_errs;
	u32 n_rdma_dup_busy;
	u32 n_piowait;
	u32 n_unaligned;
	u32 port_cap_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 qkey_violations;
	u16 mkey_violations;
	u16 mkey_lease_period;
	u16 pending_index;	/* which pending queue is active */
	u8 pma_sample_status;
	u8 subnet_timeout;
	u8 vl_high_limit;
	struct ipath_opcode_stats opstats[128];
};

struct ipath_verbs_counters {
	u64 symbol_error_counter;
	u64 link_error_recovery_counter;
	u64 link_downed_counter;
	u64 port_rcv_errors;
	u64 port_rcv_remphys_errors;
	u64 port_xmit_discards;
	u64 port_xmit_data;
	u64 port_rcv_data;
	u64 port_xmit_packets;
	u64 port_rcv_packets;
	u32 local_link_integrity_errors;
	u32 excessive_buffer_overrun_errors;
	u32 vl15_dropped;
};

struct ipath_verbs_txreq {
	struct ipath_qp         *qp;
	struct ipath_swqe       *wqe;
	u32                      map_len;
	u32                      len;
	struct ipath_sge_state  *ss;
	struct ipath_pio_header  hdr;
	struct ipath_sdma_txreq  txreq;
};

static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ipath_mr, ibmr);
}

static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ipath_pd, ibpd);
}

static inline struct ipath_ah *to_iah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ipath_ah, ibah);
}

static inline struct ipath_cq *to_icq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ipath_cq, ibcq);
}

static inline struct ipath_srq *to_isrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct ipath_srq, ibsrq);
}

static inline struct ipath_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ipath_qp, ibqp);
}

static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ipath_ibdev, ibdev);
}

/*
 * This must be called with s_lock held.
 */
static inline void ipath_schedule_send(struct ipath_qp *qp)
{
	if (qp->s_flags & IPATH_S_ANY_WAIT)
		qp->s_flags &= ~IPATH_S_ANY_WAIT;
	if (!(qp->s_flags & IPATH_S_BUSY))
		tasklet_hi_schedule(&qp->s_task);
}
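
/*
 * Caller sketch (illustrative): since the helper above requires
 * s_lock, a typical call site takes the lock around it:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&qp->s_lock, flags);
 *	ipath_schedule_send(qp);
 *	spin_unlock_irqrestore(&qp->s_lock, flags);
 */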

int ipath_process_mad(struct ib_device *ibdev,
		      int mad_flags,
		      u8 port_num,
		      const struct ib_wc *in_wc,
		      const struct ib_grh *in_grh,
		      const struct ib_mad_hdr *in, size_t in_mad_size,
		      struct ib_mad_hdr *out, size_t *out_mad_size,
		      u16 *out_mad_pkey_index);

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int ipath_cmp24(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8;
}
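
/*
 * Worked example: the left shift by 8 moves bit 23 of the 24-bit
 * difference into the sign bit, so the sign of the result tracks the
 * signed 24-bit distance between PSNs even across wraparound, e.g.
 * ipath_cmp24(1, 0xffffff) > 0 (PSN 1 is two steps past 0xffffff),
 * and ipath_cmp24(5, 5) == 0.
 */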

struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);

int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
			    u64 *rwords, u64 *spkts, u64 *rpkts,
			    u64 *xmit_wait);

int ipath_get_counters(struct ipath_devdata *dd,
		       struct ipath_verbs_counters *cntrs);

int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int ipath_mcast_tree_empty(void);

__be32 ipath_compute_aeth(struct ipath_qp *qp);

struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);

struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata);

int ipath_destroy_qp(struct ib_qp *ibqp);

int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);

int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata);

int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned ipath_free_all_qps(struct ipath_qp_table *qpt);

int ipath_init_qp_table(struct ipath_ibdev *idev, int size);

void ipath_get_credit(struct ipath_qp *qp, u32 aeth);

unsigned ipath_ib_rate_to_mult(enum ib_rate rate);

int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
		     u32 hdrwords, struct ipath_sge_state *ss, u32 len);

void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);

void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);

void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

void ipath_restart_rc(struct ipath_qp *qp, u32 psn);

void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);

int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);

void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

int ipath_alloc_lkey(struct ipath_lkey_table *rkt,
		     struct ipath_mregion *mr);

void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey);

int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
		  struct ib_sge *sge, int acc);

int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
		  u32 len, u64 vaddr, u32 rkey, int acc);

int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			   struct ib_recv_wr **bad_wr);

struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
				struct ib_srq_init_attr *srq_init_attr,
				struct ib_udata *udata);

int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask,
		     struct ib_udata *udata);

int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int ipath_destroy_srq(struct ib_srq *ibsrq);

void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);

int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *ipath_create_cq(struct ib_device *ibdev,
			      const struct ib_cq_init_attr *attr,
			      struct ib_ucontext *context,
			      struct ib_udata *udata);

int ipath_destroy_cq(struct ib_cq *ibcq);

int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);

int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
				struct ib_phys_buf *buffer_list,
				int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				u64 virt_addr, int mr_access_flags,
				struct ib_udata *udata);

int ipath_dereg_mr(struct ib_mr *ibmr);

struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			       struct ib_fmr_attr *fmr_attr);

int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		       int list_len, u64 iova);

int ipath_unmap_fmr(struct list_head *fmr_list);

int ipath_dealloc_fmr(struct ib_fmr *ibfmr);

void ipath_release_mmap_info(struct kref *ref);

struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
					       u32 size,
					       struct ib_ucontext *context,
					       void *obj);

void ipath_update_mmap_info(struct ipath_ibdev *dev,
			    struct ipath_mmap_info *ip,
			    u32 size, void *obj);

int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

void ipath_insert_rnr_queue(struct ipath_qp *qp);

int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
		   u32 *lengthp, struct ipath_sge_state *ss);

int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);

u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
		   struct ib_global_route *grh, u32 hwords, u32 nwords);

void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
			   struct ipath_other_headers *ohdr,
			   u32 bth0, u32 bth2);

void ipath_do_send(unsigned long data);

void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
			 enum ib_wc_status status);

int ipath_make_rc_req(struct ipath_qp *qp);

int ipath_make_uc_req(struct ipath_qp *qp);

int ipath_make_ud_req(struct ipath_qp *qp);

int ipath_register_ib_device(struct ipath_devdata *);

void ipath_unregister_ib_device(struct ipath_ibdev *);

void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);

int ipath_ib_piobufavail(struct ipath_ibdev *);

unsigned ipath_get_npkeys(struct ipath_devdata *);

u32 ipath_get_cr_errpkey(struct ipath_devdata *);

unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);

extern const enum ib_wc_opcode ib_ipath_wc_opcode[];

/*
 * Below converts HCA-specific LinkTrainingState to IB PhysPortState
 * values.
 */
extern const u8 ipath_cvt_physportstate[];
#define IB_PHYSPORTSTATE_SLEEP 1
#define IB_PHYSPORTSTATE_POLL 2
#define IB_PHYSPORTSTATE_DISABLED 3
#define IB_PHYSPORTSTATE_CFG_TRAIN 4
#define IB_PHYSPORTSTATE_LINKUP 5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6

extern const int ib_ipath_state_ops[];

extern unsigned int ib_ipath_lkey_table_size;

extern unsigned int ib_ipath_max_cqes;

extern unsigned int ib_ipath_max_cqs;

extern unsigned int ib_ipath_max_qp_wrs;

extern unsigned int ib_ipath_max_qps;

extern unsigned int ib_ipath_max_sges;

extern unsigned int ib_ipath_max_mcast_grps;

extern unsigned int ib_ipath_max_mcast_qp_attached;

extern unsigned int ib_ipath_max_srqs;

extern unsigned int ib_ipath_max_srq_sges;

extern unsigned int ib_ipath_max_srq_wrs;

extern const u32 ib_ipath_rnr_table[];

extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;

#endif				/* IPATH_VERBS_H */
946