/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef RXE_LOC_H
#define RXE_LOC_H
/* rxe_av.c */
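/*
 * Address vector (AV) helpers: validate an ib_ah_attr against device
 * limits, convert between the verbs representation and the internal
 * struct rxe_av, and fill in the IP addressing used on the wire.
 */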

int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr);

int rxe_av_from_attr(struct rxe_dev *rxe, u8 port_num,
                     struct rxe_av *av, struct ib_ah_attr *attr);

int rxe_av_to_attr(struct rxe_dev *rxe, struct rxe_av *av,
                   struct ib_ah_attr *attr);

int rxe_av_fill_ip_info(struct rxe_dev *rxe,
                        struct rxe_av *av,
                        struct ib_ah_attr *attr,
                        struct ib_gid_attr *sgid_attr,
                        union ib_gid *sgid);

struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);

/* rxe_cq.c */
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
                    int cqe, int comp_vector, struct ib_udata *udata);

int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
                     int comp_vector, struct ib_ucontext *context,
                     struct ib_udata *udata);

int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe, struct ib_udata *udata);

int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);

void rxe_cq_cleanup(void *arg);

/* rxe_mcast.c */
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
                      struct rxe_mc_grp **grp_p);

int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
                           struct rxe_mc_grp *grp);

int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
                            union ib_gid *mgid);

void rxe_drop_all_mcast_groups(struct rxe_qp *qp);

void rxe_mc_cleanup(void *arg);

/* rxe_mmap.c */
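/*
 * Bookkeeping for a kernel buffer that userspace will mmap(): entries
 * sit on the device's pending_mmaps list via this list_head and are
 * matched by offset in rxe_mmap(); the kref keeps the object alive
 * while a mapping exists.
 */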
struct rxe_mmap_info {
        struct list_head        pending_mmaps;
        struct ib_ucontext      *context;
        struct kref             ref;
        void                    *obj;

        struct mminfo info;
};

void rxe_mmap_release(struct kref *ref);

struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev,
                                           u32 size,
                                           struct ib_ucontext *context,
                                           void *obj);

int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

/* rxe_mr.c */
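/*
 * Direction argument for rxe_mem_copy() and copy_data(): to_mem_obj
 * copies payload into the memory object, from_mem_obj copies out of it.
 */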
enum copy_direction {
        to_mem_obj,
        from_mem_obj,
};

int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
                     int access, struct rxe_mem *mem);

int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
                      u64 length, u64 iova, int access, struct ib_udata *udata,
                      struct rxe_mem *mr);

int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
                      int max_pages, struct rxe_mem *mem);

int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
                 int length, enum copy_direction dir, u32 *crcp);

int copy_data(struct rxe_dev *rxe, struct rxe_pd *pd, int access,
              struct rxe_dma_info *dma, void *addr, int length,
              enum copy_direction dir, u32 *crcp);

void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);

enum lookup_type {
        lookup_local,
        lookup_remote,
};

struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
                           enum lookup_type type);

int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);

int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
                      u64 *page, int num_pages, u64 iova);

void rxe_mem_cleanup(void *arg);

int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);

/* rxe_qp.c */
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);

int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
                     struct ib_qp_init_attr *init, struct ib_udata *udata,
                     struct ib_pd *ibpd);

int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);

int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
                    struct ib_qp_attr *attr, int mask);

int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr,
                     int mask, struct ib_udata *udata);

int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);

void rxe_qp_error(struct rxe_qp *qp);

void rxe_qp_destroy(struct rxe_qp *qp);

void rxe_qp_cleanup(void *arg);

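/* QP number assigned by the verbs core. */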
static inline int qp_num(struct rxe_qp *qp)
{
        return qp->ibqp.qp_num;
}

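/* Transport type of the QP (RC, UC, UD, ...). */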
static inline enum ib_qp_type qp_type(struct rxe_qp *qp)
{
        return qp->ibqp.qp_type;
}

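/* Current QP state as cached in the QP attributes. */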
static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
{
        return qp->attr.qp_state;
}

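/*
 * Applicable MTU as an enum ib_mtu value: the negotiated path MTU for
 * connected (RC/UC) QPs, the port maximum otherwise. Callers needing a
 * byte count convert the enum, e.g. with ib_mtu_enum_to_int().
 */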
static inline int qp_mtu(struct rxe_qp *qp)
{
        if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
                return qp->attr.path_mtu;
        else
                return RXE_PORT_MAX_MTU;
}

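/*
 * Size in bytes of a receive WQE with room for max_sge scatter/gather
 * entries, which are laid out inline after the fixed part of the WQE.
 * Illustrative use when sizing receive queue elements (a sketch, not a
 * verbatim call site):
 *
 *      wqe_size = rcv_wqe_size(qp->rq.max_sge);
 */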
static inline int rcv_wqe_size(int max_sge)
{
        return sizeof(struct rxe_recv_wqe) +
               max_sge * sizeof(struct ib_sge);
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);

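/*
 * Advance the responder's circular index into the rd/atomic resource
 * array, wrapping at max_dest_rd_atomic.
 */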
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
        qp->resp.res_head++;
        if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
                qp->resp.res_head = 0;
}

void retransmit_timer(unsigned long data);
void rnr_nak_timer(unsigned long data);

void dump_qp(struct rxe_qp *qp);

/* rxe_srq.c */
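/*
 * All SRQ attribute bits except IB_SRQ_LIMIT; used to run every check
 * other than the limit check when validating create-time attributes.
 */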
#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)

int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
                     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);

int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
                      struct ib_srq_init_attr *init,
                      struct ib_ucontext *context, struct ib_udata *udata);

int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
                      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
                      struct ib_udata *udata);

extern struct ib_dma_mapping_ops rxe_dma_mapping_ops;

void rxe_release(struct kref *kref);

int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);

u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);

void rxe_resp_queue_pkt(struct rxe_dev *rxe,
                        struct rxe_qp *qp, struct sk_buff *skb);

void rxe_comp_queue_pkt(struct rxe_dev *rxe,
                        struct rxe_qp *qp, struct sk_buff *skb);

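/*
 * Validity mask for a send WR opcode on this QP's transport type, from
 * the rxe_wr_opcode_info table; a zero mask means the opcode is not
 * usable on this QP.
 */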
static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
{
        return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
}

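/*
 * Transmit a fully built packet: drop it if the QP is not ready, loop
 * self-addressed packets back, and on the final packet of a message on
 * a non-RC QP mark the WQE done and kick the completer task, since no
 * ACK will arrive to trigger completion.
 */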
static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
                                  struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
        int err;
        int is_request = pkt->mask & RXE_REQ_MASK;

        if ((is_request && (qp->req.state != QP_STATE_READY)) ||
            (!is_request && (qp->resp.state != QP_STATE_READY))) {
                pr_info("Packet dropped. QP is not in ready state\n");
                goto drop;
        }

        if (pkt->mask & RXE_LOOPBACK_MASK) {
                memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
                err = rxe->ifc_ops->loopback(skb);
        } else {
                err = rxe->ifc_ops->send(rxe, pkt, skb);
        }

        if (err) {
                rxe->xmit_errors++;
                return err;
        }

        atomic_inc(&qp->skb_out);

        if ((qp_type(qp) != IB_QPT_RC) &&
            (pkt->mask & RXE_END_MASK)) {
                pkt->wqe->state = wqe_state_done;
                rxe_run_task(&qp->comp.task, 1);
        }

        goto done;

drop:
        kfree_skb(skb);
        err = 0;
done:
        return err;
}
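/*
 * Illustrative call sketch (not taken verbatim from a caller): build the
 * packet info and skb, then
 *
 *      err = rxe_xmit_packet(rxe, qp, &pkt, skb);
 *
 * and on error decide whether to requeue the work or complete it in error.
 */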

#endif /* RXE_LOC_H */