/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef RXE_LOC_H
#define RXE_LOC_H

/* rxe_av.c */

int rxe_av_chk_attr(struct rxe_dev *rxe, struct rdma_ah_attr *attr);

int rxe_av_from_attr(struct rxe_dev *rxe, u8 port_num,
		     struct rxe_av *av, struct rdma_ah_attr *attr);

int rxe_av_to_attr(struct rxe_dev *rxe, struct rxe_av *av,
		   struct rdma_ah_attr *attr);

int rxe_av_fill_ip_info(struct rxe_dev *rxe,
			struct rxe_av *av,
			struct rdma_ah_attr *attr,
			struct ib_gid_attr *sgid_attr,
			union ib_gid *sgid);

struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);
/* rxe_cq.c */
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector, struct ib_udata *udata);

int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_ucontext *context,
		     struct ib_udata *udata);

int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe, struct ib_udata *udata);

int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);

void rxe_cq_disable(struct rxe_cq *cq);

void rxe_cq_cleanup(struct rxe_pool_entry *arg);

/* rxe_mcast.c */
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
		      struct rxe_mc_grp **grp_p);

int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct rxe_mc_grp *grp);

int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
			    union ib_gid *mgid);

void rxe_drop_all_mcast_groups(struct rxe_qp *qp);

void rxe_mc_cleanup(struct rxe_pool_entry *arg);

/* rxe_mmap.c */
struct rxe_mmap_info {
	struct list_head	pending_mmaps;	/* list of mmaps not yet mapped */
	struct ib_ucontext	*context;	/* user context that owns the mapping */
	struct kref		ref;		/* released via rxe_mmap_release() */
	void			*obj;		/* kernel object exposed to user space */

	struct mminfo info;
};

void rxe_mmap_release(struct kref *ref);

struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev,
					   u32 size,
					   struct ib_ucontext *context,
					   void *obj);

int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

/* rxe_mr.c */
enum copy_direction {
	to_mem_obj,	/* copy data into the memory region */
	from_mem_obj,	/* copy data out of the memory region */
};

int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
		     int access, struct rxe_mem *mem);

int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access, struct ib_udata *udata,
		      struct rxe_mem *mr);

int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
		      int max_pages, struct rxe_mem *mem);

int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
		 int length, enum copy_direction dir, u32 *crcp);

int copy_data(struct rxe_dev *rxe, struct rxe_pd *pd, int access,
	      struct rxe_dma_info *dma, void *addr, int length,
	      enum copy_direction dir, u32 *crcp);

void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);

enum lookup_type {
	lookup_local,	/* look the MR up by its lkey */
	lookup_remote,	/* look the MR up by its rkey */
};

struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
			   enum lookup_type type);

int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);

int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
		      u64 *page, int num_pages, u64 iova);

void rxe_mem_cleanup(struct rxe_pool_entry *arg);

int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);

/* rxe_net.c */
int rxe_loopback(struct sk_buff *skb);
int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
	     struct sk_buff *skb);
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				int paylen, struct rxe_pkt_info *pkt);
int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		struct sk_buff *skb, u32 *crc);
enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
struct device *rxe_dma_device(struct rxe_dev *rxe);
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid);
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid);

/* rxe_qp.c */
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);

int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init, struct ib_udata *udata,
		     struct ib_pd *ibpd);

int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);

int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask);

int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr,
		     int mask, struct ib_udata *udata);

int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);

void rxe_qp_error(struct rxe_qp *qp);

void rxe_qp_destroy(struct rxe_qp *qp);

void rxe_qp_cleanup(struct rxe_pool_entry *arg);

static inline int qp_num(struct rxe_qp *qp)
{
	return qp->ibqp.qp_num;
}

static inline enum ib_qp_type qp_type(struct rxe_qp *qp)
{
	return qp->ibqp.qp_type;
}

static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
{
	return qp->attr.qp_state;
}

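/*
 * For connected QPs (RC/UC) return the negotiated path MTU; datagram
 * QPs are not bound to a path, so use the port maximum. The value is
 * an enum ib_mtu code, not a byte count.
 */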
static inline int qp_mtu(struct rxe_qp *qp)
{
	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		return qp->attr.path_mtu;
	else
		return RXE_PORT_MAX_MTU;
}

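/*
 * A receive WQE is a fixed header followed by a variable length array
 * of SGEs, so the receive queue element size depends on the max_sge
 * the QP or SRQ was created with. Since struct ib_sge is 16 bytes
 * (u64 addr, u32 length, u32 lkey), each additional SGE grows the
 * WQE by 16 bytes.
 */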
static inline int rcv_wqe_size(int max_sge)
{
	return sizeof(struct rxe_recv_wqe) +
		max_sge * sizeof(struct ib_sge);
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);

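/*
 * The responder keeps a circular array of max_dest_rd_atomic
 * resources for inbound RDMA read and atomic requests; advance the
 * head index and wrap it back to zero at the end of the array.
 */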
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
	qp->resp.res_head++;
	if (unlikely(qp->resp.res_head == qp->attr.max_dest_rd_atomic))
		qp->resp.res_head = 0;
}

void retransmit_timer(unsigned long data);
void rnr_nak_timer(unsigned long data);

/* rxe_srq.c */
/* check every create-time attribute except the SRQ limit */
#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)

int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);

int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init,
		      struct ib_ucontext *context, struct ib_udata *udata);

int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
		      struct ib_udata *udata);

void rxe_release(struct kref *kref);

int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);

u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);

void rxe_resp_queue_pkt(struct rxe_dev *rxe,
			struct rxe_qp *qp, struct sk_buff *skb);

void rxe_comp_queue_pkt(struct rxe_dev *rxe,
			struct rxe_qp *qp, struct sk_buff *skb);

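/*
 * Look up the validity mask for a work request opcode; which opcodes
 * are legal depends on the QP type (e.g. RDMA READ is meaningless on
 * a UD QP).
 */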
static inline unsigned int wr_opcode_mask(int opcode, struct rxe_qp *qp)
{
	return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
}

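/*
 * Transmit one packet: verify the QP is ready (requester state for
 * request packets, responder state for responses), then either hand
 * the skb straight back to the receive path for loopback or send it
 * out through the network stack. For non-RC QPs the last packet of a
 * WQE completes it immediately, since no ACK will arrive.
 */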
static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	int err;
	int is_request = pkt->mask & RXE_REQ_MASK;

	if ((is_request && (qp->req.state != QP_STATE_READY)) ||
	    (!is_request && (qp->resp.state != QP_STATE_READY))) {
		pr_info("Packet dropped. QP is not in ready state\n");
		goto drop;
	}

	if (pkt->mask & RXE_LOOPBACK_MASK) {
		memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
		err = rxe_loopback(skb);
	} else {
		err = rxe_send(rxe, pkt, skb);
	}

	if (err) {
		rxe->xmit_errors++;
		rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
		return err;
	}

	if ((qp_type(qp) != IB_QPT_RC) &&
	    (pkt->mask & RXE_END_MASK)) {
		pkt->wqe->state = wqe_state_done;
		rxe_run_task(&qp->comp.task, 1);
	}

	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
	goto done;

drop:
	kfree_skb(skb);
	err = 0;
done:
	return err;
}
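
/*
 * Typical transmit sequence (a sketch only; the real callers live in
 * rxe_req.c and rxe_resp.c, and error handling is omitted):
 *
 *	skb = rxe_init_packet(rxe, av, paylen, &pkt);
 *	...build headers and payload into the skb...
 *	rxe_prepare(rxe, &pkt, skb, &crc);
 *	err = rxe_xmit_packet(rxe, qp, &pkt, skb);
 */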

#endif /* RXE_LOC_H */