/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_ib.h"
#include "usnic_common_util.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_fwd.h"
#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_transport.h"

#define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
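
/*
 * Note: only the first character of the ethtool firmware version string is
 * encoded into the 64-bit fw_ver value reported to userspace.
 */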
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
	*fw_ver = (u64) *fw_ver_str;
}
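
/*
 * Fill the create_qp response for userspace: VF index, BAR0 bus address and
 * length, the RQ/WQ/CQ vnic resource indices of the QP group, and the
 * transport type of its default flow, then copy the response through udata.
 */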
static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
					struct ib_udata *udata)
{
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_create_qp_resp resp;
	struct pci_dev *pdev;
	struct vnic_dev_bar *bar;
	struct usnic_vnic_res_chunk *chunk;
	struct usnic_ib_qp_grp_flow *default_flow;
	int i, err;

	memset(&resp, 0, sizeof(resp));

	us_ibdev = qp_grp->vf->pf;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (!pdev) {
		usnic_err("Failed to get pdev of qp_grp %d\n",
				qp_grp->grp_id);
		return -EFAULT;
	}

	bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
	if (!bar) {
		usnic_err("Failed to get bar0 of qp_grp %d vf %s",
				qp_grp->grp_id, pci_name(pdev));
		return -EFAULT;
	}

	resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
	resp.bar_bus_addr = bar->bus_addr;
	resp.bar_len = bar->len;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
	resp.rq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.rq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
	resp.wq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.wq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
	resp.cq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.cq_idx[i] = chunk->res[i]->vnic_idx;

	default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
	resp.transport = default_flow->trans_type;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
		return err;
	}

	return 0;
}
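
/*
 * Find a VF with enough vnic resources for res_spec and create a QP group
 * on it.  With usnic_ib_share_vf set, VFs already used by this PD are
 * preferred; otherwise only a completely unused VF is taken.  Caller must
 * hold us_ibdev->usdev_lock.
 */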
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
				struct usnic_ib_pd *pd,
				struct usnic_transport_spec *trans_spec,
				struct usnic_vnic_res_spec *res_spec)
{
	struct usnic_ib_vf *vf;
	struct usnic_vnic *vnic;
	struct usnic_ib_qp_grp *qp_grp;
	struct device *dev, **dev_list;
	int i, found = 0;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	if (list_empty(&us_ibdev->vf_dev_list)) {
		usnic_info("No vfs to allocate\n");
		return NULL;
	}

	if (usnic_ib_share_vf) {
		/* Try to find resources on a used vf which is in pd */
		dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
		for (i = 0; dev_list[i]; i++) {
			dev = dev_list[i];
			vf = pci_get_drvdata(to_pci_dev(dev));
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (!usnic_vnic_check_room(vnic, res_spec)) {
				usnic_dbg("Found used vnic %s from %s\n",
						us_ibdev->ib_dev.name,
						pci_name(usnic_vnic_get_pdev(
								vnic)));
				found = 1;
				break;
			}
			spin_unlock(&vf->lock);
		}
		usnic_uiom_free_dev_list(dev_list);
		dev_list = NULL;
	}

	if (!found) {
		/* Try to find resources on an unused vf */
		list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (vf->qp_grp_ref_cnt == 0 &&
				usnic_vnic_check_room(vnic, res_spec) == 0) {
				found = 1;
				break;
			}
			spin_unlock(&vf->lock);
		}
	}

	if (!found) {
		usnic_info("No free qp grp found on %s\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-ENOMEM);
	}

	qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, pd, res_spec,
						trans_spec);
	spin_unlock(&vf->lock);
	if (IS_ERR_OR_NULL(qp_grp)) {
		usnic_err("Failed to allocate qp_grp\n");
		if (usnic_ib_share_vf)
			usnic_uiom_free_dev_list(dev_list);
		return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
	}

	return qp_grp;
}

static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_vf *vf = qp_grp->vf;

	WARN_ON(qp_grp->state != IB_QPS_RESET);

	spin_lock(&vf->lock);
	usnic_ib_qp_grp_destroy(qp_grp);
	spin_unlock(&vf->lock);
}
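
/*
 * Map an Ethernet link speed (in Mb/s, as reported by ethtool) to an
 * approximate IB width/speed pair for the port attributes.
 */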
static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
					u8 *active_width)
{
	if (speed <= 10000) {
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_FDR10;
	} else if (speed <= 20000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_DDR;
	} else if (speed <= 30000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
	} else if (speed <= 40000) {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR10;
	} else {
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
	}
}

static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
{
	if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
			cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
		return -EINVAL;

	return 0;
}

/* Start of ib callback functions */

enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
						u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

int usnic_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	union ib_gid gid;
	struct ethtool_drvinfo info;
	struct ethtool_cmd cmd;
	int qp_per_vf;

	usnic_dbg("\n");
	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
	memset(props, 0, sizeof(*props));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid.raw[0]);
	memcpy(&props->sys_image_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
	props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
	props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
	props->vendor_id = PCI_VENDOR_ID_CISCO;
	props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
	props->hw_ver = us_ibdev->pdev->subsystem_device;
	qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
			us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
	props->max_qp = qp_per_vf *
		atomic_read(&us_ibdev->vf_cnt.refcount);
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
		atomic_read(&us_ibdev->vf_cnt.refcount);
	props->max_pd = USNIC_UIOM_MAX_PD_CNT;
	props->max_mr = USNIC_UIOM_MAX_MR_CNT;
	props->local_ca_ack_delay = 0;
	props->max_pkeys = 0;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_qp_rd_atom = 0;
	props->max_qp_init_rd_atom = 0;
	props->max_res_rd_atom = 0;
	props->max_srq = 0;
	props->max_srq_wr = 0;
	props->max_srq_sge = 0;
	props->max_fast_reg_page_list_len = 0;
	props->max_mcast_grp = 0;
	props->max_mcast_qp_attach = 0;
	props->max_total_mcast_qp_attach = 0;
	props->max_map_per_fmr = 0;
	/* Owned by Userspace
	 * max_qp_wr, max_sge, max_sge_rd, max_cqe */
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
				struct ib_port_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	struct ethtool_cmd cmd;

	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
	memset(props, 0, sizeof(*props));

	props->lid = 0;
	props->lmc = 1;
	props->sm_lid = 0;
	props->sm_sl = 0;

	if (!us_ibdev->ufdev->link_up) {
		props->state = IB_PORT_DOWN;
		props->phys_state = 3;
	} else if (!us_ibdev->ufdev->inaddr) {
		props->state = IB_PORT_INIT;
		props->phys_state = 4;
	} else {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	props->port_cap_flags = 0;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	eth_speed_to_ib_speed(cmd.speed, &props->active_speed,
				&props->active_width);
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
	/* Userspace will adjust for hdrs */
	props->max_msg_sz = us_ibdev->ufdev->mtu;
	props->max_vl_num = 1;
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	int err;

	usnic_dbg("\n");

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	usnic_dbg("\n");
	qp_attr->qp_state = qp_grp->state;
	qp_attr->cur_qp_state = qp_grp->state;

	switch (qp_grp->ibqp.qp_type) {
	case IB_QPT_UD:
		qp_attr->qkey = 0;
		break;
	default:
		usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
		err = -EINVAL;
		goto err_out;
	}

	mutex_unlock(&vf->pf->usdev_lock);
	return 0;

err_out:
	mutex_unlock(&vf->pf->usdev_lock);
	return err;
}

int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
				union ib_gid *gid)
{

	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	usnic_dbg("\n");

	if (index > 1)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid->raw[0]);
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}

int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
				u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
					struct ib_ucontext *context,
					struct ib_udata *udata)
{
	struct usnic_ib_pd *pd;
	void *umem_pd;

	usnic_dbg("\n");

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
	if (IS_ERR_OR_NULL(umem_pd)) {
		kfree(pd);
		return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
	}

	usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
			pd, context, ibdev->name);
	return &pd->ibpd;
}

int usnic_ib_dealloc_pd(struct ib_pd *pd)
{
	usnic_info("freeing domain 0x%p\n", pd);

	usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
	kfree(pd);
	return 0;
}
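
/*
 * Create a UD QP for userspace: copy and validate the usnic create_qp
 * command from udata, pick a VF with room for the requested resources,
 * build the QP group, and return the resource layout in the udata response.
 */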
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
					struct ib_qp_init_attr *init_attr,
					struct ib_udata *udata)
{
	int err;
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_ucontext *ucontext;
	int cq_cnt;
	struct usnic_vnic_res_spec res_spec;
	struct usnic_ib_create_qp_cmd cmd;
	struct usnic_transport_spec trans_spec;

	usnic_dbg("\n");

	ucontext = to_uucontext(pd->uobject->context);
	us_ibdev = to_usdev(pd->device);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
	if (err) {
		usnic_err("%s: cannot copy udata for create_qp\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	err = create_qp_validate_user_data(cmd);
	if (err) {
		usnic_err("%s: Failed to validate user data\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->qp_type != IB_QPT_UD) {
		usnic_err("%s asked to make a non-UD QP: %d\n",
				us_ibdev->ib_dev.name, init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	trans_spec = cmd.spec;
	mutex_lock(&us_ibdev->usdev_lock);
	cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
	res_spec = min_transport_spec[trans_spec.trans_type];
	usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
	qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
						&trans_spec,
						&res_spec);
	if (IS_ERR_OR_NULL(qp_grp)) {
		err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
		goto out_release_mutex;
	}

	err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
	if (err) {
		err = -EBUSY;
		goto out_release_qp_grp;
	}

	qp_grp->ctx = ucontext;
	list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
	usnic_ib_log_vf(qp_grp->vf);
	mutex_unlock(&us_ibdev->usdev_lock);
	return &qp_grp->ibqp;

out_release_qp_grp:
	qp_grp_destroy(qp_grp);
out_release_mutex:
	mutex_unlock(&us_ibdev->usdev_lock);
	return ERR_PTR(err);
}

int usnic_ib_destroy_qp(struct ib_qp *qp)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
		usnic_err("Failed to move qp grp %u to reset\n",
				qp_grp->grp_id);
	}

	list_del(&qp_grp->link);
	qp_grp_destroy(qp_grp);
	mutex_unlock(&vf->pf->usdev_lock);

	return 0;
}

int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				int attr_mask, struct ib_udata *udata)
{
	struct usnic_ib_qp_grp *qp_grp;
	int status;
	usnic_dbg("\n");

	qp_grp = to_uqp_grp(ibqp);

	/* TODO: Future Support All States */
	mutex_lock(&qp_grp->vf->pf->usdev_lock);
	if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT) {
		status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_INIT, NULL);
	} else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTR) {
		status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTR, NULL);
	} else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTS) {
		status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTS, NULL);
	} else {
		usnic_err("Unexpected combination mask: %u state: %u\n",
				attr_mask & IB_QP_STATE, attr->qp_state);
		status = -EINVAL;
	}

	mutex_unlock(&qp_grp->vf->pf->usdev_lock);
	return status;
}

struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev,
				 const struct ib_cq_init_attr *attr,
				 struct ib_ucontext *context,
				 struct ib_udata *udata)
{
	struct ib_cq *cq;

	usnic_dbg("\n");
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-EBUSY);

	return cq;
}

int usnic_ib_destroy_cq(struct ib_cq *cq)
{
	usnic_dbg("\n");
	kfree(cq);
	return 0;
}

struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
					u64 virt_addr, int access_flags,
					struct ib_udata *udata)
{
	struct usnic_ib_mr *mr;
	int err;

	usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
			virt_addr, length);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (IS_ERR_OR_NULL(mr))
		return ERR_PTR(mr ? PTR_ERR(mr) : -ENOMEM);

	mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
					access_flags, 0);
	if (IS_ERR_OR_NULL(mr->umem)) {
		err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
		goto err_free;
	}

	mr->ibmr.lkey = mr->ibmr.rkey = 0;
	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

int usnic_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct usnic_ib_mr *mr = to_umr(ibmr);

	usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);

	usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
	kfree(mr);
	return 0;
}

struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
						struct ib_udata *udata)
{
	struct usnic_ib_ucontext *context;
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	usnic_dbg("\n");

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&context->qp_grp_list);
	mutex_lock(&us_ibdev->usdev_lock);
	list_add_tail(&context->link, &us_ibdev->ctx_list);
	mutex_unlock(&us_ibdev->usdev_lock);

	return &context->ibucontext;
}

int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
	struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);
	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	BUG_ON(!list_empty(&context->qp_grp_list));
	list_del(&context->link);
	mutex_unlock(&us_ibdev->usdev_lock);
	kfree(context);
	return 0;
}
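
/*
 * mmap handler: vm_pgoff carries the VF index; map BAR0 of the matching VF
 * (owned by one of this context's QP groups) into userspace.
 */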
int usnic_ib_mmap(struct ib_ucontext *context,
			struct vm_area_struct *vma)
{
	struct usnic_ib_ucontext *uctx = to_ucontext(context);
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	struct vnic_dev_bar *bar;
	dma_addr_t bus_addr;
	unsigned int len;
	unsigned int vfid;

	usnic_dbg("\n");

	us_ibdev = to_usdev(context->device);
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vfid = vma->vm_pgoff;
	usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
			vma->vm_pgoff, PAGE_SHIFT, vfid);

	mutex_lock(&us_ibdev->usdev_lock);
	list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
		vf = qp_grp->vf;
		if (usnic_vnic_get_index(vf->vnic) == vfid) {
			bar = usnic_vnic_get_bar(vf->vnic, 0);
			if ((vma->vm_end - vma->vm_start) != bar->len) {
				usnic_err("Bar0 Len %lu - Request map %lu\n",
						bar->len,
						vma->vm_end - vma->vm_start);
				mutex_unlock(&us_ibdev->usdev_lock);
				return -EINVAL;
			}
			bus_addr = bar->bus_addr;
			len = bar->len;
			usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
					&bus_addr, bar->vaddr, bar->len);
			mutex_unlock(&us_ibdev->usdev_lock);

			return remap_pfn_range(vma,
						vma->vm_start,
						bus_addr >> PAGE_SHIFT,
						len, vma->vm_page_prot);
		}
	}

	mutex_unlock(&us_ibdev->usdev_lock);
	usnic_err("No VF %u found\n", vfid);
	return -EINVAL;
}

/* In ib callbacks section - Start of stub funcs */
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
				 struct ib_ah_attr *ah_attr)
{
	usnic_dbg("\n");
	return ERR_PTR(-EPERM);
}

int usnic_ib_destroy_ah(struct ib_ah *ah)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
				struct ib_recv_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
				struct ib_wc *wc)
{
	usnic_dbg("\n");
	return -EINVAL;
}

int usnic_ib_req_notify_cq(struct ib_cq *cq,
					enum ib_cq_notify_flags flags)
{
	usnic_dbg("\n");
	return -EINVAL;
}

struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	usnic_dbg("\n");
	return ERR_PTR(-ENOMEM);
}


/* In ib callbacks section - End of stub funcs */
/* End of ib callbacks section */