1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3 * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
4 */
5
6 #include <linux/vmalloc.h>
7 #include <linux/log2.h>
8
9 #include <rdma/ib_addr.h>
10 #include <rdma/ib_umem.h>
11 #include <rdma/ib_user_verbs.h>
12 #include <rdma/ib_verbs.h>
13 #include <rdma/uverbs_ioctl.h>
14
15 #include "efa.h"
16
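/*
 * mmap entry types handed to userspace (see __efa_mmap() below): DMA_PAGE
 * entries map driver-allocated pages with vm_insert_page(), while IO_WC and
 * IO_NC entries map device BAR ranges through rdma_user_mmap_io() with
 * write-combined or non-cached page protection, respectively.
 */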
17 enum {
18 EFA_MMAP_DMA_PAGE = 0,
19 EFA_MMAP_IO_WC,
20 EFA_MMAP_IO_NC,
21 };
22
23 #define EFA_AENQ_ENABLED_GROUPS \
24 (BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
25 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
26
27 struct efa_user_mmap_entry {
28 struct rdma_user_mmap_entry rdma_entry;
29 u64 address;
30 u8 mmap_flag;
31 };
32
33 #define EFA_DEFINE_DEVICE_STATS(op) \
34 op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
35 op(EFA_COMPLETED_CMDS, "completed_cmds") \
36 op(EFA_CMDS_ERR, "cmds_err") \
37 op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
38 op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
39 op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
40 op(EFA_CREATE_QP_ERR, "create_qp_err") \
41 op(EFA_CREATE_CQ_ERR, "create_cq_err") \
42 op(EFA_REG_MR_ERR, "reg_mr_err") \
43 op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
44 op(EFA_CREATE_AH_ERR, "create_ah_err") \
45 op(EFA_MMAP_ERR, "mmap_err")
46
47 #define EFA_DEFINE_PORT_STATS(op) \
48 op(EFA_TX_BYTES, "tx_bytes") \
49 op(EFA_TX_PKTS, "tx_pkts") \
50 op(EFA_RX_BYTES, "rx_bytes") \
51 op(EFA_RX_PKTS, "rx_pkts") \
52 op(EFA_RX_DROPS, "rx_drops") \
53 op(EFA_SEND_BYTES, "send_bytes") \
54 op(EFA_SEND_WRS, "send_wrs") \
55 op(EFA_RECV_BYTES, "recv_bytes") \
56 op(EFA_RECV_WRS, "recv_wrs") \
57 op(EFA_RDMA_READ_WRS, "rdma_read_wrs") \
58 op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
59 op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
60 op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
61
62 #define EFA_STATS_ENUM(ename, name) ename,
63 #define EFA_STATS_STR(ename, name) [ename] = name,
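/*
 * The stat lists above are X-macros: expanding them with EFA_STATS_ENUM
 * produces the enum values and expanding them with EFA_STATS_STR produces the
 * matching name strings, keeping the enums and the *_stats_names[] arrays
 * below in sync from a single definition.
 */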
64
65 enum efa_hw_device_stats {
66 EFA_DEFINE_DEVICE_STATS(EFA_STATS_ENUM)
67 };
68
69 static const char *const efa_device_stats_names[] = {
70 EFA_DEFINE_DEVICE_STATS(EFA_STATS_STR)
71 };
72
73 enum efa_hw_port_stats {
74 EFA_DEFINE_PORT_STATS(EFA_STATS_ENUM)
75 };
76
77 static const char *const efa_port_stats_names[] = {
78 EFA_DEFINE_PORT_STATS(EFA_STATS_STR)
79 };
80
81 #define EFA_CHUNK_PAYLOAD_SHIFT 12
82 #define EFA_CHUNK_PAYLOAD_SIZE BIT(EFA_CHUNK_PAYLOAD_SHIFT)
83 #define EFA_CHUNK_PAYLOAD_PTR_SIZE 8
84
85 #define EFA_CHUNK_SHIFT 12
86 #define EFA_CHUNK_SIZE BIT(EFA_CHUNK_SHIFT)
87 #define EFA_CHUNK_PTR_SIZE sizeof(struct efa_com_ctrl_buff_info)
88
89 #define EFA_PTRS_PER_CHUNK \
90 ((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)
91
92 #define EFA_CHUNK_USED_SIZE \
93 ((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)
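/*
 * Each 4KB chunk stores EFA_PTRS_PER_CHUNK 8-byte page addresses followed by
 * one struct efa_com_ctrl_buff_info describing the next chunk in the list.
 * As a rough sketch of the arithmetic, assuming the control buffer info is
 * 12 bytes: EFA_PTRS_PER_CHUNK = (4096 - 12) / 8 = 510 and
 * EFA_CHUNK_USED_SIZE = 510 * 8 + 12 = 4092 bytes of each chunk.
 */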
94
95 struct pbl_chunk {
96 dma_addr_t dma_addr;
97 u64 *buf;
98 u32 length;
99 };
100
101 struct pbl_chunk_list {
102 struct pbl_chunk *chunks;
103 unsigned int size;
104 };
105
106 struct pbl_context {
107 union {
108 struct {
109 dma_addr_t dma_addr;
110 } continuous;
111 struct {
112 u32 pbl_buf_size_in_pages;
113 struct scatterlist *sgl;
114 int sg_dma_cnt;
115 struct pbl_chunk_list chunk_list;
116 } indirect;
117 } phys;
118 u64 *pbl_buf;
119 u32 pbl_buf_size_in_bytes;
120 u8 physically_continuous;
121 };
122
123 static inline struct efa_dev *to_edev(struct ib_device *ibdev)
124 {
125 return container_of(ibdev, struct efa_dev, ibdev);
126 }
127
128 static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
129 {
130 return container_of(ibucontext, struct efa_ucontext, ibucontext);
131 }
132
133 static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
134 {
135 return container_of(ibpd, struct efa_pd, ibpd);
136 }
137
138 static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
139 {
140 return container_of(ibmr, struct efa_mr, ibmr);
141 }
142
143 static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
144 {
145 return container_of(ibqp, struct efa_qp, ibqp);
146 }
147
148 static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
149 {
150 return container_of(ibcq, struct efa_cq, ibcq);
151 }
152
153 static inline struct efa_ah *to_eah(struct ib_ah *ibah)
154 {
155 return container_of(ibah, struct efa_ah, ibah);
156 }
157
158 static inline struct efa_user_mmap_entry *
159 to_emmap(struct rdma_user_mmap_entry *rdma_entry)
160 {
161 return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
162 }
163
164 #define EFA_DEV_CAP(dev, cap) \
165 ((dev)->dev_attr.device_caps & \
166 EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_##cap##_MASK)
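/*
 * Example: EFA_DEV_CAP(dev, RDMA_READ) tests the
 * EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK bit in
 * dev->dev_attr.device_caps as reported by the device.
 */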
167
168 #define is_reserved_cleared(reserved) \
169 !memchr_inv(reserved, 0, sizeof(reserved))
170
171 static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
172 size_t size, enum dma_data_direction dir)
173 {
174 void *addr;
175
176 addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
177 if (!addr)
178 return NULL;
179
180 *dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
181 if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
182 ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
183 free_pages_exact(addr, size);
184 return NULL;
185 }
186
187 return addr;
188 }
189
190 static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr,
191 dma_addr_t dma_addr,
192 size_t size, enum dma_data_direction dir)
193 {
194 dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir);
195 free_pages_exact(cpu_addr, size);
196 }
197
198 int efa_query_device(struct ib_device *ibdev,
199 struct ib_device_attr *props,
200 struct ib_udata *udata)
201 {
202 struct efa_com_get_device_attr_result *dev_attr;
203 struct efa_ibv_ex_query_device_resp resp = {};
204 struct efa_dev *dev = to_edev(ibdev);
205 int err;
206
207 if (udata && udata->inlen &&
208 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
209 ibdev_dbg(ibdev,
210 "Incompatible ABI params, udata not cleared\n");
211 return -EINVAL;
212 }
213
214 dev_attr = &dev->dev_attr;
215
216 memset(props, 0, sizeof(*props));
217 props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
218 props->page_size_cap = dev_attr->page_size_cap;
219 props->vendor_id = dev->pdev->vendor;
220 props->vendor_part_id = dev->pdev->device;
221 props->hw_ver = dev->pdev->subsystem_device;
222 props->max_qp = dev_attr->max_qp;
223 props->max_cq = dev_attr->max_cq;
224 props->max_pd = dev_attr->max_pd;
225 props->max_mr = dev_attr->max_mr;
226 props->max_ah = dev_attr->max_ah;
227 props->max_cqe = dev_attr->max_cq_depth;
228 props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
229 dev_attr->max_rq_depth);
230 props->max_send_sge = dev_attr->max_sq_sge;
231 props->max_recv_sge = dev_attr->max_rq_sge;
232 props->max_sge_rd = dev_attr->max_wr_rdma_sge;
233 props->max_pkeys = 1;
234
235 if (udata && udata->outlen) {
236 resp.max_sq_sge = dev_attr->max_sq_sge;
237 resp.max_rq_sge = dev_attr->max_rq_sge;
238 resp.max_sq_wr = dev_attr->max_sq_depth;
239 resp.max_rq_wr = dev_attr->max_rq_depth;
240 resp.max_rdma_size = dev_attr->max_rdma_size;
241
242 if (EFA_DEV_CAP(dev, RDMA_READ))
243 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
244
245 if (EFA_DEV_CAP(dev, RNR_RETRY))
246 resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;
247
248 err = ib_copy_to_udata(udata, &resp,
249 min(sizeof(resp), udata->outlen));
250 if (err) {
251 ibdev_dbg(ibdev,
252 "Failed to copy udata for query_device\n");
253 return err;
254 }
255 }
256
257 return 0;
258 }
259
260 int efa_query_port(struct ib_device *ibdev, u32 port,
261 struct ib_port_attr *props)
262 {
263 struct efa_dev *dev = to_edev(ibdev);
264
265 props->lmc = 1;
266
267 props->state = IB_PORT_ACTIVE;
268 props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
269 props->gid_tbl_len = 1;
270 props->pkey_tbl_len = 1;
271 props->active_speed = IB_SPEED_EDR;
272 props->active_width = IB_WIDTH_4X;
273 props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
274 props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
275 props->max_msg_sz = dev->dev_attr.mtu;
276 props->max_vl_num = 1;
277
278 return 0;
279 }
280
281 int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
282 int qp_attr_mask,
283 struct ib_qp_init_attr *qp_init_attr)
284 {
285 struct efa_dev *dev = to_edev(ibqp->device);
286 struct efa_com_query_qp_params params = {};
287 struct efa_com_query_qp_result result;
288 struct efa_qp *qp = to_eqp(ibqp);
289 int err;
290
291 #define EFA_QUERY_QP_SUPP_MASK \
292 (IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
293 IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP | IB_QP_RNR_RETRY)
294
295 if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
296 ibdev_dbg(&dev->ibdev,
297 "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
298 qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
299 return -EOPNOTSUPP;
300 }
301
302 memset(qp_attr, 0, sizeof(*qp_attr));
303 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
304
305 params.qp_handle = qp->qp_handle;
306 err = efa_com_query_qp(&dev->edev, &params, &result);
307 if (err)
308 return err;
309
310 qp_attr->qp_state = result.qp_state;
311 qp_attr->qkey = result.qkey;
312 qp_attr->sq_psn = result.sq_psn;
313 qp_attr->sq_draining = result.sq_draining;
314 qp_attr->port_num = 1;
315 qp_attr->rnr_retry = result.rnr_retry;
316
317 qp_attr->cap.max_send_wr = qp->max_send_wr;
318 qp_attr->cap.max_recv_wr = qp->max_recv_wr;
319 qp_attr->cap.max_send_sge = qp->max_send_sge;
320 qp_attr->cap.max_recv_sge = qp->max_recv_sge;
321 qp_attr->cap.max_inline_data = qp->max_inline_data;
322
323 qp_init_attr->qp_type = ibqp->qp_type;
324 qp_init_attr->recv_cq = ibqp->recv_cq;
325 qp_init_attr->send_cq = ibqp->send_cq;
326 qp_init_attr->qp_context = ibqp->qp_context;
327 qp_init_attr->cap = qp_attr->cap;
328
329 return 0;
330 }
331
332 int efa_query_gid(struct ib_device *ibdev, u32 port, int index,
333 union ib_gid *gid)
334 {
335 struct efa_dev *dev = to_edev(ibdev);
336
337 memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));
338
339 return 0;
340 }
341
342 int efa_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
343 u16 *pkey)
344 {
345 if (index > 0)
346 return -EINVAL;
347
348 *pkey = 0xffff;
349 return 0;
350 }
351
352 static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
353 {
354 struct efa_com_dealloc_pd_params params = {
355 .pdn = pdn,
356 };
357
358 return efa_com_dealloc_pd(&dev->edev, &params);
359 }
360
361 int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
362 {
363 struct efa_dev *dev = to_edev(ibpd->device);
364 struct efa_ibv_alloc_pd_resp resp = {};
365 struct efa_com_alloc_pd_result result;
366 struct efa_pd *pd = to_epd(ibpd);
367 int err;
368
369 if (udata->inlen &&
370 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
371 ibdev_dbg(&dev->ibdev,
372 "Incompatible ABI params, udata not cleared\n");
373 err = -EINVAL;
374 goto err_out;
375 }
376
377 err = efa_com_alloc_pd(&dev->edev, &result);
378 if (err)
379 goto err_out;
380
381 pd->pdn = result.pdn;
382 resp.pdn = result.pdn;
383
384 if (udata->outlen) {
385 err = ib_copy_to_udata(udata, &resp,
386 min(sizeof(resp), udata->outlen));
387 if (err) {
388 ibdev_dbg(&dev->ibdev,
389 "Failed to copy udata for alloc_pd\n");
390 goto err_dealloc_pd;
391 }
392 }
393
394 ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);
395
396 return 0;
397
398 err_dealloc_pd:
399 efa_pd_dealloc(dev, result.pdn);
400 err_out:
401 atomic64_inc(&dev->stats.alloc_pd_err);
402 return err;
403 }
404
405 int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
406 {
407 struct efa_dev *dev = to_edev(ibpd->device);
408 struct efa_pd *pd = to_epd(ibpd);
409
410 ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
411 efa_pd_dealloc(dev, pd->pdn);
412 return 0;
413 }
414
415 static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
416 {
417 struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };
418
419 return efa_com_destroy_qp(&dev->edev, &params);
420 }
421
422 static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
423 {
424 rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
425 rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
426 rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
427 rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
428 }
429
430 int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
431 {
432 struct efa_dev *dev = to_edev(ibqp->pd->device);
433 struct efa_qp *qp = to_eqp(ibqp);
434 int err;
435
436 ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
437
438 efa_qp_user_mmap_entries_remove(qp);
439
440 err = efa_destroy_qp_handle(dev, qp->qp_handle);
441 if (err)
442 return err;
443
444 if (qp->rq_cpu_addr) {
445 ibdev_dbg(&dev->ibdev,
446 "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
447 qp->rq_cpu_addr, qp->rq_size,
448 &qp->rq_dma_addr);
449 efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
450 qp->rq_size, DMA_TO_DEVICE);
451 }
452
453 return 0;
454 }
455
456 static struct rdma_user_mmap_entry*
457 efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
458 u64 address, size_t length,
459 u8 mmap_flag, u64 *offset)
460 {
461 struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
462 int err;
463
464 if (!entry)
465 return NULL;
466
467 entry->address = address;
468 entry->mmap_flag = mmap_flag;
469
470 err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
471 length);
472 if (err) {
473 kfree(entry);
474 return NULL;
475 }
476 *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
477
478 return &entry->rdma_entry;
479 }
480
481 static int qp_mmap_entries_setup(struct efa_qp *qp,
482 struct efa_dev *dev,
483 struct efa_ucontext *ucontext,
484 struct efa_com_create_qp_params *params,
485 struct efa_ibv_create_qp_resp *resp)
486 {
487 size_t length;
488 u64 address;
489
490 address = dev->db_bar_addr + resp->sq_db_offset;
491 qp->sq_db_mmap_entry =
492 efa_user_mmap_entry_insert(&ucontext->ibucontext,
493 address,
494 PAGE_SIZE, EFA_MMAP_IO_NC,
495 &resp->sq_db_mmap_key);
496 if (!qp->sq_db_mmap_entry)
497 return -ENOMEM;
498
499 resp->sq_db_offset &= ~PAGE_MASK;
500
501 address = dev->mem_bar_addr + resp->llq_desc_offset;
502 length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
503 (resp->llq_desc_offset & ~PAGE_MASK));
504
505 qp->llq_desc_mmap_entry =
506 efa_user_mmap_entry_insert(&ucontext->ibucontext,
507 address, length,
508 EFA_MMAP_IO_WC,
509 &resp->llq_desc_mmap_key);
510 if (!qp->llq_desc_mmap_entry)
511 goto err_remove_mmap;
512
513 resp->llq_desc_offset &= ~PAGE_MASK;
514
515 if (qp->rq_size) {
516 address = dev->db_bar_addr + resp->rq_db_offset;
517
518 qp->rq_db_mmap_entry =
519 efa_user_mmap_entry_insert(&ucontext->ibucontext,
520 address, PAGE_SIZE,
521 EFA_MMAP_IO_NC,
522 &resp->rq_db_mmap_key);
523 if (!qp->rq_db_mmap_entry)
524 goto err_remove_mmap;
525
526 resp->rq_db_offset &= ~PAGE_MASK;
527
528 address = virt_to_phys(qp->rq_cpu_addr);
529 qp->rq_mmap_entry =
530 efa_user_mmap_entry_insert(&ucontext->ibucontext,
531 address, qp->rq_size,
532 EFA_MMAP_DMA_PAGE,
533 &resp->rq_mmap_key);
534 if (!qp->rq_mmap_entry)
535 goto err_remove_mmap;
536
537 resp->rq_mmap_size = qp->rq_size;
538 }
539
540 return 0;
541
542 err_remove_mmap:
543 efa_qp_user_mmap_entries_remove(qp);
544
545 return -ENOMEM;
546 }
547
548 static int efa_qp_validate_cap(struct efa_dev *dev,
549 struct ib_qp_init_attr *init_attr)
550 {
551 if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
552 ibdev_dbg(&dev->ibdev,
553 "qp: requested send wr[%u] exceeds the max[%u]\n",
554 init_attr->cap.max_send_wr,
555 dev->dev_attr.max_sq_depth);
556 return -EINVAL;
557 }
558 if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
559 ibdev_dbg(&dev->ibdev,
560 "qp: requested receive wr[%u] exceeds the max[%u]\n",
561 init_attr->cap.max_recv_wr,
562 dev->dev_attr.max_rq_depth);
563 return -EINVAL;
564 }
565 if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
566 ibdev_dbg(&dev->ibdev,
567 "qp: requested sge send[%u] exceeds the max[%u]\n",
568 init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
569 return -EINVAL;
570 }
571 if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
572 ibdev_dbg(&dev->ibdev,
573 "qp: requested sge recv[%u] exceeds the max[%u]\n",
574 init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
575 return -EINVAL;
576 }
577 if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
578 ibdev_dbg(&dev->ibdev,
579 "qp: requested inline data[%u] exceeds the max[%u]\n",
580 init_attr->cap.max_inline_data,
581 dev->dev_attr.inline_buf_size);
582 return -EINVAL;
583 }
584
585 return 0;
586 }
587
588 static int efa_qp_validate_attr(struct efa_dev *dev,
589 struct ib_qp_init_attr *init_attr)
590 {
591 if (init_attr->qp_type != IB_QPT_DRIVER &&
592 init_attr->qp_type != IB_QPT_UD) {
593 ibdev_dbg(&dev->ibdev,
594 "Unsupported qp type %d\n", init_attr->qp_type);
595 return -EOPNOTSUPP;
596 }
597
598 if (init_attr->srq) {
599 ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
600 return -EOPNOTSUPP;
601 }
602
603 if (init_attr->create_flags) {
604 ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
605 return -EOPNOTSUPP;
606 }
607
608 return 0;
609 }
610
611 int efa_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
612 struct ib_udata *udata)
613 {
614 struct efa_com_create_qp_params create_qp_params = {};
615 struct efa_com_create_qp_result create_qp_resp;
616 struct efa_dev *dev = to_edev(ibqp->device);
617 struct efa_ibv_create_qp_resp resp = {};
618 struct efa_ibv_create_qp cmd = {};
619 struct efa_qp *qp = to_eqp(ibqp);
620 struct efa_ucontext *ucontext;
621 int err;
622
623 ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
624 ibucontext);
625
626 err = efa_qp_validate_cap(dev, init_attr);
627 if (err)
628 goto err_out;
629
630 err = efa_qp_validate_attr(dev, init_attr);
631 if (err)
632 goto err_out;
633
634 if (offsetofend(typeof(cmd), driver_qp_type) > udata->inlen) {
635 ibdev_dbg(&dev->ibdev,
636 "Incompatible ABI params, no input udata\n");
637 err = -EINVAL;
638 goto err_out;
639 }
640
641 if (udata->inlen > sizeof(cmd) &&
642 !ib_is_udata_cleared(udata, sizeof(cmd),
643 udata->inlen - sizeof(cmd))) {
644 ibdev_dbg(&dev->ibdev,
645 "Incompatible ABI params, unknown fields in udata\n");
646 err = -EINVAL;
647 goto err_out;
648 }
649
650 err = ib_copy_from_udata(&cmd, udata,
651 min(sizeof(cmd), udata->inlen));
652 if (err) {
653 ibdev_dbg(&dev->ibdev,
654 "Cannot copy udata for create_qp\n");
655 goto err_out;
656 }
657
658 if (cmd.comp_mask) {
659 ibdev_dbg(&dev->ibdev,
660 "Incompatible ABI params, unknown fields in udata\n");
661 err = -EINVAL;
662 goto err_out;
663 }
664
665 create_qp_params.uarn = ucontext->uarn;
666 create_qp_params.pd = to_epd(ibqp->pd)->pdn;
667
668 if (init_attr->qp_type == IB_QPT_UD) {
669 create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
670 } else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
671 create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
672 } else {
673 ibdev_dbg(&dev->ibdev,
674 "Unsupported qp type %d driver qp type %d\n",
675 init_attr->qp_type, cmd.driver_qp_type);
676 err = -EOPNOTSUPP;
677 goto err_out;
678 }
679
680 ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
681 init_attr->qp_type, cmd.driver_qp_type);
682 create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
683 create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
684 create_qp_params.sq_depth = init_attr->cap.max_send_wr;
685 create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;
686
687 create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
688 create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
689 qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
690 if (qp->rq_size) {
691 qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
692 qp->rq_size, DMA_TO_DEVICE);
693 if (!qp->rq_cpu_addr) {
694 err = -ENOMEM;
695 goto err_out;
696 }
697
698 ibdev_dbg(&dev->ibdev,
699 "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
700 qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
701 create_qp_params.rq_base_addr = qp->rq_dma_addr;
702 }
703
704 err = efa_com_create_qp(&dev->edev, &create_qp_params,
705 &create_qp_resp);
706 if (err)
707 goto err_free_mapped;
708
709 resp.sq_db_offset = create_qp_resp.sq_db_offset;
710 resp.rq_db_offset = create_qp_resp.rq_db_offset;
711 resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
712 resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
713 resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;
714
715 err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
716 &resp);
717 if (err)
718 goto err_destroy_qp;
719
720 qp->qp_handle = create_qp_resp.qp_handle;
721 qp->ibqp.qp_num = create_qp_resp.qp_num;
722 qp->max_send_wr = init_attr->cap.max_send_wr;
723 qp->max_recv_wr = init_attr->cap.max_recv_wr;
724 qp->max_send_sge = init_attr->cap.max_send_sge;
725 qp->max_recv_sge = init_attr->cap.max_recv_sge;
726 qp->max_inline_data = init_attr->cap.max_inline_data;
727
728 if (udata->outlen) {
729 err = ib_copy_to_udata(udata, &resp,
730 min(sizeof(resp), udata->outlen));
731 if (err) {
732 ibdev_dbg(&dev->ibdev,
733 "Failed to copy udata for qp[%u]\n",
734 create_qp_resp.qp_num);
735 goto err_remove_mmap_entries;
736 }
737 }
738
739 ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);
740
741 return 0;
742
743 err_remove_mmap_entries:
744 efa_qp_user_mmap_entries_remove(qp);
745 err_destroy_qp:
746 efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
747 err_free_mapped:
748 if (qp->rq_size)
749 efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
750 qp->rq_size, DMA_TO_DEVICE);
751 err_out:
752 atomic64_inc(&dev->stats.create_qp_err);
753 return err;
754 }
755
756 static const struct {
757 int valid;
758 enum ib_qp_attr_mask req_param;
759 enum ib_qp_attr_mask opt_param;
760 } srd_qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
761 [IB_QPS_RESET] = {
762 [IB_QPS_RESET] = { .valid = 1 },
763 [IB_QPS_INIT] = {
764 .valid = 1,
765 .req_param = IB_QP_PKEY_INDEX |
766 IB_QP_PORT |
767 IB_QP_QKEY,
768 },
769 },
770 [IB_QPS_INIT] = {
771 [IB_QPS_RESET] = { .valid = 1 },
772 [IB_QPS_ERR] = { .valid = 1 },
773 [IB_QPS_INIT] = {
774 .valid = 1,
775 .opt_param = IB_QP_PKEY_INDEX |
776 IB_QP_PORT |
777 IB_QP_QKEY,
778 },
779 [IB_QPS_RTR] = {
780 .valid = 1,
781 .opt_param = IB_QP_PKEY_INDEX |
782 IB_QP_QKEY,
783 },
784 },
785 [IB_QPS_RTR] = {
786 [IB_QPS_RESET] = { .valid = 1 },
787 [IB_QPS_ERR] = { .valid = 1 },
788 [IB_QPS_RTS] = {
789 .valid = 1,
790 .req_param = IB_QP_SQ_PSN,
791 .opt_param = IB_QP_CUR_STATE |
792 IB_QP_QKEY |
793 IB_QP_RNR_RETRY,
794
795 }
796 },
797 [IB_QPS_RTS] = {
798 [IB_QPS_RESET] = { .valid = 1 },
799 [IB_QPS_ERR] = { .valid = 1 },
800 [IB_QPS_RTS] = {
801 .valid = 1,
802 .opt_param = IB_QP_CUR_STATE |
803 IB_QP_QKEY,
804 },
805 [IB_QPS_SQD] = {
806 .valid = 1,
807 .opt_param = IB_QP_EN_SQD_ASYNC_NOTIFY,
808 },
809 },
810 [IB_QPS_SQD] = {
811 [IB_QPS_RESET] = { .valid = 1 },
812 [IB_QPS_ERR] = { .valid = 1 },
813 [IB_QPS_RTS] = {
814 .valid = 1,
815 .opt_param = IB_QP_CUR_STATE |
816 IB_QP_QKEY,
817 },
818 [IB_QPS_SQD] = {
819 .valid = 1,
820 .opt_param = IB_QP_PKEY_INDEX |
821 IB_QP_QKEY,
822 }
823 },
824 [IB_QPS_SQE] = {
825 [IB_QPS_RESET] = { .valid = 1 },
826 [IB_QPS_ERR] = { .valid = 1 },
827 [IB_QPS_RTS] = {
828 .valid = 1,
829 .opt_param = IB_QP_CUR_STATE |
830 IB_QP_QKEY,
831 }
832 },
833 [IB_QPS_ERR] = {
834 [IB_QPS_RESET] = { .valid = 1 },
835 [IB_QPS_ERR] = { .valid = 1 },
836 }
837 };
838
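/*
 * efa_modify_srd_qp_is_ok() checks a requested transition against
 * srd_qp_state_table above, mirroring ib_modify_qp_is_ok() for UD QPs: the
 * transition must be valid, every req_param attribute must be supplied, and
 * no attribute outside req_param, opt_param and IB_QP_STATE may be set.
 */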
839 static bool efa_modify_srd_qp_is_ok(enum ib_qp_state cur_state,
840 enum ib_qp_state next_state,
841 enum ib_qp_attr_mask mask)
842 {
843 enum ib_qp_attr_mask req_param, opt_param;
844
845 if (mask & IB_QP_CUR_STATE &&
846 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
847 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
848 return false;
849
850 if (!srd_qp_state_table[cur_state][next_state].valid)
851 return false;
852
853 req_param = srd_qp_state_table[cur_state][next_state].req_param;
854 opt_param = srd_qp_state_table[cur_state][next_state].opt_param;
855
856 if ((mask & req_param) != req_param)
857 return false;
858
859 if (mask & ~(req_param | opt_param | IB_QP_STATE))
860 return false;
861
862 return true;
863 }
864
865 static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
866 struct ib_qp_attr *qp_attr, int qp_attr_mask,
867 enum ib_qp_state cur_state,
868 enum ib_qp_state new_state)
869 {
870 int err;
871
872 #define EFA_MODIFY_QP_SUPP_MASK \
873 (IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
874 IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN | \
875 IB_QP_RNR_RETRY)
876
877 if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
878 ibdev_dbg(&dev->ibdev,
879 "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
880 qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
881 return -EOPNOTSUPP;
882 }
883
884 if (qp->ibqp.qp_type == IB_QPT_DRIVER)
885 err = !efa_modify_srd_qp_is_ok(cur_state, new_state,
886 qp_attr_mask);
887 else
888 err = !ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
889 qp_attr_mask);
890
891 if (err) {
892 ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
893 return -EINVAL;
894 }
895
896 if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
897 ibdev_dbg(&dev->ibdev, "Can't change port num\n");
898 return -EOPNOTSUPP;
899 }
900
901 if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
902 ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
903 return -EOPNOTSUPP;
904 }
905
906 return 0;
907 }
908
909 int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
910 int qp_attr_mask, struct ib_udata *udata)
911 {
912 struct efa_dev *dev = to_edev(ibqp->device);
913 struct efa_com_modify_qp_params params = {};
914 struct efa_qp *qp = to_eqp(ibqp);
915 enum ib_qp_state cur_state;
916 enum ib_qp_state new_state;
917 int err;
918
919 if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
920 return -EOPNOTSUPP;
921
922 if (udata->inlen &&
923 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
924 ibdev_dbg(&dev->ibdev,
925 "Incompatible ABI params, udata not cleared\n");
926 return -EINVAL;
927 }
928
929 cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
930 qp->state;
931 new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;
932
933 err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
934 new_state);
935 if (err)
936 return err;
937
938 params.qp_handle = qp->qp_handle;
939
940 if (qp_attr_mask & IB_QP_STATE) {
941 EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QP_STATE,
942 1);
943 EFA_SET(&params.modify_mask,
944 EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
945 params.cur_qp_state = cur_state;
946 params.qp_state = new_state;
947 }
948
949 if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
950 EFA_SET(&params.modify_mask,
951 EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY, 1);
952 params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
953 }
954
955 if (qp_attr_mask & IB_QP_QKEY) {
956 EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QKEY, 1);
957 params.qkey = qp_attr->qkey;
958 }
959
960 if (qp_attr_mask & IB_QP_SQ_PSN) {
961 EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN, 1);
962 params.sq_psn = qp_attr->sq_psn;
963 }
964
965 if (qp_attr_mask & IB_QP_RNR_RETRY) {
966 EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY,
967 1);
968 params.rnr_retry = qp_attr->rnr_retry;
969 }
970
971 err = efa_com_modify_qp(&dev->edev, &params);
972 if (err)
973 return err;
974
975 qp->state = new_state;
976
977 return 0;
978 }
979
980 static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
981 {
982 struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };
983
984 return efa_com_destroy_cq(&dev->edev, &params);
985 }
986
987 int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
988 {
989 struct efa_dev *dev = to_edev(ibcq->device);
990 struct efa_cq *cq = to_ecq(ibcq);
991
992 ibdev_dbg(&dev->ibdev,
993 "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
994 cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
995
996 rdma_user_mmap_entry_remove(cq->mmap_entry);
997 efa_destroy_cq_idx(dev, cq->cq_idx);
998 efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
999 DMA_FROM_DEVICE);
1000 return 0;
1001 }
1002
1003 static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
1004 struct efa_ibv_create_cq_resp *resp)
1005 {
1006 resp->q_mmap_size = cq->size;
1007 cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
1008 virt_to_phys(cq->cpu_addr),
1009 cq->size, EFA_MMAP_DMA_PAGE,
1010 &resp->q_mmap_key);
1011 if (!cq->mmap_entry)
1012 return -ENOMEM;
1013
1014 return 0;
1015 }
1016
1017 int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
1018 struct ib_udata *udata)
1019 {
1020 struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
1021 udata, struct efa_ucontext, ibucontext);
1022 struct efa_ibv_create_cq_resp resp = {};
1023 struct efa_com_create_cq_params params;
1024 struct efa_com_create_cq_result result;
1025 struct ib_device *ibdev = ibcq->device;
1026 struct efa_dev *dev = to_edev(ibdev);
1027 struct efa_ibv_create_cq cmd = {};
1028 struct efa_cq *cq = to_ecq(ibcq);
1029 int entries = attr->cqe;
1030 int err;
1031
1032 ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
1033
1034 if (attr->flags)
1035 return -EOPNOTSUPP;
1036
1037 if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
1038 ibdev_dbg(ibdev,
1039 "cq: requested entries[%u] non-positive or greater than max[%u]\n",
1040 entries, dev->dev_attr.max_cq_depth);
1041 err = -EINVAL;
1042 goto err_out;
1043 }
1044
1045 if (offsetofend(typeof(cmd), num_sub_cqs) > udata->inlen) {
1046 ibdev_dbg(ibdev,
1047 "Incompatible ABI params, no input udata\n");
1048 err = -EINVAL;
1049 goto err_out;
1050 }
1051
1052 if (udata->inlen > sizeof(cmd) &&
1053 !ib_is_udata_cleared(udata, sizeof(cmd),
1054 udata->inlen - sizeof(cmd))) {
1055 ibdev_dbg(ibdev,
1056 "Incompatible ABI params, unknown fields in udata\n");
1057 err = -EINVAL;
1058 goto err_out;
1059 }
1060
1061 err = ib_copy_from_udata(&cmd, udata,
1062 min(sizeof(cmd), udata->inlen));
1063 if (err) {
1064 ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
1065 goto err_out;
1066 }
1067
1068 if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_50)) {
1069 ibdev_dbg(ibdev,
1070 "Incompatible ABI params, unknown fields in udata\n");
1071 err = -EINVAL;
1072 goto err_out;
1073 }
1074
1075 if (!cmd.cq_entry_size) {
1076 ibdev_dbg(ibdev,
1077 "Invalid entry size [%u]\n", cmd.cq_entry_size);
1078 err = -EINVAL;
1079 goto err_out;
1080 }
1081
1082 if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
1083 ibdev_dbg(ibdev,
1084 "Invalid number of sub cqs[%u] expected[%u]\n",
1085 cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
1086 err = -EINVAL;
1087 goto err_out;
1088 }
1089
1090 cq->ucontext = ucontext;
1091 cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
1092 cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
1093 DMA_FROM_DEVICE);
1094 if (!cq->cpu_addr) {
1095 err = -ENOMEM;
1096 goto err_out;
1097 }
1098
1099 params.uarn = cq->ucontext->uarn;
1100 params.cq_depth = entries;
1101 params.dma_addr = cq->dma_addr;
1102 params.entry_size_in_bytes = cmd.cq_entry_size;
1103 params.num_sub_cqs = cmd.num_sub_cqs;
1104 err = efa_com_create_cq(&dev->edev, &params, &result);
1105 if (err)
1106 goto err_free_mapped;
1107
1108 resp.cq_idx = result.cq_idx;
1109 cq->cq_idx = result.cq_idx;
1110 cq->ibcq.cqe = result.actual_depth;
1111 WARN_ON_ONCE(entries != result.actual_depth);
1112
1113 err = cq_mmap_entries_setup(dev, cq, &resp);
1114 if (err) {
1115 ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
1116 cq->cq_idx);
1117 goto err_destroy_cq;
1118 }
1119
1120 if (udata->outlen) {
1121 err = ib_copy_to_udata(udata, &resp,
1122 min(sizeof(resp), udata->outlen));
1123 if (err) {
1124 ibdev_dbg(ibdev,
1125 "Failed to copy udata for create_cq\n");
1126 goto err_remove_mmap;
1127 }
1128 }
1129
1130 ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
1131 cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);
1132
1133 return 0;
1134
1135 err_remove_mmap:
1136 rdma_user_mmap_entry_remove(cq->mmap_entry);
1137 err_destroy_cq:
1138 efa_destroy_cq_idx(dev, cq->cq_idx);
1139 err_free_mapped:
1140 efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
1141 DMA_FROM_DEVICE);
1142
1143 err_out:
1144 atomic64_inc(&dev->stats.create_cq_err);
1145 return err;
1146 }
1147
1148 static int umem_to_page_list(struct efa_dev *dev,
1149 struct ib_umem *umem,
1150 u64 *page_list,
1151 u32 hp_cnt,
1152 u8 hp_shift)
1153 {
1154 u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
1155 struct ib_block_iter biter;
1156 unsigned int hp_idx = 0;
1157
1158 ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
1159 hp_cnt, pages_in_hp);
1160
1161 rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
1162 page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
1163
1164 return 0;
1165 }
1166
1167 static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
1168 {
1169 struct scatterlist *sglist;
1170 struct page *pg;
1171 int i;
1172
1173 sglist = kmalloc_array(page_cnt, sizeof(*sglist), GFP_KERNEL);
1174 if (!sglist)
1175 return NULL;
1176 sg_init_table(sglist, page_cnt);
1177 for (i = 0; i < page_cnt; i++) {
1178 pg = vmalloc_to_page(buf);
1179 if (!pg)
1180 goto err;
1181 sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
1182 buf += PAGE_SIZE / sizeof(*buf);
1183 }
1184 return sglist;
1185
1186 err:
1187 kfree(sglist);
1188 return NULL;
1189 }
1190
1191 /*
1192 * create a chunk list of the dma addresses of the physical pages in the
1193 * supplied scatter gather list
1194 */
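/*
 * The chunks are DMA mapped in reverse order (last chunk first) so that by
 * the time chunk i - 1 is mapped, the DMA address of chunk i is already known
 * and can be written into the efa_com_ctrl_buff_info at the end of
 * chunk i - 1.
 */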
1195 static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
1196 {
1197 struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1198 int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
1199 struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
1200 unsigned int chunk_list_size, chunk_idx, payload_idx;
1201 int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
1202 struct efa_com_ctrl_buff_info *ctrl_buf;
1203 u64 *cur_chunk_buf, *prev_chunk_buf;
1204 struct ib_block_iter biter;
1205 dma_addr_t dma_addr;
1206 int i;
1207
1208 /* allocate a chunk list that consists of 4KB chunks */
1209 chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);
1210
1211 chunk_list->size = chunk_list_size;
1212 chunk_list->chunks = kcalloc(chunk_list_size,
1213 sizeof(*chunk_list->chunks),
1214 GFP_KERNEL);
1215 if (!chunk_list->chunks)
1216 return -ENOMEM;
1217
1218 ibdev_dbg(&dev->ibdev,
1219 "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
1220 page_cnt);
1221
1222 /* allocate chunk buffers: */
1223 for (i = 0; i < chunk_list_size; i++) {
1224 chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
1225 if (!chunk_list->chunks[i].buf)
1226 goto chunk_list_dealloc;
1227
1228 chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
1229 }
1230 chunk_list->chunks[chunk_list_size - 1].length =
1231 ((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
1232 EFA_CHUNK_PTR_SIZE;
1233
1234 /* fill the dma addresses of sg list pages to chunks: */
1235 chunk_idx = 0;
1236 payload_idx = 0;
1237 cur_chunk_buf = chunk_list->chunks[0].buf;
1238 rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
1239 EFA_CHUNK_PAYLOAD_SIZE) {
1240 cur_chunk_buf[payload_idx++] =
1241 rdma_block_iter_dma_address(&biter);
1242
1243 if (payload_idx == EFA_PTRS_PER_CHUNK) {
1244 chunk_idx++;
1245 cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
1246 payload_idx = 0;
1247 }
1248 }
1249
1250 /* map chunks to dma and fill chunks next ptrs */
1251 for (i = chunk_list_size - 1; i >= 0; i--) {
1252 dma_addr = dma_map_single(&dev->pdev->dev,
1253 chunk_list->chunks[i].buf,
1254 chunk_list->chunks[i].length,
1255 DMA_TO_DEVICE);
1256 if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1257 ibdev_err(&dev->ibdev,
1258 "chunk[%u] dma_map_failed\n", i);
1259 goto chunk_list_unmap;
1260 }
1261
1262 chunk_list->chunks[i].dma_addr = dma_addr;
1263 ibdev_dbg(&dev->ibdev,
1264 "chunk[%u] mapped at [%pad]\n", i, &dma_addr);
1265
1266 if (!i)
1267 break;
1268
1269 prev_chunk_buf = chunk_list->chunks[i - 1].buf;
1270
1271 ctrl_buf = (struct efa_com_ctrl_buff_info *)
1272 &prev_chunk_buf[EFA_PTRS_PER_CHUNK];
1273 ctrl_buf->length = chunk_list->chunks[i].length;
1274
1275 efa_com_set_dma_addr(dma_addr,
1276 &ctrl_buf->address.mem_addr_high,
1277 &ctrl_buf->address.mem_addr_low);
1278 }
1279
1280 return 0;
1281
1282 chunk_list_unmap:
1283 for (; i < chunk_list_size; i++) {
1284 dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1285 chunk_list->chunks[i].length, DMA_TO_DEVICE);
1286 }
1287 chunk_list_dealloc:
1288 for (i = 0; i < chunk_list_size; i++)
1289 kfree(chunk_list->chunks[i].buf);
1290
1291 kfree(chunk_list->chunks);
1292 return -ENOMEM;
1293 }
1294
1295 static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1296 {
1297 struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
1298 int i;
1299
1300 for (i = 0; i < chunk_list->size; i++) {
1301 dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
1302 chunk_list->chunks[i].length, DMA_TO_DEVICE);
1303 kfree(chunk_list->chunks[i].buf);
1304 }
1305
1306 kfree(chunk_list->chunks);
1307 }
1308
1309 /* initialize pbl continuous mode: map pbl buffer to a dma address. */
1310 static int pbl_continuous_initialize(struct efa_dev *dev,
1311 struct pbl_context *pbl)
1312 {
1313 dma_addr_t dma_addr;
1314
1315 dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
1316 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1317 if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
1318 ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
1319 return -ENOMEM;
1320 }
1321
1322 pbl->phys.continuous.dma_addr = dma_addr;
1323 ibdev_dbg(&dev->ibdev,
1324 "pbl continuous - dma_addr = %pad, size[%u]\n",
1325 &dma_addr, pbl->pbl_buf_size_in_bytes);
1326
1327 return 0;
1328 }
1329
1330 /*
1331 * initialize pbl indirect mode:
1332 * create a chunk list out of the dma addresses of the physical pages of
1333 * pbl buffer.
1334 */
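/*
 * The pbl buffer comes from kvzalloc() and this path is only taken when it
 * ended up vmalloc-backed (see pbl_create()), so its pages are gathered one
 * by one with efa_vmalloc_buf_to_sg() before being DMA mapped.
 */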
1335 static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
1336 {
1337 u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE);
1338 struct scatterlist *sgl;
1339 int sg_dma_cnt, err;
1340
1341 BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
1342 sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
1343 if (!sgl)
1344 return -ENOMEM;
1345
1346 sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1347 if (!sg_dma_cnt) {
1348 err = -EINVAL;
1349 goto err_map;
1350 }
1351
1352 pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
1353 pbl->phys.indirect.sgl = sgl;
1354 pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
1355 err = pbl_chunk_list_create(dev, pbl);
1356 if (err) {
1357 ibdev_dbg(&dev->ibdev,
1358 "chunk_list creation failed[%d]\n", err);
1359 goto err_chunk;
1360 }
1361
1362 ibdev_dbg(&dev->ibdev,
1363 "pbl indirect - size[%u], chunks[%u]\n",
1364 pbl->pbl_buf_size_in_bytes,
1365 pbl->phys.indirect.chunk_list.size);
1366
1367 return 0;
1368
1369 err_chunk:
1370 dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
1371 err_map:
1372 kfree(sgl);
1373 return err;
1374 }
1375
1376 static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
1377 {
1378 pbl_chunk_list_destroy(dev, pbl);
1379 dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
1380 pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
1381 kfree(pbl->phys.indirect.sgl);
1382 }
1383
1384 /* create a page buffer list from a mapped user memory region */
1385 static int pbl_create(struct efa_dev *dev,
1386 struct pbl_context *pbl,
1387 struct ib_umem *umem,
1388 int hp_cnt,
1389 u8 hp_shift)
1390 {
1391 int err;
1392
1393 pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
1394 pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
1395 if (!pbl->pbl_buf)
1396 return -ENOMEM;
1397
1398 if (is_vmalloc_addr(pbl->pbl_buf)) {
1399 pbl->physically_continuous = 0;
1400 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1401 hp_shift);
1402 if (err)
1403 goto err_free;
1404
1405 err = pbl_indirect_initialize(dev, pbl);
1406 if (err)
1407 goto err_free;
1408 } else {
1409 pbl->physically_continuous = 1;
1410 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
1411 hp_shift);
1412 if (err)
1413 goto err_free;
1414
1415 err = pbl_continuous_initialize(dev, pbl);
1416 if (err)
1417 goto err_free;
1418 }
1419
1420 ibdev_dbg(&dev->ibdev,
1421 "user_pbl_created: user_pages[%u], continuous[%u]\n",
1422 hp_cnt, pbl->physically_continuous);
1423
1424 return 0;
1425
1426 err_free:
1427 kvfree(pbl->pbl_buf);
1428 return err;
1429 }
1430
1431 static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
1432 {
1433 if (pbl->physically_continuous)
1434 dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
1435 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
1436 else
1437 pbl_indirect_terminate(dev, pbl);
1438
1439 kvfree(pbl->pbl_buf);
1440 }
1441
1442 static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
1443 struct efa_com_reg_mr_params *params)
1444 {
1445 int err;
1446
1447 params->inline_pbl = 1;
1448 err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
1449 params->page_num, params->page_shift);
1450 if (err)
1451 return err;
1452
1453 ibdev_dbg(&dev->ibdev,
1454 "inline_pbl_array - pages[%u]\n", params->page_num);
1455
1456 return 0;
1457 }
1458
1459 static int efa_create_pbl(struct efa_dev *dev,
1460 struct pbl_context *pbl,
1461 struct efa_mr *mr,
1462 struct efa_com_reg_mr_params *params)
1463 {
1464 int err;
1465
1466 err = pbl_create(dev, pbl, mr->umem, params->page_num,
1467 params->page_shift);
1468 if (err) {
1469 ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
1470 return err;
1471 }
1472
1473 params->inline_pbl = 0;
1474 params->indirect = !pbl->physically_continuous;
1475 if (pbl->physically_continuous) {
1476 params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;
1477
1478 efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
1479 &params->pbl.pbl.address.mem_addr_high,
1480 &params->pbl.pbl.address.mem_addr_low);
1481 } else {
1482 params->pbl.pbl.length =
1483 pbl->phys.indirect.chunk_list.chunks[0].length;
1484
1485 efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
1486 &params->pbl.pbl.address.mem_addr_high,
1487 &params->pbl.pbl.address.mem_addr_low);
1488 }
1489
1490 return 0;
1491 }
1492
1493 struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
1494 u64 virt_addr, int access_flags,
1495 struct ib_udata *udata)
1496 {
1497 struct efa_dev *dev = to_edev(ibpd->device);
1498 struct efa_com_reg_mr_params params = {};
1499 struct efa_com_reg_mr_result result = {};
1500 struct pbl_context pbl;
1501 int supp_access_flags;
1502 unsigned int pg_sz;
1503 struct efa_mr *mr;
1504 int inline_size;
1505 int err;
1506
1507 if (udata && udata->inlen &&
1508 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
1509 ibdev_dbg(&dev->ibdev,
1510 "Incompatible ABI params, udata not cleared\n");
1511 err = -EINVAL;
1512 goto err_out;
1513 }
1514
1515 supp_access_flags =
1516 IB_ACCESS_LOCAL_WRITE |
1517 (EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0);
1518
1519 access_flags &= ~IB_ACCESS_OPTIONAL;
1520 if (access_flags & ~supp_access_flags) {
1521 ibdev_dbg(&dev->ibdev,
1522 "Unsupported access flags[%#x], supported[%#x]\n",
1523 access_flags, supp_access_flags);
1524 err = -EOPNOTSUPP;
1525 goto err_out;
1526 }
1527
1528 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1529 if (!mr) {
1530 err = -ENOMEM;
1531 goto err_out;
1532 }
1533
1534 mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
1535 if (IS_ERR(mr->umem)) {
1536 err = PTR_ERR(mr->umem);
1537 ibdev_dbg(&dev->ibdev,
1538 "Failed to pin and map user space memory[%d]\n", err);
1539 goto err_free;
1540 }
1541
1542 params.pd = to_epd(ibpd)->pdn;
1543 params.iova = virt_addr;
1544 params.mr_length_in_bytes = length;
1545 params.permissions = access_flags;
1546
1547 pg_sz = ib_umem_find_best_pgsz(mr->umem,
1548 dev->dev_attr.page_size_cap,
1549 virt_addr);
1550 if (!pg_sz) {
1551 err = -EOPNOTSUPP;
1552 ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
1553 dev->dev_attr.page_size_cap);
1554 goto err_unmap;
1555 }
1556
1557 params.page_shift = order_base_2(pg_sz);
1558 params.page_num = ib_umem_num_dma_blocks(mr->umem, pg_sz);
1559
1560 ibdev_dbg(&dev->ibdev,
1561 "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
1562 start, length, params.page_shift, params.page_num);
1563
1564 inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
1565 if (params.page_num <= inline_size) {
1566 err = efa_create_inline_pbl(dev, mr, &params);
1567 if (err)
1568 goto err_unmap;
1569
1570 err = efa_com_register_mr(&dev->edev, &params, &result);
1571 if (err)
1572 goto err_unmap;
1573 } else {
1574 err = efa_create_pbl(dev, &pbl, mr, &params);
1575 if (err)
1576 goto err_unmap;
1577
1578 err = efa_com_register_mr(&dev->edev, &params, &result);
1579 pbl_destroy(dev, &pbl);
1580
1581 if (err)
1582 goto err_unmap;
1583 }
1584
1585 mr->ibmr.lkey = result.l_key;
1586 mr->ibmr.rkey = result.r_key;
1587 mr->ibmr.length = length;
1588 ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);
1589
1590 return &mr->ibmr;
1591
1592 err_unmap:
1593 ib_umem_release(mr->umem);
1594 err_free:
1595 kfree(mr);
1596 err_out:
1597 atomic64_inc(&dev->stats.reg_mr_err);
1598 return ERR_PTR(err);
1599 }
1600
1601 int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
1602 {
1603 struct efa_dev *dev = to_edev(ibmr->device);
1604 struct efa_com_dereg_mr_params params;
1605 struct efa_mr *mr = to_emr(ibmr);
1606 int err;
1607
1608 ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);
1609
1610 params.l_key = mr->ibmr.lkey;
1611 err = efa_com_dereg_mr(&dev->edev, &params);
1612 if (err)
1613 return err;
1614
1615 ib_umem_release(mr->umem);
1616 kfree(mr);
1617
1618 return 0;
1619 }
1620
1621 int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
1622 struct ib_port_immutable *immutable)
1623 {
1624 struct ib_port_attr attr;
1625 int err;
1626
1627 err = ib_query_port(ibdev, port_num, &attr);
1628 if (err) {
1629 ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
1630 return err;
1631 }
1632
1633 immutable->pkey_tbl_len = attr.pkey_tbl_len;
1634 immutable->gid_tbl_len = attr.gid_tbl_len;
1635
1636 return 0;
1637 }
1638
1639 static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
1640 {
1641 struct efa_com_dealloc_uar_params params = {
1642 .uarn = uarn,
1643 };
1644
1645 return efa_com_dealloc_uar(&dev->edev, &params);
1646 }
1647
1648 #define EFA_CHECK_USER_COMP(_dev, _comp_mask, _attr, _mask, _attr_str) \
1649 (_attr_str = (!(_dev)->dev_attr._attr || ((_comp_mask) & (_mask))) ? \
1650 NULL : #_attr)
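/*
 * EFA_CHECK_USER_COMP() evaluates to true (and stores the attribute name in
 * _attr_str) when the device reports a non-zero _attr that userspace did not
 * acknowledge in _comp_mask; otherwise it assigns NULL and evaluates false.
 */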
1651
1652 static int efa_user_comp_handshake(const struct ib_ucontext *ibucontext,
1653 const struct efa_ibv_alloc_ucontext_cmd *cmd)
1654 {
1655 struct efa_dev *dev = to_edev(ibucontext->device);
1656 char *attr_str;
1657
1658 if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, max_tx_batch,
1659 EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH, attr_str))
1660 goto err;
1661
1662 if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, min_sq_depth,
1663 EFA_ALLOC_UCONTEXT_CMD_COMP_MIN_SQ_WR,
1664 attr_str))
1665 goto err;
1666
1667 return 0;
1668
1669 err:
1670 ibdev_dbg(&dev->ibdev, "Userspace handshake failed for %s attribute\n",
1671 attr_str);
1672 return -EOPNOTSUPP;
1673 }
1674
1675 int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
1676 {
1677 struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1678 struct efa_dev *dev = to_edev(ibucontext->device);
1679 struct efa_ibv_alloc_ucontext_resp resp = {};
1680 struct efa_ibv_alloc_ucontext_cmd cmd = {};
1681 struct efa_com_alloc_uar_result result;
1682 int err;
1683
1684 /*
1685 * it's fine if the driver does not know all request fields,
1686 * we will ack input fields in our response.
1687 */
1688
1689 err = ib_copy_from_udata(&cmd, udata,
1690 min(sizeof(cmd), udata->inlen));
1691 if (err) {
1692 ibdev_dbg(&dev->ibdev,
1693 "Cannot copy udata for alloc_ucontext\n");
1694 goto err_out;
1695 }
1696
1697 err = efa_user_comp_handshake(ibucontext, &cmd);
1698 if (err)
1699 goto err_out;
1700
1701 err = efa_com_alloc_uar(&dev->edev, &result);
1702 if (err)
1703 goto err_out;
1704
1705 ucontext->uarn = result.uarn;
1706
1707 resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
1708 resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
1709 resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
1710 resp.inline_buf_size = dev->dev_attr.inline_buf_size;
1711 resp.max_llq_size = dev->dev_attr.max_llq_size;
1712 resp.max_tx_batch = dev->dev_attr.max_tx_batch;
1713 resp.min_sq_wr = dev->dev_attr.min_sq_depth;
1714
1715 err = ib_copy_to_udata(udata, &resp,
1716 min(sizeof(resp), udata->outlen));
1717 if (err)
1718 goto err_dealloc_uar;
1719
1720 return 0;
1721
1722 err_dealloc_uar:
1723 efa_dealloc_uar(dev, result.uarn);
1724 err_out:
1725 atomic64_inc(&dev->stats.alloc_ucontext_err);
1726 return err;
1727 }
1728
1729 void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
1730 {
1731 struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1732 struct efa_dev *dev = to_edev(ibucontext->device);
1733
1734 efa_dealloc_uar(dev, ucontext->uarn);
1735 }
1736
1737 void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
1738 {
1739 struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);
1740
1741 kfree(entry);
1742 }
1743
1744 static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
1745 struct vm_area_struct *vma)
1746 {
1747 struct rdma_user_mmap_entry *rdma_entry;
1748 struct efa_user_mmap_entry *entry;
1749 unsigned long va;
1750 int err = 0;
1751 u64 pfn;
1752
1753 rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
1754 if (!rdma_entry) {
1755 ibdev_dbg(&dev->ibdev,
1756 "pgoff[%#lx] does not have valid entry\n",
1757 vma->vm_pgoff);
1758 atomic64_inc(&dev->stats.mmap_err);
1759 return -EINVAL;
1760 }
1761 entry = to_emmap(rdma_entry);
1762
1763 ibdev_dbg(&dev->ibdev,
1764 "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
1765 entry->address, rdma_entry->npages * PAGE_SIZE,
1766 entry->mmap_flag);
1767
1768 pfn = entry->address >> PAGE_SHIFT;
1769 switch (entry->mmap_flag) {
1770 case EFA_MMAP_IO_NC:
1771 err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
1772 entry->rdma_entry.npages * PAGE_SIZE,
1773 pgprot_noncached(vma->vm_page_prot),
1774 rdma_entry);
1775 break;
1776 case EFA_MMAP_IO_WC:
1777 err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
1778 entry->rdma_entry.npages * PAGE_SIZE,
1779 pgprot_writecombine(vma->vm_page_prot),
1780 rdma_entry);
1781 break;
1782 case EFA_MMAP_DMA_PAGE:
1783 for (va = vma->vm_start; va < vma->vm_end;
1784 va += PAGE_SIZE, pfn++) {
1785 err = vm_insert_page(vma, va, pfn_to_page(pfn));
1786 if (err)
1787 break;
1788 }
1789 break;
1790 default:
1791 err = -EINVAL;
1792 }
1793
1794 if (err) {
1795 ibdev_dbg(
1796 &dev->ibdev,
1797 "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
1798 entry->address, rdma_entry->npages * PAGE_SIZE,
1799 entry->mmap_flag, err);
1800 atomic64_inc(&dev->stats.mmap_err);
1801 }
1802
1803 rdma_user_mmap_entry_put(rdma_entry);
1804 return err;
1805 }
1806
1807 int efa_mmap(struct ib_ucontext *ibucontext,
1808 struct vm_area_struct *vma)
1809 {
1810 struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1811 struct efa_dev *dev = to_edev(ibucontext->device);
1812 size_t length = vma->vm_end - vma->vm_start;
1813
1814 ibdev_dbg(&dev->ibdev,
1815 "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
1816 vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
1817
1818 return __efa_mmap(dev, ucontext, vma);
1819 }
1820
1821 static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
1822 {
1823 struct efa_com_destroy_ah_params params = {
1824 .ah = ah->ah,
1825 .pdn = to_epd(ah->ibah.pd)->pdn,
1826 };
1827
1828 return efa_com_destroy_ah(&dev->edev, &params);
1829 }
1830
1831 int efa_create_ah(struct ib_ah *ibah,
1832 struct rdma_ah_init_attr *init_attr,
1833 struct ib_udata *udata)
1834 {
1835 struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
1836 struct efa_dev *dev = to_edev(ibah->device);
1837 struct efa_com_create_ah_params params = {};
1838 struct efa_ibv_create_ah_resp resp = {};
1839 struct efa_com_create_ah_result result;
1840 struct efa_ah *ah = to_eah(ibah);
1841 int err;
1842
1843 if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
1844 ibdev_dbg(&dev->ibdev,
1845 "Create address handle is not supported in atomic context\n");
1846 err = -EOPNOTSUPP;
1847 goto err_out;
1848 }
1849
1850 if (udata->inlen &&
1851 !ib_is_udata_cleared(udata, 0, udata->inlen)) {
1852 ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
1853 err = -EINVAL;
1854 goto err_out;
1855 }
1856
1857 memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
1858 sizeof(params.dest_addr));
1859 params.pdn = to_epd(ibah->pd)->pdn;
1860 err = efa_com_create_ah(&dev->edev, &params, &result);
1861 if (err)
1862 goto err_out;
1863
1864 memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
1865 ah->ah = result.ah;
1866
1867 resp.efa_address_handle = result.ah;
1868
1869 if (udata->outlen) {
1870 err = ib_copy_to_udata(udata, &resp,
1871 min(sizeof(resp), udata->outlen));
1872 if (err) {
1873 ibdev_dbg(&dev->ibdev,
1874 "Failed to copy udata for create_ah response\n");
1875 goto err_destroy_ah;
1876 }
1877 }
1878 ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);
1879
1880 return 0;
1881
1882 err_destroy_ah:
1883 efa_ah_destroy(dev, ah);
1884 err_out:
1885 atomic64_inc(&dev->stats.create_ah_err);
1886 return err;
1887 }
1888
1889 int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
1890 {
1891 struct efa_dev *dev = to_edev(ibah->pd->device);
1892 struct efa_ah *ah = to_eah(ibah);
1893
1894 ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);
1895
1896 if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
1897 ibdev_dbg(&dev->ibdev,
1898 "Destroy address handle is not supported in atomic context\n");
1899 return -EOPNOTSUPP;
1900 }
1901
1902 efa_ah_destroy(dev, ah);
1903 return 0;
1904 }
1905
1906 struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev,
1907 u32 port_num)
1908 {
1909 return rdma_alloc_hw_stats_struct(efa_port_stats_names,
1910 ARRAY_SIZE(efa_port_stats_names),
1911 RDMA_HW_STATS_DEFAULT_LIFESPAN);
1912 }
1913
1914 struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev)
1915 {
1916 return rdma_alloc_hw_stats_struct(efa_device_stats_names,
1917 ARRAY_SIZE(efa_device_stats_names),
1918 RDMA_HW_STATS_DEFAULT_LIFESPAN);
1919 }
1920
1921 static int efa_fill_device_stats(struct efa_dev *dev,
1922 struct rdma_hw_stats *stats)
1923 {
1924 struct efa_com_stats_admin *as = &dev->edev.aq.stats;
1925 struct efa_stats *s = &dev->stats;
1926
1927 stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
1928 stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
1929 stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
1930 stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);
1931
1932 stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
1933 stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
1934 stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
1935 stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
1936 stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
1937 stats->value[EFA_ALLOC_UCONTEXT_ERR] =
1938 atomic64_read(&s->alloc_ucontext_err);
1939 stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
1940 stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);
1941
1942 return ARRAY_SIZE(efa_device_stats_names);
1943 }
1944
1945 static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
1946 u32 port_num)
1947 {
1948 struct efa_com_get_stats_params params = {};
1949 union efa_com_get_stats_result result;
1950 struct efa_com_rdma_read_stats *rrs;
1951 struct efa_com_messages_stats *ms;
1952 struct efa_com_basic_stats *bs;
1953 int err;
1954
1955 params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
1956 params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;
1957
1958 err = efa_com_get_stats(&dev->edev, &params, &result);
1959 if (err)
1960 return err;
1961
1962 bs = &result.basic_stats;
1963 stats->value[EFA_TX_BYTES] = bs->tx_bytes;
1964 stats->value[EFA_TX_PKTS] = bs->tx_pkts;
1965 stats->value[EFA_RX_BYTES] = bs->rx_bytes;
1966 stats->value[EFA_RX_PKTS] = bs->rx_pkts;
1967 stats->value[EFA_RX_DROPS] = bs->rx_drops;
1968
1969 params.type = EFA_ADMIN_GET_STATS_TYPE_MESSAGES;
1970 err = efa_com_get_stats(&dev->edev, &params, &result);
1971 if (err)
1972 return err;
1973
1974 ms = &result.messages_stats;
1975 stats->value[EFA_SEND_BYTES] = ms->send_bytes;
1976 stats->value[EFA_SEND_WRS] = ms->send_wrs;
1977 stats->value[EFA_RECV_BYTES] = ms->recv_bytes;
1978 stats->value[EFA_RECV_WRS] = ms->recv_wrs;
1979
1980 params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_READ;
1981 err = efa_com_get_stats(&dev->edev, &params, &result);
1982 if (err)
1983 return err;
1984
1985 rrs = &result.rdma_read_stats;
1986 stats->value[EFA_RDMA_READ_WRS] = rrs->read_wrs;
1987 stats->value[EFA_RDMA_READ_BYTES] = rrs->read_bytes;
1988 stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
1989 stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;
1990
1991 return ARRAY_SIZE(efa_port_stats_names);
1992 }
1993
1994 int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
1995 u32 port_num, int index)
1996 {
1997 if (port_num)
1998 return efa_fill_port_stats(to_edev(ibdev), stats, port_num);
1999 else
2000 return efa_fill_device_stats(to_edev(ibdev), stats);
2001 }
2002
2003 enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
2004 u32 port_num)
2005 {
2006 return IB_LINK_LAYER_UNSPECIFIED;
2007 }
2008
2009