// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include <linux/vmalloc.h>
#include <linux/log2.h>

#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "efa.h"

enum {
	EFA_MMAP_DMA_PAGE = 0,
	EFA_MMAP_IO_WC,
	EFA_MMAP_IO_NC,
};

#define EFA_AENQ_ENABLED_GROUPS \
	(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
	 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))

struct efa_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 address;
	u8 mmap_flag;
};

#define EFA_DEFINE_STATS(op) \
	op(EFA_TX_BYTES, "tx_bytes") \
	op(EFA_TX_PKTS, "tx_pkts") \
	op(EFA_RX_BYTES, "rx_bytes") \
	op(EFA_RX_PKTS, "rx_pkts") \
	op(EFA_RX_DROPS, "rx_drops") \
	op(EFA_SEND_BYTES, "send_bytes") \
	op(EFA_SEND_WRS, "send_wrs") \
	op(EFA_RECV_BYTES, "recv_bytes") \
	op(EFA_RECV_WRS, "recv_wrs") \
	op(EFA_RDMA_READ_WRS, "rdma_read_wrs") \
	op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
	op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
	op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
	op(EFA_SUBMITTED_CMDS, "submitted_cmds") \
	op(EFA_COMPLETED_CMDS, "completed_cmds") \
	op(EFA_CMDS_ERR, "cmds_err") \
	op(EFA_NO_COMPLETION_CMDS, "no_completion_cmds") \
	op(EFA_KEEP_ALIVE_RCVD, "keep_alive_rcvd") \
	op(EFA_ALLOC_PD_ERR, "alloc_pd_err") \
	op(EFA_CREATE_QP_ERR, "create_qp_err") \
	op(EFA_CREATE_CQ_ERR, "create_cq_err") \
	op(EFA_REG_MR_ERR, "reg_mr_err") \
	op(EFA_ALLOC_UCONTEXT_ERR, "alloc_ucontext_err") \
	op(EFA_CREATE_AH_ERR, "create_ah_err") \
	op(EFA_MMAP_ERR, "mmap_err")

#define EFA_STATS_ENUM(ename, name) ename,
#define EFA_STATS_STR(ename, name) [ename] = name,

enum efa_hw_stats {
	EFA_DEFINE_STATS(EFA_STATS_ENUM)
};

static const char *const efa_stats_names[] = {
	EFA_DEFINE_STATS(EFA_STATS_STR)
};
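
/*
 * EFA_DEFINE_STATS() is an X-macro: expanded with EFA_STATS_ENUM it
 * emits the enum efa_hw_stats entries (EFA_TX_BYTES, EFA_TX_PKTS, ...),
 * and expanded with EFA_STATS_STR it emits the matching name table
 * ([EFA_TX_BYTES] = "tx_bytes", ...), keeping the two in sync from a
 * single list.
 */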

#define EFA_CHUNK_PAYLOAD_SHIFT       12
#define EFA_CHUNK_PAYLOAD_SIZE        BIT(EFA_CHUNK_PAYLOAD_SHIFT)
#define EFA_CHUNK_PAYLOAD_PTR_SIZE    8

#define EFA_CHUNK_SHIFT               12
#define EFA_CHUNK_SIZE                BIT(EFA_CHUNK_SHIFT)
#define EFA_CHUNK_PTR_SIZE            sizeof(struct efa_com_ctrl_buff_info)

#define EFA_PTRS_PER_CHUNK \
	((EFA_CHUNK_SIZE - EFA_CHUNK_PTR_SIZE) / EFA_CHUNK_PAYLOAD_PTR_SIZE)

#define EFA_CHUNK_USED_SIZE \
	((EFA_PTRS_PER_CHUNK * EFA_CHUNK_PAYLOAD_PTR_SIZE) + EFA_CHUNK_PTR_SIZE)
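
/*
 * Worked example (assuming sizeof(struct efa_com_ctrl_buff_info) == 12,
 * i.e. a u32 length plus a 64-bit split address): each 4096-byte chunk
 * then carries (4096 - 12) / 8 = 510 payload pointers, and
 * EFA_CHUNK_USED_SIZE = 510 * 8 + 12 = 4092 bytes; the few remaining
 * bytes of the chunk are unused.
 */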

struct pbl_chunk {
	dma_addr_t dma_addr;
	u64 *buf;
	u32 length;
};

struct pbl_chunk_list {
	struct pbl_chunk *chunks;
	unsigned int size;
};

struct pbl_context {
	union {
		struct {
			dma_addr_t dma_addr;
		} continuous;
		struct {
			u32 pbl_buf_size_in_pages;
			struct scatterlist *sgl;
			int sg_dma_cnt;
			struct pbl_chunk_list chunk_list;
		} indirect;
	} phys;
	u64 *pbl_buf;
	u32 pbl_buf_size_in_bytes;
	u8 physically_continuous;
};
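
/*
 * A pbl_context operates in one of two modes, selected in pbl_create():
 * if pbl_buf is physically contiguous, phys.continuous holds its single
 * DMA address; otherwise phys.indirect tracks the vmalloc'd buffer's
 * scatterlist and the chunk list that is handed to the device.
 */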

static inline struct efa_dev *to_edev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct efa_dev, ibdev);
}

static inline struct efa_ucontext *to_eucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct efa_ucontext, ibucontext);
}

static inline struct efa_pd *to_epd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct efa_pd, ibpd);
}

static inline struct efa_mr *to_emr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct efa_mr, ibmr);
}

static inline struct efa_qp *to_eqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct efa_qp, ibqp);
}

static inline struct efa_cq *to_ecq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct efa_cq, ibcq);
}

static inline struct efa_ah *to_eah(struct ib_ah *ibah)
{
	return container_of(ibah, struct efa_ah, ibah);
}

static inline struct efa_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
}

#define EFA_DEV_CAP(dev, cap) \
	((dev)->dev_attr.device_caps & \
	 EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_##cap##_MASK)

#define is_reserved_cleared(reserved) \
	!memchr_inv(reserved, 0, sizeof(reserved))
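
/*
 * Note: queue buffers come from alloc_pages_exact() rather than a
 * coherent DMA allocation so they are backed by ordinary page structs;
 * buffers mapped this way are later handed to userspace with
 * vm_insert_page() (see the EFA_MMAP_DMA_PAGE case in __efa_mmap()).
 */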
static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
			       size_t size, enum dma_data_direction dir)
{
	void *addr;

	addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (!addr)
		return NULL;

	*dma_addr = dma_map_single(&dev->pdev->dev, addr, size, dir);
	if (dma_mapping_error(&dev->pdev->dev, *dma_addr)) {
		ibdev_err(&dev->ibdev, "Failed to map DMA address\n");
		free_pages_exact(addr, size);
		return NULL;
	}

	return addr;
}

static void efa_free_mapped(struct efa_dev *dev, void *cpu_addr,
			    dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(&dev->pdev->dev, dma_addr, size, dir);
	free_pages_exact(cpu_addr, size);
}

int efa_query_device(struct ib_device *ibdev,
		     struct ib_device_attr *props,
		     struct ib_udata *udata)
{
	struct efa_com_get_device_attr_result *dev_attr;
	struct efa_ibv_ex_query_device_resp resp = {};
	struct efa_dev *dev = to_edev(ibdev);
	int err;

	if (udata && udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return -EINVAL;
	}

	dev_attr = &dev->dev_attr;

	memset(props, 0, sizeof(*props));
	props->max_mr_size = dev_attr->max_mr_pages * PAGE_SIZE;
	props->page_size_cap = dev_attr->page_size_cap;
	props->vendor_id = dev->pdev->vendor;
	props->vendor_part_id = dev->pdev->device;
	props->hw_ver = dev->pdev->subsystem_device;
	props->max_qp = dev_attr->max_qp;
	props->max_cq = dev_attr->max_cq;
	props->max_pd = dev_attr->max_pd;
	props->max_mr = dev_attr->max_mr;
	props->max_ah = dev_attr->max_ah;
	props->max_cqe = dev_attr->max_cq_depth;
	props->max_qp_wr = min_t(u32, dev_attr->max_sq_depth,
				 dev_attr->max_rq_depth);
	props->max_send_sge = dev_attr->max_sq_sge;
	props->max_recv_sge = dev_attr->max_rq_sge;
	props->max_sge_rd = dev_attr->max_wr_rdma_sge;
	props->max_pkeys = 1;

	if (udata && udata->outlen) {
		resp.max_sq_sge = dev_attr->max_sq_sge;
		resp.max_rq_sge = dev_attr->max_rq_sge;
		resp.max_sq_wr = dev_attr->max_sq_depth;
		resp.max_rq_wr = dev_attr->max_rq_depth;
		resp.max_rdma_size = dev_attr->max_rdma_size;

		if (EFA_DEV_CAP(dev, RDMA_READ))
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;

		if (EFA_DEV_CAP(dev, RNR_RETRY))
			resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;

		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(ibdev,
				  "Failed to copy udata for query_device\n");
			return err;
		}
	}

	return 0;
}

int efa_query_port(struct ib_device *ibdev, u8 port,
		   struct ib_port_attr *props)
{
	struct efa_dev *dev = to_edev(ibdev);

	props->lmc = 1;

	props->state = IB_PORT_ACTIVE;
	props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_speed = IB_SPEED_EDR;
	props->active_width = IB_WIDTH_4X;
	props->max_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
	props->active_mtu = ib_mtu_int_to_enum(dev->dev_attr.mtu);
	props->max_msg_sz = dev->dev_attr.mtu;
	props->max_vl_num = 1;

	return 0;
}

int efa_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		 int qp_attr_mask,
		 struct ib_qp_init_attr *qp_init_attr)
{
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_com_query_qp_params params = {};
	struct efa_com_query_qp_result result;
	struct efa_qp *qp = to_eqp(ibqp);
	int err;

#define EFA_QUERY_QP_SUPP_MASK \
	(IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | \
	 IB_QP_QKEY | IB_QP_SQ_PSN | IB_QP_CAP | IB_QP_RNR_RETRY)

	if (qp_attr_mask & ~EFA_QUERY_QP_SUPP_MASK) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
			  qp_attr_mask, EFA_QUERY_QP_SUPP_MASK);
		return -EOPNOTSUPP;
	}

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	params.qp_handle = qp->qp_handle;
	err = efa_com_query_qp(&dev->edev, &params, &result);
	if (err)
		return err;

	qp_attr->qp_state = result.qp_state;
	qp_attr->qkey = result.qkey;
	qp_attr->sq_psn = result.sq_psn;
	qp_attr->sq_draining = result.sq_draining;
	qp_attr->port_num = 1;
	qp_attr->rnr_retry = result.rnr_retry;

	qp_attr->cap.max_send_wr = qp->max_send_wr;
	qp_attr->cap.max_recv_wr = qp->max_recv_wr;
	qp_attr->cap.max_send_sge = qp->max_send_sge;
	qp_attr->cap.max_recv_sge = qp->max_recv_sge;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->qp_type = ibqp->qp_type;
	qp_init_attr->recv_cq = ibqp->recv_cq;
	qp_init_attr->send_cq = ibqp->send_cq;
	qp_init_attr->qp_context = ibqp->qp_context;
	qp_init_attr->cap = qp_attr->cap;

	return 0;
}

int efa_query_gid(struct ib_device *ibdev, u8 port, int index,
		  union ib_gid *gid)
{
	struct efa_dev *dev = to_edev(ibdev);

	memcpy(gid->raw, dev->dev_attr.addr, sizeof(dev->dev_attr.addr));

	return 0;
}

int efa_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
		   u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

static int efa_pd_dealloc(struct efa_dev *dev, u16 pdn)
{
	struct efa_com_dealloc_pd_params params = {
		.pdn = pdn,
	};

	return efa_com_dealloc_pd(&dev->edev, &params);
}

int efa_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_ibv_alloc_pd_resp resp = {};
	struct efa_com_alloc_pd_result result;
	struct efa_pd *pd = to_epd(ibpd);
	int err;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		err = -EINVAL;
		goto err_out;
	}

	err = efa_com_alloc_pd(&dev->edev, &result);
	if (err)
		goto err_out;

	pd->pdn = result.pdn;
	resp.pdn = result.pdn;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for alloc_pd\n");
			goto err_dealloc_pd;
		}
	}

	ibdev_dbg(&dev->ibdev, "Allocated pd[%d]\n", pd->pdn);

	return 0;

err_dealloc_pd:
	efa_pd_dealloc(dev, result.pdn);
err_out:
	atomic64_inc(&dev->stats.alloc_pd_err);
	return err;
}

int efa_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_pd *pd = to_epd(ibpd);

	ibdev_dbg(&dev->ibdev, "Dealloc pd[%d]\n", pd->pdn);
	efa_pd_dealloc(dev, pd->pdn);
	return 0;
}

static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
{
	struct efa_com_destroy_qp_params params = { .qp_handle = qp_handle };

	return efa_com_destroy_qp(&dev->edev, &params);
}

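/*
 * rdma_user_mmap_entry_remove() is a no-op for a NULL entry, so all four
 * entries can be removed unconditionally even when some were never
 * inserted (e.g. the RQ entries of a QP created without a receive queue).
 */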
static void efa_qp_user_mmap_entries_remove(struct efa_qp *qp)
{
	rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
	rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
	rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
	rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
}

int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibqp->pd->device);
	struct efa_qp *qp = to_eqp(ibqp);
	int err;

	ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);

	efa_qp_user_mmap_entries_remove(qp);

	err = efa_destroy_qp_handle(dev, qp->qp_handle);
	if (err)
		return err;

	if (qp->rq_cpu_addr) {
		ibdev_dbg(&dev->ibdev,
			  "qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
			  qp->rq_cpu_addr, qp->rq_size,
			  &qp->rq_dma_addr);
		efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
				qp->rq_size, DMA_TO_DEVICE);
	}

	kfree(qp);
	return 0;
}

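/*
 * The value returned through *offset is the key userspace must pass as
 * the mmap() offset for this resource; rdma_user_mmap_entry_get() maps
 * it back to this entry in __efa_mmap() below.
 */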
static struct rdma_user_mmap_entry*
efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
			   u64 address, size_t length,
			   u8 mmap_flag, u64 *offset)
{
	struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int err;

	if (!entry)
		return NULL;

	entry->address = address;
	entry->mmap_flag = mmap_flag;

	err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
					  length);
	if (err) {
		kfree(entry);
		return NULL;
	}
	*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}

static int qp_mmap_entries_setup(struct efa_qp *qp,
				 struct efa_dev *dev,
				 struct efa_ucontext *ucontext,
				 struct efa_com_create_qp_params *params,
				 struct efa_ibv_create_qp_resp *resp)
{
	size_t length;
	u64 address;

	address = dev->db_bar_addr + resp->sq_db_offset;
	qp->sq_db_mmap_entry =
		efa_user_mmap_entry_insert(&ucontext->ibucontext,
					   address,
					   PAGE_SIZE, EFA_MMAP_IO_NC,
					   &resp->sq_db_mmap_key);
	if (!qp->sq_db_mmap_entry)
		return -ENOMEM;

	resp->sq_db_offset &= ~PAGE_MASK;

	address = dev->mem_bar_addr + resp->llq_desc_offset;
	length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
			    (resp->llq_desc_offset & ~PAGE_MASK));

	qp->llq_desc_mmap_entry =
		efa_user_mmap_entry_insert(&ucontext->ibucontext,
					   address, length,
					   EFA_MMAP_IO_WC,
					   &resp->llq_desc_mmap_key);
	if (!qp->llq_desc_mmap_entry)
		goto err_remove_mmap;

	resp->llq_desc_offset &= ~PAGE_MASK;

	if (qp->rq_size) {
		address = dev->db_bar_addr + resp->rq_db_offset;

		qp->rq_db_mmap_entry =
			efa_user_mmap_entry_insert(&ucontext->ibucontext,
						   address, PAGE_SIZE,
						   EFA_MMAP_IO_NC,
						   &resp->rq_db_mmap_key);
		if (!qp->rq_db_mmap_entry)
			goto err_remove_mmap;

		resp->rq_db_offset &= ~PAGE_MASK;

		address = virt_to_phys(qp->rq_cpu_addr);
		qp->rq_mmap_entry =
			efa_user_mmap_entry_insert(&ucontext->ibucontext,
						   address, qp->rq_size,
						   EFA_MMAP_DMA_PAGE,
						   &resp->rq_mmap_key);
		if (!qp->rq_mmap_entry)
			goto err_remove_mmap;

		resp->rq_mmap_size = qp->rq_size;
	}

	return 0;

err_remove_mmap:
	efa_qp_user_mmap_entries_remove(qp);

	return -ENOMEM;
}

static int efa_qp_validate_cap(struct efa_dev *dev,
			       struct ib_qp_init_attr *init_attr)
{
	if (init_attr->cap.max_send_wr > dev->dev_attr.max_sq_depth) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested send wr[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_send_wr,
			  dev->dev_attr.max_sq_depth);
		return -EINVAL;
	}
	if (init_attr->cap.max_recv_wr > dev->dev_attr.max_rq_depth) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested receive wr[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_recv_wr,
			  dev->dev_attr.max_rq_depth);
		return -EINVAL;
	}
	if (init_attr->cap.max_send_sge > dev->dev_attr.max_sq_sge) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested sge send[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_send_sge, dev->dev_attr.max_sq_sge);
		return -EINVAL;
	}
	if (init_attr->cap.max_recv_sge > dev->dev_attr.max_rq_sge) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested sge recv[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_recv_sge, dev->dev_attr.max_rq_sge);
		return -EINVAL;
	}
	if (init_attr->cap.max_inline_data > dev->dev_attr.inline_buf_size) {
		ibdev_dbg(&dev->ibdev,
			  "qp: requested inline data[%u] exceeds the max[%u]\n",
			  init_attr->cap.max_inline_data,
			  dev->dev_attr.inline_buf_size);
		return -EINVAL;
	}

	return 0;
}

static int efa_qp_validate_attr(struct efa_dev *dev,
				struct ib_qp_init_attr *init_attr)
{
	if (init_attr->qp_type != IB_QPT_DRIVER &&
	    init_attr->qp_type != IB_QPT_UD) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp type %d\n", init_attr->qp_type);
		return -EOPNOTSUPP;
	}

	if (init_attr->srq) {
		ibdev_dbg(&dev->ibdev, "SRQ is not supported\n");
		return -EOPNOTSUPP;
	}

	if (init_attr->create_flags) {
		ibdev_dbg(&dev->ibdev, "Unsupported create flags\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct efa_com_create_qp_params create_qp_params = {};
	struct efa_com_create_qp_result create_qp_resp;
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_ibv_create_qp_resp resp = {};
	struct efa_ibv_create_qp cmd = {};
	struct efa_ucontext *ucontext;
	struct efa_qp *qp;
	int err;

	ucontext = rdma_udata_to_drv_context(udata, struct efa_ucontext,
					     ibucontext);

	err = efa_qp_validate_cap(dev, init_attr);
	if (err)
		goto err_out;

	err = efa_qp_validate_attr(dev, init_attr);
	if (err)
		goto err_out;

	if (offsetofend(typeof(cmd), driver_qp_type) > udata->inlen) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, no input udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (udata->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(udata, sizeof(cmd),
				 udata->inlen - sizeof(cmd))) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "Cannot copy udata for create_qp\n");
		goto err_out;
	}

	if (cmd.comp_mask) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		err = -ENOMEM;
		goto err_out;
	}

	create_qp_params.uarn = ucontext->uarn;
	create_qp_params.pd = to_epd(ibpd)->pdn;

	if (init_attr->qp_type == IB_QPT_UD) {
		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_UD;
	} else if (cmd.driver_qp_type == EFA_QP_DRIVER_TYPE_SRD) {
		create_qp_params.qp_type = EFA_ADMIN_QP_TYPE_SRD;
	} else {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp type %d driver qp type %d\n",
			  init_attr->qp_type, cmd.driver_qp_type);
		err = -EOPNOTSUPP;
		goto err_free_qp;
	}

	ibdev_dbg(&dev->ibdev, "Create QP: qp type %d driver qp type %#x\n",
		  init_attr->qp_type, cmd.driver_qp_type);
	create_qp_params.send_cq_idx = to_ecq(init_attr->send_cq)->cq_idx;
	create_qp_params.recv_cq_idx = to_ecq(init_attr->recv_cq)->cq_idx;
	create_qp_params.sq_depth = init_attr->cap.max_send_wr;
	create_qp_params.sq_ring_size_in_bytes = cmd.sq_ring_size;

	create_qp_params.rq_depth = init_attr->cap.max_recv_wr;
	create_qp_params.rq_ring_size_in_bytes = cmd.rq_ring_size;
	qp->rq_size = PAGE_ALIGN(create_qp_params.rq_ring_size_in_bytes);
	if (qp->rq_size) {
		qp->rq_cpu_addr = efa_zalloc_mapped(dev, &qp->rq_dma_addr,
						    qp->rq_size, DMA_TO_DEVICE);
		if (!qp->rq_cpu_addr) {
			err = -ENOMEM;
			goto err_free_qp;
		}

		ibdev_dbg(&dev->ibdev,
			  "qp->cpu_addr[0x%p] allocated: size[%lu], dma[%pad]\n",
			  qp->rq_cpu_addr, qp->rq_size, &qp->rq_dma_addr);
		create_qp_params.rq_base_addr = qp->rq_dma_addr;
	}

	err = efa_com_create_qp(&dev->edev, &create_qp_params,
				&create_qp_resp);
	if (err)
		goto err_free_mapped;

	resp.sq_db_offset = create_qp_resp.sq_db_offset;
	resp.rq_db_offset = create_qp_resp.rq_db_offset;
	resp.llq_desc_offset = create_qp_resp.llq_descriptors_offset;
	resp.send_sub_cq_idx = create_qp_resp.send_sub_cq_idx;
	resp.recv_sub_cq_idx = create_qp_resp.recv_sub_cq_idx;

	err = qp_mmap_entries_setup(qp, dev, ucontext, &create_qp_params,
				    &resp);
	if (err)
		goto err_destroy_qp;

	qp->qp_handle = create_qp_resp.qp_handle;
	qp->ibqp.qp_num = create_qp_resp.qp_num;
	qp->max_send_wr = init_attr->cap.max_send_wr;
	qp->max_recv_wr = init_attr->cap.max_recv_wr;
	qp->max_send_sge = init_attr->cap.max_send_sge;
	qp->max_recv_sge = init_attr->cap.max_recv_sge;
	qp->max_inline_data = init_attr->cap.max_inline_data;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for qp[%u]\n",
				  create_qp_resp.qp_num);
			goto err_remove_mmap_entries;
		}
	}

	ibdev_dbg(&dev->ibdev, "Created qp[%d]\n", qp->ibqp.qp_num);

	return &qp->ibqp;

err_remove_mmap_entries:
	efa_qp_user_mmap_entries_remove(qp);
err_destroy_qp:
	efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
err_free_mapped:
	if (qp->rq_size)
		efa_free_mapped(dev, qp->rq_cpu_addr, qp->rq_dma_addr,
				qp->rq_size, DMA_TO_DEVICE);
err_free_qp:
	kfree(qp);
err_out:
	atomic64_inc(&dev->stats.create_qp_err);
	return ERR_PTR(err);
}

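/*
 * Legal SRD QP state transitions, indexed [current][next]. Example: a
 * RESET -> INIT transition is valid only if the attribute mask carries
 * IB_QP_PKEY_INDEX, IB_QP_PORT and IB_QP_QKEY (req_param);
 * efa_modify_srd_qp_is_ok() below also rejects any mask bit outside
 * req_param | opt_param | IB_QP_STATE.
 */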
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param;
	enum ib_qp_attr_mask	opt_param;
} srd_qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = IB_QP_PKEY_INDEX |
				     IB_QP_PORT |
				     IB_QP_QKEY,
		},
	},
	[IB_QPS_INIT] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = IB_QP_PKEY_INDEX |
				     IB_QP_PORT |
				     IB_QP_QKEY,
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.opt_param = IB_QP_PKEY_INDEX |
				     IB_QP_QKEY,
		},
	},
	[IB_QPS_RTR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = IB_QP_SQ_PSN,
			.opt_param = IB_QP_CUR_STATE |
				     IB_QP_QKEY |
				     IB_QP_RNR_RETRY,

		}
	},
	[IB_QPS_RTS] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = IB_QP_CUR_STATE |
				     IB_QP_QKEY,
		},
		[IB_QPS_SQD] = {
			.valid = 1,
			.opt_param = IB_QP_EN_SQD_ASYNC_NOTIFY,
		},
	},
	[IB_QPS_SQD] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = IB_QP_CUR_STATE |
				     IB_QP_QKEY,
		},
		[IB_QPS_SQD] = {
			.valid = 1,
			.opt_param = IB_QP_PKEY_INDEX |
				     IB_QP_QKEY,
		}
	},
	[IB_QPS_SQE] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = IB_QP_CUR_STATE |
				     IB_QP_QKEY,
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
	}
};

static bool efa_modify_srd_qp_is_ok(enum ib_qp_state cur_state,
				    enum ib_qp_state next_state,
				    enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return false;

	if (!srd_qp_state_table[cur_state][next_state].valid)
		return false;

	req_param = srd_qp_state_table[cur_state][next_state].req_param;
	opt_param = srd_qp_state_table[cur_state][next_state].opt_param;

	if ((mask & req_param) != req_param)
		return false;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return false;

	return true;
}

static int efa_modify_qp_validate(struct efa_dev *dev, struct efa_qp *qp,
				  struct ib_qp_attr *qp_attr, int qp_attr_mask,
				  enum ib_qp_state cur_state,
				  enum ib_qp_state new_state)
{
	int err;

#define EFA_MODIFY_QP_SUPP_MASK \
	(IB_QP_STATE | IB_QP_CUR_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY | \
	 IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY | IB_QP_SQ_PSN | \
	 IB_QP_RNR_RETRY)

	if (qp_attr_mask & ~EFA_MODIFY_QP_SUPP_MASK) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported qp_attr_mask[%#x] supported[%#x]\n",
			  qp_attr_mask, EFA_MODIFY_QP_SUPP_MASK);
		return -EOPNOTSUPP;
	}

	if (qp->ibqp.qp_type == IB_QPT_DRIVER)
		err = !efa_modify_srd_qp_is_ok(cur_state, new_state,
					       qp_attr_mask);
	else
		err = !ib_modify_qp_is_ok(cur_state, new_state, IB_QPT_UD,
					  qp_attr_mask);

	if (err) {
		ibdev_dbg(&dev->ibdev, "Invalid modify QP parameters\n");
		return -EINVAL;
	}

	if ((qp_attr_mask & IB_QP_PORT) && qp_attr->port_num != 1) {
		ibdev_dbg(&dev->ibdev, "Can't change port num\n");
		return -EOPNOTSUPP;
	}

	if ((qp_attr_mask & IB_QP_PKEY_INDEX) && qp_attr->pkey_index) {
		ibdev_dbg(&dev->ibdev, "Can't change pkey index\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

int efa_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibqp->device);
	struct efa_com_modify_qp_params params = {};
	struct efa_qp *qp = to_eqp(ibqp);
	enum ib_qp_state cur_state;
	enum ib_qp_state new_state;
	int err;

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		return -EINVAL;
	}

	cur_state = qp_attr_mask & IB_QP_CUR_STATE ? qp_attr->cur_qp_state :
						     qp->state;
	new_state = qp_attr_mask & IB_QP_STATE ? qp_attr->qp_state : cur_state;

	err = efa_modify_qp_validate(dev, qp, qp_attr, qp_attr_mask, cur_state,
				     new_state);
	if (err)
		return err;

	params.qp_handle = qp->qp_handle;

	if (qp_attr_mask & IB_QP_STATE) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QP_STATE,
			1);
		EFA_SET(&params.modify_mask,
			EFA_ADMIN_MODIFY_QP_CMD_CUR_QP_STATE, 1);
		params.cur_qp_state = cur_state;
		params.qp_state = new_state;
	}

	if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
		EFA_SET(&params.modify_mask,
			EFA_ADMIN_MODIFY_QP_CMD_SQ_DRAINED_ASYNC_NOTIFY, 1);
		params.sq_drained_async_notify = qp_attr->en_sqd_async_notify;
	}

	if (qp_attr_mask & IB_QP_QKEY) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_QKEY, 1);
		params.qkey = qp_attr->qkey;
	}

	if (qp_attr_mask & IB_QP_SQ_PSN) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_SQ_PSN, 1);
		params.sq_psn = qp_attr->sq_psn;
	}

	if (qp_attr_mask & IB_QP_RNR_RETRY) {
		EFA_SET(&params.modify_mask, EFA_ADMIN_MODIFY_QP_CMD_RNR_RETRY,
			1);
		params.rnr_retry = qp_attr->rnr_retry;
	}

	err = efa_com_modify_qp(&dev->edev, &params);
	if (err)
		return err;

	qp->state = new_state;

	return 0;
}

static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
{
	struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };

	return efa_com_destroy_cq(&dev->edev, &params);
}

int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibcq->device);
	struct efa_cq *cq = to_ecq(ibcq);

	ibdev_dbg(&dev->ibdev,
		  "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
		  cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);

	rdma_user_mmap_entry_remove(cq->mmap_entry);
	efa_destroy_cq_idx(dev, cq->cq_idx);
	efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
			DMA_FROM_DEVICE);
	return 0;
}

static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
				 struct efa_ibv_create_cq_resp *resp)
{
	resp->q_mmap_size = cq->size;
	cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
						    virt_to_phys(cq->cpu_addr),
						    cq->size, EFA_MMAP_DMA_PAGE,
						    &resp->q_mmap_key);
	if (!cq->mmap_entry)
		return -ENOMEM;

	return 0;
}

int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct efa_ucontext, ibucontext);
	struct efa_ibv_create_cq_resp resp = {};
	struct efa_com_create_cq_params params;
	struct efa_com_create_cq_result result;
	struct ib_device *ibdev = ibcq->device;
	struct efa_dev *dev = to_edev(ibdev);
	struct efa_ibv_create_cq cmd = {};
	struct efa_cq *cq = to_ecq(ibcq);
	int entries = attr->cqe;
	int err;

	ibdev_dbg(ibdev, "create_cq entries %d\n", entries);

	if (entries < 1 || entries > dev->dev_attr.max_cq_depth) {
		ibdev_dbg(ibdev,
			  "cq: requested entries[%u] non-positive or greater than max[%u]\n",
			  entries, dev->dev_attr.max_cq_depth);
		err = -EINVAL;
		goto err_out;
	}

	if (offsetofend(typeof(cmd), num_sub_cqs) > udata->inlen) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, no input udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (udata->inlen > sizeof(cmd) &&
	    !ib_is_udata_cleared(udata, sizeof(cmd),
				 udata->inlen - sizeof(cmd))) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	err = ib_copy_from_udata(&cmd, udata,
				 min(sizeof(cmd), udata->inlen));
	if (err) {
		ibdev_dbg(ibdev, "Cannot copy udata for create_cq\n");
		goto err_out;
	}

	if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_50)) {
		ibdev_dbg(ibdev,
			  "Incompatible ABI params, unknown fields in udata\n");
		err = -EINVAL;
		goto err_out;
	}

	if (!cmd.cq_entry_size) {
		ibdev_dbg(ibdev,
			  "Invalid entry size [%u]\n", cmd.cq_entry_size);
		err = -EINVAL;
		goto err_out;
	}

	if (cmd.num_sub_cqs != dev->dev_attr.sub_cqs_per_cq) {
		ibdev_dbg(ibdev,
			  "Invalid number of sub cqs[%u] expected[%u]\n",
			  cmd.num_sub_cqs, dev->dev_attr.sub_cqs_per_cq);
		err = -EINVAL;
		goto err_out;
	}

	cq->ucontext = ucontext;
	cq->size = PAGE_ALIGN(cmd.cq_entry_size * entries * cmd.num_sub_cqs);
	cq->cpu_addr = efa_zalloc_mapped(dev, &cq->dma_addr, cq->size,
					 DMA_FROM_DEVICE);
	if (!cq->cpu_addr) {
		err = -ENOMEM;
		goto err_out;
	}

	params.uarn = cq->ucontext->uarn;
	params.cq_depth = entries;
	params.dma_addr = cq->dma_addr;
	params.entry_size_in_bytes = cmd.cq_entry_size;
	params.num_sub_cqs = cmd.num_sub_cqs;
	err = efa_com_create_cq(&dev->edev, &params, &result);
	if (err)
		goto err_free_mapped;

	resp.cq_idx = result.cq_idx;
	cq->cq_idx = result.cq_idx;
	cq->ibcq.cqe = result.actual_depth;
	WARN_ON_ONCE(entries != result.actual_depth);

	err = cq_mmap_entries_setup(dev, cq, &resp);
	if (err) {
		ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
			  cq->cq_idx);
		goto err_destroy_cq;
	}

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(ibdev,
				  "Failed to copy udata for create_cq\n");
			goto err_remove_mmap;
		}
	}

	ibdev_dbg(ibdev, "Created cq[%d], cq depth[%u]. dma[%pad] virt[0x%p]\n",
		  cq->cq_idx, result.actual_depth, &cq->dma_addr, cq->cpu_addr);

	return 0;

err_remove_mmap:
	rdma_user_mmap_entry_remove(cq->mmap_entry);
err_destroy_cq:
	efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mapped:
	efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
			DMA_FROM_DEVICE);

err_out:
	atomic64_inc(&dev->stats.create_cq_err);
	return err;
}

static int umem_to_page_list(struct efa_dev *dev,
			     struct ib_umem *umem,
			     u64 *page_list,
			     u32 hp_cnt,
			     u8 hp_shift)
{
	u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
	struct ib_block_iter biter;
	unsigned int hp_idx = 0;

	ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
		  hp_cnt, pages_in_hp);

	rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))
		page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);

	return 0;
}

static struct scatterlist *efa_vmalloc_buf_to_sg(u64 *buf, int page_cnt)
{
	struct scatterlist *sglist;
	struct page *pg;
	int i;

	sglist = kmalloc_array(page_cnt, sizeof(*sglist), GFP_KERNEL);
	if (!sglist)
		return NULL;
	sg_init_table(sglist, page_cnt);
	for (i = 0; i < page_cnt; i++) {
		pg = vmalloc_to_page(buf);
		if (!pg)
			goto err;
		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
		buf += PAGE_SIZE / sizeof(*buf);
	}
	return sglist;

err:
	kfree(sglist);
	return NULL;
}

/*
 * create a chunk list of physical pages dma addresses from the supplied
 * scatter gather list
 */
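/*
 * Chunk layout: each chunk holds up to EFA_PTRS_PER_CHUNK page DMA
 * addresses followed by one struct efa_com_ctrl_buff_info pointing at
 * the next chunk, so the device can walk the list:
 *
 *   chunk[0]: [addr][addr]...[addr][ctrl -> chunk[1]]
 *   chunk[1]: [addr][addr]...[addr][ctrl -> chunk[2]]
 *   ...
 */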
static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl)
{
	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
	int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages;
	struct scatterlist *pages_sgl = pbl->phys.indirect.sgl;
	unsigned int chunk_list_size, chunk_idx, payload_idx;
	int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt;
	struct efa_com_ctrl_buff_info *ctrl_buf;
	u64 *cur_chunk_buf, *prev_chunk_buf;
	struct ib_block_iter biter;
	dma_addr_t dma_addr;
	int i;

	/* allocate a chunk list that consists of 4KB chunks */
	chunk_list_size = DIV_ROUND_UP(page_cnt, EFA_PTRS_PER_CHUNK);

	chunk_list->size = chunk_list_size;
	chunk_list->chunks = kcalloc(chunk_list_size,
				     sizeof(*chunk_list->chunks),
				     GFP_KERNEL);
	if (!chunk_list->chunks)
		return -ENOMEM;

	ibdev_dbg(&dev->ibdev,
		  "chunk_list_size[%u] - pages[%u]\n", chunk_list_size,
		  page_cnt);

	/* allocate chunk buffers: */
	for (i = 0; i < chunk_list_size; i++) {
		chunk_list->chunks[i].buf = kzalloc(EFA_CHUNK_SIZE, GFP_KERNEL);
		if (!chunk_list->chunks[i].buf)
			goto chunk_list_dealloc;

		chunk_list->chunks[i].length = EFA_CHUNK_USED_SIZE;
	}
	chunk_list->chunks[chunk_list_size - 1].length =
		((page_cnt % EFA_PTRS_PER_CHUNK) * EFA_CHUNK_PAYLOAD_PTR_SIZE) +
			EFA_CHUNK_PTR_SIZE;

	/* fill the dma addresses of sg list pages to chunks: */
	chunk_idx = 0;
	payload_idx = 0;
	cur_chunk_buf = chunk_list->chunks[0].buf;
	rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,
			    EFA_CHUNK_PAYLOAD_SIZE) {
		cur_chunk_buf[payload_idx++] =
			rdma_block_iter_dma_address(&biter);

		if (payload_idx == EFA_PTRS_PER_CHUNK) {
			chunk_idx++;
			cur_chunk_buf = chunk_list->chunks[chunk_idx].buf;
			payload_idx = 0;
		}
	}

	/* map chunks to dma and fill chunks next ptrs */
	for (i = chunk_list_size - 1; i >= 0; i--) {
		dma_addr = dma_map_single(&dev->pdev->dev,
					  chunk_list->chunks[i].buf,
					  chunk_list->chunks[i].length,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
			ibdev_err(&dev->ibdev,
				  "chunk[%u] dma_map_failed\n", i);
			goto chunk_list_unmap;
		}

		chunk_list->chunks[i].dma_addr = dma_addr;
		ibdev_dbg(&dev->ibdev,
			  "chunk[%u] mapped at [%pad]\n", i, &dma_addr);

		if (!i)
			break;

		prev_chunk_buf = chunk_list->chunks[i - 1].buf;

		ctrl_buf = (struct efa_com_ctrl_buff_info *)
				&prev_chunk_buf[EFA_PTRS_PER_CHUNK];
		ctrl_buf->length = chunk_list->chunks[i].length;

		efa_com_set_dma_addr(dma_addr,
				     &ctrl_buf->address.mem_addr_high,
				     &ctrl_buf->address.mem_addr_low);
	}

	return 0;

chunk_list_unmap:
	for (; i < chunk_list_size; i++) {
		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
	}
chunk_list_dealloc:
	for (i = 0; i < chunk_list_size; i++)
		kfree(chunk_list->chunks[i].buf);

	kfree(chunk_list->chunks);
	return -ENOMEM;
}

static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl)
{
	struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list;
	int i;

	for (i = 0; i < chunk_list->size; i++) {
		dma_unmap_single(&dev->pdev->dev, chunk_list->chunks[i].dma_addr,
				 chunk_list->chunks[i].length, DMA_TO_DEVICE);
		kfree(chunk_list->chunks[i].buf);
	}

	kfree(chunk_list->chunks);
}

/* initialize pbl continuous mode: map pbl buffer to a dma address. */
static int pbl_continuous_initialize(struct efa_dev *dev,
				     struct pbl_context *pbl)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf,
				  pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->pdev->dev, dma_addr)) {
		ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n");
		return -ENOMEM;
	}

	pbl->phys.continuous.dma_addr = dma_addr;
	ibdev_dbg(&dev->ibdev,
		  "pbl continuous - dma_addr = %pad, size[%u]\n",
		  &dma_addr, pbl->pbl_buf_size_in_bytes);

	return 0;
}

/*
 * initialize pbl indirect mode:
 * create a chunk list out of the dma addresses of the physical pages of
 * pbl buffer.
 */
static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
{
	u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
	struct scatterlist *sgl;
	int sg_dma_cnt, err;

	BUILD_BUG_ON(EFA_CHUNK_PAYLOAD_SIZE > PAGE_SIZE);
	sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages);
	if (!sgl)
		return -ENOMEM;

	sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
	if (!sg_dma_cnt) {
		err = -EINVAL;
		goto err_map;
	}

	pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages;
	pbl->phys.indirect.sgl = sgl;
	pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;
	err = pbl_chunk_list_create(dev, pbl);
	if (err) {
		ibdev_dbg(&dev->ibdev,
			  "chunk_list creation failed[%d]\n", err);
		goto err_chunk;
	}

	ibdev_dbg(&dev->ibdev,
		  "pbl indirect - size[%u], chunks[%u]\n",
		  pbl->pbl_buf_size_in_bytes,
		  pbl->phys.indirect.chunk_list.size);

	return 0;

err_chunk:
	dma_unmap_sg(&dev->pdev->dev, sgl, size_in_pages, DMA_TO_DEVICE);
err_map:
	kfree(sgl);
	return err;
}

static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl)
{
	pbl_chunk_list_destroy(dev, pbl);
	dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl,
		     pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE);
	kfree(pbl->phys.indirect.sgl);
}

/* create a page buffer list from a mapped user memory region */
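/*
 * kvzalloc() returns physically contiguous (kmalloc) memory when the
 * request is small enough and falls back to vmalloc otherwise;
 * pbl_create() below uses is_vmalloc_addr() to detect which case it got
 * and picks continuous vs. indirect (chunk list) mode accordingly.
 */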
static int pbl_create(struct efa_dev *dev,
		      struct pbl_context *pbl,
		      struct ib_umem *umem,
		      int hp_cnt,
		      u8 hp_shift)
{
	int err;

	pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE;
	pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
	if (!pbl->pbl_buf)
		return -ENOMEM;

	if (is_vmalloc_addr(pbl->pbl_buf)) {
		pbl->physically_continuous = 0;
		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
					hp_shift);
		if (err)
			goto err_free;

		err = pbl_indirect_initialize(dev, pbl);
		if (err)
			goto err_free;
	} else {
		pbl->physically_continuous = 1;
		err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt,
					hp_shift);
		if (err)
			goto err_free;

		err = pbl_continuous_initialize(dev, pbl);
		if (err)
			goto err_free;
	}

	ibdev_dbg(&dev->ibdev,
		  "user_pbl_created: user_pages[%u], continuous[%u]\n",
		  hp_cnt, pbl->physically_continuous);

	return 0;

err_free:
	kvfree(pbl->pbl_buf);
	return err;
}

static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl)
{
	if (pbl->physically_continuous)
		dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr,
				 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE);
	else
		pbl_indirect_terminate(dev, pbl);

	kvfree(pbl->pbl_buf);
}

static int efa_create_inline_pbl(struct efa_dev *dev, struct efa_mr *mr,
				 struct efa_com_reg_mr_params *params)
{
	int err;

	params->inline_pbl = 1;
	err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
				params->page_num, params->page_shift);
	if (err)
		return err;

	ibdev_dbg(&dev->ibdev,
		  "inline_pbl_array - pages[%u]\n", params->page_num);

	return 0;
}

static int efa_create_pbl(struct efa_dev *dev,
			  struct pbl_context *pbl,
			  struct efa_mr *mr,
			  struct efa_com_reg_mr_params *params)
{
	int err;

	err = pbl_create(dev, pbl, mr->umem, params->page_num,
			 params->page_shift);
	if (err) {
		ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err);
		return err;
	}

	params->inline_pbl = 0;
	params->indirect = !pbl->physically_continuous;
	if (pbl->physically_continuous) {
		params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes;

		efa_com_set_dma_addr(pbl->phys.continuous.dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
	} else {
		params->pbl.pbl.length =
			pbl->phys.indirect.chunk_list.chunks[0].length;

		efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr,
				     &params->pbl.pbl.address.mem_addr_high,
				     &params->pbl.pbl.address.mem_addr_low);
	}

	return 0;
}

struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
			 u64 virt_addr, int access_flags,
			 struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct efa_com_reg_mr_params params = {};
	struct efa_com_reg_mr_result result = {};
	struct pbl_context pbl;
	int supp_access_flags;
	unsigned int pg_sz;
	struct efa_mr *mr;
	int inline_size;
	int err;

	if (udata && udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) {
		ibdev_dbg(&dev->ibdev,
			  "Incompatible ABI params, udata not cleared\n");
		err = -EINVAL;
		goto err_out;
	}

	supp_access_flags =
		IB_ACCESS_LOCAL_WRITE |
		(EFA_DEV_CAP(dev, RDMA_READ) ? IB_ACCESS_REMOTE_READ : 0);

	access_flags &= ~IB_ACCESS_OPTIONAL;
	if (access_flags & ~supp_access_flags) {
		ibdev_dbg(&dev->ibdev,
			  "Unsupported access flags[%#x], supported[%#x]\n",
			  access_flags, supp_access_flags);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		err = -ENOMEM;
		goto err_out;
	}

	mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		ibdev_dbg(&dev->ibdev,
			  "Failed to pin and map user space memory[%d]\n", err);
		goto err_free;
	}

	params.pd = to_epd(ibpd)->pdn;
	params.iova = virt_addr;
	params.mr_length_in_bytes = length;
	params.permissions = access_flags;

	pg_sz = ib_umem_find_best_pgsz(mr->umem,
				       dev->dev_attr.page_size_cap,
				       virt_addr);
	if (!pg_sz) {
		err = -EOPNOTSUPP;
		ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
			  dev->dev_attr.page_size_cap);
		goto err_unmap;
	}

	params.page_shift = order_base_2(pg_sz);
	params.page_num = ib_umem_num_dma_blocks(mr->umem, pg_sz);

	ibdev_dbg(&dev->ibdev,
		  "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
		  start, length, params.page_shift, params.page_num);

	inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
	if (params.page_num <= inline_size) {
		err = efa_create_inline_pbl(dev, mr, &params);
		if (err)
			goto err_unmap;

		err = efa_com_register_mr(&dev->edev, &params, &result);
		if (err)
			goto err_unmap;
	} else {
		err = efa_create_pbl(dev, &pbl, mr, &params);
		if (err)
			goto err_unmap;

		err = efa_com_register_mr(&dev->edev, &params, &result);
		pbl_destroy(dev, &pbl);

		if (err)
			goto err_unmap;
	}

	mr->ibmr.lkey = result.l_key;
	mr->ibmr.rkey = result.r_key;
	mr->ibmr.length = length;
	ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);

	return &mr->ibmr;

err_unmap:
	ib_umem_release(mr->umem);
err_free:
	kfree(mr);
err_out:
	atomic64_inc(&dev->stats.reg_mr_err);
	return ERR_PTR(err);
}

int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibmr->device);
	struct efa_com_dereg_mr_params params;
	struct efa_mr *mr = to_emr(ibmr);
	int err;

	ibdev_dbg(&dev->ibdev, "Deregister mr[%d]\n", ibmr->lkey);

	params.l_key = mr->ibmr.lkey;
	err = efa_com_dereg_mr(&dev->edev, &params);
	if (err)
		return err;

	ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}

int efa_get_port_immutable(struct ib_device *ibdev, u8 port_num,
			   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err) {
		ibdev_dbg(ibdev, "Couldn't query port err[%d]\n", err);
		return err;
	}

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

static int efa_dealloc_uar(struct efa_dev *dev, u16 uarn)
{
	struct efa_com_dealloc_uar_params params = {
		.uarn = uarn,
	};

	return efa_com_dealloc_uar(&dev->edev, &params);
}

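/*
 * EFA_CHECK_USER_COMP() evaluates to NULL (false) when the device does
 * not expose _attr at all, or when userspace acknowledged it by setting
 * _mask in its comp_mask; otherwise it assigns the attribute name to
 * _attr_str and evaluates true, failing the handshake below with a
 * descriptive log message.
 */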
1642 #define EFA_CHECK_USER_COMP(_dev, _comp_mask, _attr, _mask, _attr_str) \
1643 	(_attr_str = (!(_dev)->dev_attr._attr || ((_comp_mask) & (_mask))) ? \
1644 		     NULL : #_attr)
1645 
efa_user_comp_handshake(const struct ib_ucontext * ibucontext,const struct efa_ibv_alloc_ucontext_cmd * cmd)1646 static int efa_user_comp_handshake(const struct ib_ucontext *ibucontext,
1647 				   const struct efa_ibv_alloc_ucontext_cmd *cmd)
1648 {
1649 	struct efa_dev *dev = to_edev(ibucontext->device);
1650 	char *attr_str;
1651 
1652 	if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, max_tx_batch,
1653 				EFA_ALLOC_UCONTEXT_CMD_COMP_TX_BATCH, attr_str))
1654 		goto err;
1655 
1656 	if (EFA_CHECK_USER_COMP(dev, cmd->comp_mask, min_sq_depth,
1657 				EFA_ALLOC_UCONTEXT_CMD_COMP_MIN_SQ_WR,
1658 				attr_str))
1659 		goto err;
1660 
1661 	return 0;
1662 
1663 err:
1664 	ibdev_dbg(&dev->ibdev, "Userspace handshake failed for %s attribute\n",
1665 		  attr_str);
1666 	return -EOPNOTSUPP;
1667 }
1668 
efa_alloc_ucontext(struct ib_ucontext * ibucontext,struct ib_udata * udata)1669 int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
1670 {
1671 	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1672 	struct efa_dev *dev = to_edev(ibucontext->device);
1673 	struct efa_ibv_alloc_ucontext_resp resp = {};
1674 	struct efa_ibv_alloc_ucontext_cmd cmd = {};
1675 	struct efa_com_alloc_uar_result result;
1676 	int err;
1677 
1678 	/*
1679 	 * it's fine if the driver does not know all request fields,
1680 	 * we will ack input fields in our response.
1681 	 */
1682 
1683 	err = ib_copy_from_udata(&cmd, udata,
1684 				 min(sizeof(cmd), udata->inlen));
1685 	if (err) {
1686 		ibdev_dbg(&dev->ibdev,
1687 			  "Cannot copy udata for alloc_ucontext\n");
1688 		goto err_out;
1689 	}
1690 
1691 	err = efa_user_comp_handshake(ibucontext, &cmd);
1692 	if (err)
1693 		goto err_out;
1694 
1695 	err = efa_com_alloc_uar(&dev->edev, &result);
1696 	if (err)
1697 		goto err_out;
1698 
1699 	ucontext->uarn = result.uarn;
1700 
1701 	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
1702 	resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
1703 	resp.sub_cqs_per_cq = dev->dev_attr.sub_cqs_per_cq;
1704 	resp.inline_buf_size = dev->dev_attr.inline_buf_size;
1705 	resp.max_llq_size = dev->dev_attr.max_llq_size;
1706 	resp.max_tx_batch = dev->dev_attr.max_tx_batch;
1707 	resp.min_sq_wr = dev->dev_attr.min_sq_depth;
1708 
1709 	err = ib_copy_to_udata(udata, &resp,
1710 			       min(sizeof(resp), udata->outlen));
1711 	if (err)
1712 		goto err_dealloc_uar;
1713 
1714 	return 0;
1715 
1716 err_dealloc_uar:
1717 	efa_dealloc_uar(dev, result.uarn);
1718 err_out:
1719 	atomic64_inc(&dev->stats.alloc_ucontext_err);
1720 	return err;
1721 }
1722 
efa_dealloc_ucontext(struct ib_ucontext * ibucontext)1723 void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
1724 {
1725 	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
1726 	struct efa_dev *dev = to_edev(ibucontext->device);
1727 
1728 	efa_dealloc_uar(dev, ucontext->uarn);
1729 }
1730 
efa_mmap_free(struct rdma_user_mmap_entry * rdma_entry)1731 void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
1732 {
1733 	struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);
1734 
1735 	kfree(entry);
1736 }
1737 
static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
		      struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct efa_user_mmap_entry *entry;
	unsigned long va;
	int err = 0;
	u64 pfn;

	rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
	if (!rdma_entry) {
		ibdev_dbg(&dev->ibdev,
			  "pgoff[%#lx] does not have valid entry\n",
			  vma->vm_pgoff);
		atomic64_inc(&dev->stats.mmap_err);
		return -EINVAL;
	}
	entry = to_emmap(rdma_entry);

	ibdev_dbg(&dev->ibdev,
		  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
		  entry->address, rdma_entry->npages * PAGE_SIZE,
		  entry->mmap_flag);

	pfn = entry->address >> PAGE_SHIFT;
	switch (entry->mmap_flag) {
	case EFA_MMAP_IO_NC:
		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
					entry->rdma_entry.npages * PAGE_SIZE,
					pgprot_noncached(vma->vm_page_prot),
					rdma_entry);
		break;
	case EFA_MMAP_IO_WC:
		err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
					entry->rdma_entry.npages * PAGE_SIZE,
					pgprot_writecombine(vma->vm_page_prot),
					rdma_entry);
		break;
	case EFA_MMAP_DMA_PAGE:
		for (va = vma->vm_start; va < vma->vm_end;
		     va += PAGE_SIZE, pfn++) {
			err = vm_insert_page(vma, va, pfn_to_page(pfn));
			if (err)
				break;
		}
		break;
	default:
		err = -EINVAL;
	}

	if (err) {
		ibdev_dbg(
			&dev->ibdev,
			"Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
			entry->address, rdma_entry->npages * PAGE_SIZE,
			entry->mmap_flag, err);
		atomic64_inc(&dev->stats.mmap_err);
	}

	rdma_user_mmap_entry_put(rdma_entry);
	return err;
}

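/* mmap verb entry point: log the requested range and delegate to __efa_mmap(). */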
int efa_mmap(struct ib_ucontext *ibucontext,
	     struct vm_area_struct *vma)
{
	struct efa_ucontext *ucontext = to_eucontext(ibucontext);
	struct efa_dev *dev = to_edev(ibucontext->device);
	size_t length = vma->vm_end - vma->vm_start;

	ibdev_dbg(&dev->ibdev,
		  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
		  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);

	return __efa_mmap(dev, ucontext, vma);
}

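/* Issue a destroy address handle admin command to the device. */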
static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)
{
	struct efa_com_destroy_ah_params params = {
		.ah = ah->ah,
		.pdn = to_epd(ah->ibah.pd)->pdn,
	};

	return efa_com_destroy_ah(&dev->edev, &params);
}

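/*
 * Create an address handle for the destination GID. Creation issues an
 * admin command, so it is only supported in sleepable context. The
 * device-assigned AH number is cached in the driver and, when userspace
 * supplied an output buffer, copied back in the response.
 */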
int efa_create_ah(struct ib_ah *ibah,
		  struct rdma_ah_init_attr *init_attr,
		  struct ib_udata *udata)
{
	struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
	struct efa_dev *dev = to_edev(ibah->device);
	struct efa_com_create_ah_params params = {};
	struct efa_ibv_create_ah_resp resp = {};
	struct efa_com_create_ah_result result;
	struct efa_ah *ah = to_eah(ibah);
	int err;

	if (!(init_attr->flags & RDMA_CREATE_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Create address handle is not supported in atomic context\n");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (udata->inlen &&
	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
		ibdev_dbg(&dev->ibdev, "Incompatible ABI params\n");
		err = -EINVAL;
		goto err_out;
	}

	memcpy(params.dest_addr, ah_attr->grh.dgid.raw,
	       sizeof(params.dest_addr));
	params.pdn = to_epd(ibah->pd)->pdn;
	err = efa_com_create_ah(&dev->edev, &params, &result);
	if (err)
		goto err_out;

	memcpy(ah->id, ah_attr->grh.dgid.raw, sizeof(ah->id));
	ah->ah = result.ah;

	resp.efa_address_handle = result.ah;

	if (udata->outlen) {
		err = ib_copy_to_udata(udata, &resp,
				       min(sizeof(resp), udata->outlen));
		if (err) {
			ibdev_dbg(&dev->ibdev,
				  "Failed to copy udata for create_ah response\n");
			goto err_destroy_ah;
		}
	}
	ibdev_dbg(&dev->ibdev, "Created ah[%d]\n", ah->ah);

	return 0;

err_destroy_ah:
	efa_ah_destroy(dev, ah);
err_out:
	atomic64_inc(&dev->stats.create_ah_err);
	return err;
}

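/*
 * Destroy an address handle. As with creation, this issues an admin
 * command and is therefore rejected in atomic context.
 */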
int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct efa_dev *dev = to_edev(ibah->pd->device);
	struct efa_ah *ah = to_eah(ibah);

	ibdev_dbg(&dev->ibdev, "Destroy ah[%d]\n", ah->ah);

	if (!(flags & RDMA_DESTROY_AH_SLEEPABLE)) {
		ibdev_dbg(&dev->ibdev,
			  "Destroy address handle is not supported in atomic context\n");
		return -EOPNOTSUPP;
	}

	efa_ah_destroy(dev, ah);
	return 0;
}

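/* Allocate an rdma_hw_stats structure covering all counters in efa_stats_names. */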
struct rdma_hw_stats *efa_alloc_hw_stats(struct ib_device *ibdev, u8 port_num)
{
	return rdma_alloc_hw_stats_struct(efa_stats_names,
					  ARRAY_SIZE(efa_stats_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

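/*
 * Fill in the hardware counters: query the device for the basic,
 * messages and RDMA read statistics, then append the admin queue
 * counters and the software error counters kept in dev->stats.
 * Returns the number of counters reported.
 */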
int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
		     u8 port_num, int index)
{
	struct efa_com_get_stats_params params = {};
	union efa_com_get_stats_result result;
	struct efa_dev *dev = to_edev(ibdev);
	struct efa_com_rdma_read_stats *rrs;
	struct efa_com_messages_stats *ms;
	struct efa_com_basic_stats *bs;
	struct efa_com_stats_admin *as;
	struct efa_stats *s;
	int err;

	params.scope = EFA_ADMIN_GET_STATS_SCOPE_ALL;
	params.type = EFA_ADMIN_GET_STATS_TYPE_BASIC;

	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	bs = &result.basic_stats;
	stats->value[EFA_TX_BYTES] = bs->tx_bytes;
	stats->value[EFA_TX_PKTS] = bs->tx_pkts;
	stats->value[EFA_RX_BYTES] = bs->rx_bytes;
	stats->value[EFA_RX_PKTS] = bs->rx_pkts;
	stats->value[EFA_RX_DROPS] = bs->rx_drops;

	params.type = EFA_ADMIN_GET_STATS_TYPE_MESSAGES;
	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	ms = &result.messages_stats;
	stats->value[EFA_SEND_BYTES] = ms->send_bytes;
	stats->value[EFA_SEND_WRS] = ms->send_wrs;
	stats->value[EFA_RECV_BYTES] = ms->recv_bytes;
	stats->value[EFA_RECV_WRS] = ms->recv_wrs;

	params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_READ;
	err = efa_com_get_stats(&dev->edev, &params, &result);
	if (err)
		return err;

	rrs = &result.rdma_read_stats;
	stats->value[EFA_RDMA_READ_WRS] = rrs->read_wrs;
	stats->value[EFA_RDMA_READ_BYTES] = rrs->read_bytes;
	stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
	stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;

	as = &dev->edev.aq.stats;
	stats->value[EFA_SUBMITTED_CMDS] = atomic64_read(&as->submitted_cmd);
	stats->value[EFA_COMPLETED_CMDS] = atomic64_read(&as->completed_cmd);
	stats->value[EFA_CMDS_ERR] = atomic64_read(&as->cmd_err);
	stats->value[EFA_NO_COMPLETION_CMDS] = atomic64_read(&as->no_completion);

	s = &dev->stats;
	stats->value[EFA_KEEP_ALIVE_RCVD] = atomic64_read(&s->keep_alive_rcvd);
	stats->value[EFA_ALLOC_PD_ERR] = atomic64_read(&s->alloc_pd_err);
	stats->value[EFA_CREATE_QP_ERR] = atomic64_read(&s->create_qp_err);
	stats->value[EFA_CREATE_CQ_ERR] = atomic64_read(&s->create_cq_err);
	stats->value[EFA_REG_MR_ERR] = atomic64_read(&s->reg_mr_err);
	stats->value[EFA_ALLOC_UCONTEXT_ERR] =
		atomic64_read(&s->alloc_ucontext_err);
	stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
	stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);

	return ARRAY_SIZE(efa_stats_names);
}

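/* EFA does not implement a standard link layer, so report it as unspecified. */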
enum rdma_link_layer efa_port_link_layer(struct ib_device *ibdev,
					 u8 port_num)
{
	return IB_LINK_LAYER_UNSPECIFIED;
}