• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
2 /*
3  * Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
4  */
5 
6 #include <linux/module.h>
7 #include <linux/pci.h>
8 #include <linux/utsname.h>
9 #include <linux/version.h>
10 
11 #include <rdma/ib_user_verbs.h>
12 
13 #include "efa.h"
14 
15 #define PCI_DEV_ID_EFA0_VF 0xefa0
16 #define PCI_DEV_ID_EFA1_VF 0xefa1
17 #define PCI_DEV_ID_EFA2_VF 0xefa2
18 
/* PCI IDs of the EFA virtual functions this driver binds to */
static const struct pci_device_id efa_pci_tbl[] = {
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA0_VF) },
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA1_VF) },
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA2_VF) },
	{ } /* required zero terminator */
};
25 
26 MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
27 MODULE_LICENSE("Dual BSD/GPL");
28 MODULE_DESCRIPTION(DEVICE_NAME);
29 MODULE_DEVICE_TABLE(pci, efa_pci_tbl);
30 
31 #define EFA_REG_BAR 0
32 #define EFA_MEM_BAR 2
33 #define EFA_BASE_BAR_MASK (BIT(EFA_REG_BAR) | BIT(EFA_MEM_BAR))
34 
35 #define EFA_AENQ_ENABLED_GROUPS \
36 	(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
37 	 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
38 
39 /* This handler will called for unknown event group or unimplemented handlers */
unimplemented_aenq_handler(void * data,struct efa_admin_aenq_entry * aenq_e)40 static void unimplemented_aenq_handler(void *data,
41 				       struct efa_admin_aenq_entry *aenq_e)
42 {
43 	struct efa_dev *dev = (struct efa_dev *)data;
44 
45 	ibdev_err(&dev->ibdev,
46 		  "Unknown event was received or event with unimplemented handler\n");
47 }
48 
efa_keep_alive(void * data,struct efa_admin_aenq_entry * aenq_e)49 static void efa_keep_alive(void *data, struct efa_admin_aenq_entry *aenq_e)
50 {
51 	struct efa_dev *dev = (struct efa_dev *)data;
52 
53 	atomic64_inc(&dev->stats.keep_alive_rcvd);
54 }
55 
/*
 * Dispatch table for async event notification queue (AENQ) events.
 * Only keep-alive has a dedicated handler; every other group falls
 * back to unimplemented_aenq_handler().
 */
static struct efa_aenq_handlers aenq_handlers = {
	.handlers = {
		[EFA_ADMIN_KEEP_ALIVE] = efa_keep_alive,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};
62 
efa_release_bars(struct efa_dev * dev,int bars_mask)63 static void efa_release_bars(struct efa_dev *dev, int bars_mask)
64 {
65 	struct pci_dev *pdev = dev->pdev;
66 	int release_bars;
67 
68 	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & bars_mask;
69 	pci_release_selected_regions(pdev, release_bars);
70 }
71 
efa_intr_msix_mgmnt(int irq,void * data)72 static irqreturn_t efa_intr_msix_mgmnt(int irq, void *data)
73 {
74 	struct efa_dev *dev = data;
75 
76 	efa_com_admin_q_comp_intr_handler(&dev->edev);
77 	efa_com_aenq_intr_handler(&dev->edev, data);
78 
79 	return IRQ_HANDLED;
80 }
81 
efa_request_mgmnt_irq(struct efa_dev * dev)82 static int efa_request_mgmnt_irq(struct efa_dev *dev)
83 {
84 	struct efa_irq *irq;
85 	int err;
86 
87 	irq = &dev->admin_irq;
88 	err = request_irq(irq->irqn, irq->handler, 0, irq->name, irq->data);
89 	if (err) {
90 		dev_err(&dev->pdev->dev, "Failed to request admin irq (%d)\n",
91 			err);
92 		return err;
93 	}
94 
95 	dev_dbg(&dev->pdev->dev, "Set affinity hint of mgmnt irq to %*pbl (irq vector: %d)\n",
96 		nr_cpumask_bits, &irq->affinity_hint_mask, irq->irqn);
97 	irq_set_affinity_hint(irq->irqn, &irq->affinity_hint_mask);
98 
99 	return 0;
100 }
101 
efa_setup_mgmnt_irq(struct efa_dev * dev)102 static void efa_setup_mgmnt_irq(struct efa_dev *dev)
103 {
104 	u32 cpu;
105 
106 	snprintf(dev->admin_irq.name, EFA_IRQNAME_SIZE,
107 		 "efa-mgmnt@pci:%s", pci_name(dev->pdev));
108 	dev->admin_irq.handler = efa_intr_msix_mgmnt;
109 	dev->admin_irq.data = dev;
110 	dev->admin_irq.irqn =
111 		pci_irq_vector(dev->pdev, dev->admin_msix_vector_idx);
112 	cpu = cpumask_first(cpu_online_mask);
113 	cpumask_set_cpu(cpu,
114 			&dev->admin_irq.affinity_hint_mask);
115 	dev_info(&dev->pdev->dev, "Setup irq:%d name:%s\n",
116 		 dev->admin_irq.irqn,
117 		 dev->admin_irq.name);
118 }
119 
efa_free_mgmnt_irq(struct efa_dev * dev)120 static void efa_free_mgmnt_irq(struct efa_dev *dev)
121 {
122 	struct efa_irq *irq;
123 
124 	irq = &dev->admin_irq;
125 	irq_set_affinity_hint(irq->irqn, NULL);
126 	free_irq(irq->irqn, irq->data);
127 }
128 
/*
 * Fill in the admin IRQ descriptor, then request the interrupt line.
 *
 * Return: 0 on success, negative errno from efa_request_mgmnt_irq().
 */
static int efa_set_mgmnt_irq(struct efa_dev *dev)
{
	efa_setup_mgmnt_irq(dev);

	return efa_request_mgmnt_irq(dev);
}
135 
efa_request_doorbell_bar(struct efa_dev * dev)136 static int efa_request_doorbell_bar(struct efa_dev *dev)
137 {
138 	u8 db_bar_idx = dev->dev_attr.db_bar;
139 	struct pci_dev *pdev = dev->pdev;
140 	int bars;
141 	int err;
142 
143 	if (!(BIT(db_bar_idx) & EFA_BASE_BAR_MASK)) {
144 		bars = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(db_bar_idx);
145 
146 		err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
147 		if (err) {
148 			dev_err(&dev->pdev->dev,
149 				"pci_request_selected_regions for bar %d failed %d\n",
150 				db_bar_idx, err);
151 			return err;
152 		}
153 	}
154 
155 	dev->db_bar_addr = pci_resource_start(dev->pdev, db_bar_idx);
156 	dev->db_bar_len = pci_resource_len(dev->pdev, db_bar_idx);
157 
158 	return 0;
159 }
160 
efa_release_doorbell_bar(struct efa_dev * dev)161 static void efa_release_doorbell_bar(struct efa_dev *dev)
162 {
163 	if (!(BIT(dev->dev_attr.db_bar) & EFA_BASE_BAR_MASK))
164 		efa_release_bars(dev, BIT(dev->dev_attr.db_bar));
165 }
166 
efa_update_hw_hints(struct efa_dev * dev,struct efa_com_get_hw_hints_result * hw_hints)167 static void efa_update_hw_hints(struct efa_dev *dev,
168 				struct efa_com_get_hw_hints_result *hw_hints)
169 {
170 	struct efa_com_dev *edev = &dev->edev;
171 
172 	if (hw_hints->mmio_read_timeout)
173 		edev->mmio_read.mmio_read_timeout =
174 			hw_hints->mmio_read_timeout * 1000;
175 
176 	if (hw_hints->poll_interval)
177 		edev->aq.poll_interval = hw_hints->poll_interval;
178 
179 	if (hw_hints->admin_completion_timeout)
180 		edev->aq.completion_timeout =
181 			hw_hints->admin_completion_timeout;
182 }
183 
efa_stats_init(struct efa_dev * dev)184 static void efa_stats_init(struct efa_dev *dev)
185 {
186 	atomic64_t *s = (atomic64_t *)&dev->stats;
187 	int i;
188 
189 	for (i = 0; i < sizeof(dev->stats) / sizeof(*s); i++, s++)
190 		atomic64_set(s, 0);
191 }
192 
/*
 * Best-effort report of host identification (OS, kernel version, driver
 * version fields, PCI BDF, supported admin spec version) to the device
 * via the HOST_INFO admin feature. Any failure here is deliberately
 * swallowed so it cannot disturb device probe.
 */
static void efa_set_host_info(struct efa_dev *dev)
{
	struct efa_admin_set_feature_resp resp = {};
	struct efa_admin_set_feature_cmd cmd = {};
	struct efa_admin_host_info *hinf;
	u32 bufsz = sizeof(*hinf);
	dma_addr_t hinf_dma;

	/* Silently skip if the device firmware lacks the feature */
	if (!efa_com_check_supported_feature_id(&dev->edev,
						EFA_ADMIN_HOST_INFO))
		return;

	/* Failures in host info set shall not disturb probe */
	hinf = dma_alloc_coherent(&dev->pdev->dev, bufsz, &hinf_dma,
				  GFP_KERNEL);
	if (!hinf)
		return;

	strscpy(hinf->os_dist_str, utsname()->release,
		sizeof(hinf->os_dist_str));
	hinf->os_type = EFA_ADMIN_OS_LINUX;
	strscpy(hinf->kernel_ver_str, utsname()->version,
		sizeof(hinf->kernel_ver_str));
	hinf->kernel_ver = LINUX_VERSION_CODE;
	/* Driver version fields are reported as all-zero */
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MAJOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MINOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE, 0);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_BUS, dev->pdev->bus->number);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_DEVICE,
		PCI_SLOT(dev->pdev->devfn));
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_FUNCTION,
		PCI_FUNC(dev->pdev->devfn));
	EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MAJOR,
		EFA_COMMON_SPEC_VERSION_MAJOR);
	EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MINOR,
		EFA_COMMON_SPEC_VERSION_MINOR);
	EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_INTREE, 1);
	EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_GDR, 0);

	/* Return value intentionally ignored: informational only */
	efa_com_set_feature_ex(&dev->edev, &resp, &cmd, EFA_ADMIN_HOST_INFO,
			       hinf_dma, bufsz);

	dma_free_coherent(&dev->pdev->dev, bufsz, hinf, hinf_dma);
}
238 
/* RDMA core verbs callbacks implemented by this driver */
static const struct ib_device_ops efa_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_EFA,
	.uverbs_abi_ver = EFA_UVERBS_ABI_VERSION,

	.alloc_hw_port_stats = efa_alloc_hw_port_stats,
	.alloc_hw_device_stats = efa_alloc_hw_device_stats,
	.alloc_pd = efa_alloc_pd,
	.alloc_ucontext = efa_alloc_ucontext,
	.create_cq = efa_create_cq,
	.create_qp = efa_create_qp,
	.create_user_ah = efa_create_ah,
	.dealloc_pd = efa_dealloc_pd,
	.dealloc_ucontext = efa_dealloc_ucontext,
	.dereg_mr = efa_dereg_mr,
	.destroy_ah = efa_destroy_ah,
	.destroy_cq = efa_destroy_cq,
	.destroy_qp = efa_destroy_qp,
	.get_hw_stats = efa_get_hw_stats,
	.get_link_layer = efa_port_link_layer,
	.get_port_immutable = efa_get_port_immutable,
	.mmap = efa_mmap,
	.mmap_free = efa_mmap_free,
	.modify_qp = efa_modify_qp,
	.query_device = efa_query_device,
	.query_gid = efa_query_gid,
	.query_pkey = efa_query_pkey,
	.query_port = efa_query_port,
	.query_qp = efa_query_qp,
	.reg_user_mr = efa_reg_mr,

	/* Per-object driver-private sizes for core-allocated objects */
	INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, efa_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, efa_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, efa_ucontext, ibucontext),
};
276 
/*
 * Query device attributes, apply hardware hints, configure the AENQ,
 * and register the device with the RDMA core. All failures after the
 * doorbell BAR is requested unwind through the single error label.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int efa_ib_device_add(struct efa_dev *dev)
{
	struct efa_com_get_hw_hints_result hw_hints;
	struct pci_dev *pdev = dev->pdev;
	int err;

	efa_stats_init(dev);

	err = efa_com_get_device_attr(&dev->edev, &dev->dev_attr);
	if (err)
		return err;

	dev_dbg(&dev->pdev->dev, "Doorbells bar (%d)\n", dev->dev_attr.db_bar);
	err = efa_request_doorbell_bar(dev);
	if (err)
		return err;

	err = efa_com_get_hw_hints(&dev->edev, &hw_hints);
	if (err)
		goto err_release_doorbell_bar;

	efa_update_hw_hints(dev, &hw_hints);

	/* Try to enable all the available aenq groups */
	err = efa_com_set_aenq_config(&dev->edev, EFA_AENQ_ENABLED_GROUPS);
	if (err)
		goto err_release_doorbell_bar;

	/* Best-effort: failures are swallowed inside */
	efa_set_host_info(dev);

	dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dev.parent = &pdev->dev;

	ib_set_device_ops(&dev->ibdev, &efa_dev_ops);

	err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev);
	if (err)
		goto err_release_doorbell_bar;

	ibdev_info(&dev->ibdev, "IB device registered\n");

	return 0;

err_release_doorbell_bar:
	efa_release_doorbell_bar(dev);
	return err;
}
326 
/*
 * Counterpart of efa_ib_device_add(): reset the device, unregister it
 * from the RDMA core, and release the doorbell BAR.
 */
static void efa_ib_device_remove(struct efa_dev *dev)
{
	efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
	ibdev_info(&dev->ibdev, "Unregister ib device\n");
	ib_unregister_device(&dev->ibdev);
	efa_release_doorbell_bar(dev);
}
334 
/* Free the MSI-X vectors allocated by efa_enable_msix() */
static void efa_disable_msix(struct efa_dev *dev)
{
	pci_free_irq_vectors(dev->pdev);
}
339 
efa_enable_msix(struct efa_dev * dev)340 static int efa_enable_msix(struct efa_dev *dev)
341 {
342 	int msix_vecs, irq_num;
343 
344 	/* Reserve the max msix vectors we might need */
345 	msix_vecs = EFA_NUM_MSIX_VEC;
346 	dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n",
347 		msix_vecs);
348 
349 	dev->admin_msix_vector_idx = EFA_MGMNT_MSIX_VEC_IDX;
350 	irq_num = pci_alloc_irq_vectors(dev->pdev, msix_vecs,
351 					msix_vecs, PCI_IRQ_MSIX);
352 
353 	if (irq_num < 0) {
354 		dev_err(&dev->pdev->dev, "Failed to enable MSI-X. irq_num %d\n",
355 			irq_num);
356 		return -ENOSPC;
357 	}
358 
359 	if (irq_num != msix_vecs) {
360 		efa_disable_msix(dev);
361 		dev_err(&dev->pdev->dev,
362 			"Allocated %d MSI-X (out of %d requested)\n",
363 			irq_num, msix_vecs);
364 		return -ENOSPC;
365 	}
366 
367 	return 0;
368 }
369 
efa_device_init(struct efa_com_dev * edev,struct pci_dev * pdev)370 static int efa_device_init(struct efa_com_dev *edev, struct pci_dev *pdev)
371 {
372 	int dma_width;
373 	int err;
374 
375 	err = efa_com_dev_reset(edev, EFA_REGS_RESET_NORMAL);
376 	if (err)
377 		return err;
378 
379 	err = efa_com_validate_version(edev);
380 	if (err)
381 		return err;
382 
383 	dma_width = efa_com_get_dma_width(edev);
384 	if (dma_width < 0) {
385 		err = dma_width;
386 		return err;
387 	}
388 
389 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_width));
390 	if (err) {
391 		dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", err);
392 		return err;
393 	}
394 
395 	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
396 	return 0;
397 }
398 
/*
 * Allocate and initialize an efa_dev for a newly probed PCI function:
 * enable the device, request and map the register BAR, initialize the
 * readless MMIO machinery, reset the device, set up MSI-X plus the
 * management IRQ, and bring up the admin queue. Failures unwind the
 * already-acquired resources through the error labels, in reverse
 * acquisition order.
 *
 * Return: the new efa_dev on success, ERR_PTR(-errno) on failure.
 */
static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
{
	struct efa_com_dev *edev;
	struct efa_dev *dev;
	int bars;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
		return ERR_PTR(err);
	}

	pci_set_master(pdev);

	dev = ib_alloc_device(efa_dev, ibdev);
	if (!dev) {
		dev_err(&pdev->dev, "Device alloc failed\n");
		err = -ENOMEM;
		goto err_disable_device;
	}

	pci_set_drvdata(pdev, dev);
	edev = &dev->edev;
	edev->efa_dev = dev;
	edev->dmadev = &pdev->dev;
	dev->pdev = pdev;

	/* Request only the base (register + memory) BARs at this stage */
	bars = pci_select_bars(pdev, IORESOURCE_MEM) & EFA_BASE_BAR_MASK;
	err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
			err);
		goto err_ibdev_destroy;
	}

	dev->reg_bar_addr = pci_resource_start(pdev, EFA_REG_BAR);
	dev->reg_bar_len = pci_resource_len(pdev, EFA_REG_BAR);
	dev->mem_bar_addr = pci_resource_start(pdev, EFA_MEM_BAR);
	dev->mem_bar_len = pci_resource_len(pdev, EFA_MEM_BAR);

	edev->reg_bar = devm_ioremap(&pdev->dev,
				     dev->reg_bar_addr,
				     dev->reg_bar_len);
	if (!edev->reg_bar) {
		dev_err(&pdev->dev, "Failed to remap register bar\n");
		err = -EFAULT;
		goto err_release_bars;
	}

	err = efa_com_mmio_reg_read_init(edev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init readless MMIO\n");
		goto err_iounmap;
	}

	err = efa_device_init(edev, pdev);
	if (err) {
		dev_err(&pdev->dev, "EFA device init failed\n");
		/* A timeout may be transient; ask the core to retry probe */
		if (err == -ETIME)
			err = -EPROBE_DEFER;
		goto err_reg_read_destroy;
	}

	err = efa_enable_msix(dev);
	if (err)
		goto err_reg_read_destroy;

	/* Admin queue and AENQ share the management vector */
	edev->aq.msix_vector_idx = dev->admin_msix_vector_idx;
	edev->aenq.msix_vector_idx = dev->admin_msix_vector_idx;

	err = efa_set_mgmnt_irq(dev);
	if (err)
		goto err_disable_msix;

	err = efa_com_admin_init(edev, &aenq_handlers);
	if (err)
		goto err_free_mgmnt_irq;

	return dev;

err_free_mgmnt_irq:
	efa_free_mgmnt_irq(dev);
err_disable_msix:
	efa_disable_msix(dev);
err_reg_read_destroy:
	efa_com_mmio_reg_read_destroy(edev);
err_iounmap:
	devm_iounmap(&pdev->dev, edev->reg_bar);
err_release_bars:
	efa_release_bars(dev, EFA_BASE_BAR_MASK);
err_ibdev_destroy:
	ib_dealloc_device(&dev->ibdev);
err_disable_device:
	pci_disable_device(pdev);
	return ERR_PTR(err);
}
496 
/*
 * Tear down everything efa_probe_device() set up, in reverse order:
 * admin queue, management IRQ, MSI-X, readless MMIO, register BAR
 * mapping and regions, IB device memory, and the PCI device itself.
 */
static void efa_remove_device(struct pci_dev *pdev)
{
	struct efa_dev *dev = pci_get_drvdata(pdev);
	struct efa_com_dev *edev;

	edev = &dev->edev;
	efa_com_admin_destroy(edev);
	efa_free_mgmnt_irq(dev);
	efa_disable_msix(dev);
	efa_com_mmio_reg_read_destroy(edev);
	devm_iounmap(&pdev->dev, edev->reg_bar);
	efa_release_bars(dev, EFA_BASE_BAR_MASK);
	ib_dealloc_device(&dev->ibdev);
	pci_disable_device(pdev);
}
512 
/*
 * PCI probe callback: bring the device up, then register it with the
 * RDMA core; undo the bring-up if registration fails.
 */
static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct efa_dev *dev = efa_probe_device(pdev);
	int err;

	if (IS_ERR(dev))
		return PTR_ERR(dev);

	err = efa_ib_device_add(dev);
	if (err) {
		efa_remove_device(pdev);
		return err;
	}

	return 0;
}
532 
/* PCI remove callback: unregister from the RDMA core, then tear down */
static void efa_remove(struct pci_dev *pdev)
{
	struct efa_dev *dev = pci_get_drvdata(pdev);

	efa_ib_device_remove(dev);
	efa_remove_device(pdev);
}
540 
/* PCI driver glue; module init/exit are generated by module_pci_driver() */
static struct pci_driver efa_pci_driver = {
	.name           = DRV_MODULE_NAME,
	.id_table       = efa_pci_tbl,
	.probe          = efa_probe,
	.remove         = efa_remove,
};

module_pci_driver(efa_pci_driver);
549