/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"

#define DRIVER_NAME "mlx5_core"
#define DRIVER_VERSION "2.2-1"
#define DRIVER_RELDATE	"Feb 2014"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define MLX5_DEFAULT_PROF	2
static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");

struct workqueue_struct *mlx5_core_wq;
static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

struct mlx5_device_context {
	struct list_head	list;
	struct mlx5_interface  *intf;
	void		       *context;
};

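/* Resource profiles selected via the prof_sel module parameter:
 * profile 0 applies no overrides, profile 1 only sets log_max_qp to
 * 12, and profile 2 (the default) sets log_max_qp to 18 and seeds
 * the MR cache buckets below.
 */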
static struct mlx5_profile profile[] = {
	[0] = {
		.mask           = 0,
	},
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 12,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
		.log_max_qp	= 18,
		.mr_cache[0]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[1]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[2]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[3]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[4]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[5]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[6]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[7]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[8]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[9]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[10]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[11]	= {
			.size	= 500,
			.limit	= 250
		},
		.mr_cache[12]	= {
			.size	= 64,
			.limit	= 32
		},
		.mr_cache[13]	= {
			.size	= 32,
			.limit	= 16
		},
		.mr_cache[14]	= {
			.size	= 16,
			.limit	= 8
		},
		.mr_cache[15]	= {
			.size	= 8,
			.limit	= 4
		},
	},
};

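/* Prefer 64-bit DMA and fall back to 32-bit for both the streaming
 * and coherent masks, then cap scatter/gather segments at 2GB.
 */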
static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}

static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}

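/* Ask for one completion vector per port per online CPU on top of
 * the MLX5_EQ_VEC_COMP_BASE control vectors, clamped to the number
 * of EQs the device supports; the range allocation below may grant
 * fewer vectors than requested.
 */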
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int num_eqs = 1 << dev->caps.gen.log_max_eq;
	int nvec;
	int i;

	nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
	nvec = min_t(int, nvec, num_eqs);
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL);
	if (!table->msix_arr)
		return -ENOMEM;

	for (i = 0; i < nvec; i++)
		table->msix_arr[i].entry = i;

	nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
				     MLX5_EQ_VEC_COMP_BASE, nvec);
	if (nvec < 0)
		return nvec;

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

	return 0;
}

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;

	pci_disable_msix(dev->pdev);
	kfree(table->msix_arr);
}

struct mlx5_reg_host_endianess {
	u8	he;
	u8      rsvd[15];
};

#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT,
};

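/* Translate a pkey table size in entries (a power of two from 128 to
 * 4096) into the firmware encoding; anything else warns and falls
 * back to the encoding for 128 entries.
 */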
static u16 to_fw_pkey_sz(u32 size)
{
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		pr_warn("invalid pkey table size %d\n", size);
		return 0;
	}
}

/* selectively copy writable fields clearing any reserved area
 */
static void copy_rw_fields(void *to, struct mlx5_caps *from)
{
	__be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22);
	u64 v64;

	MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp);
	MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
	MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
	MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
	MLX5_SET(cmd_hca_cap, to, log_max_ra_req_dc, from->gen.log_max_ra_req_dc);
	MLX5_SET(cmd_hca_cap, to, log_max_ra_res_dc, from->gen.log_max_ra_res_dc);
	MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
	v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
	*flags_off = cpu_to_be64(v64);
}

static u16 get_pkey_table_size(int pkey)
{
	if (pkey > MLX5_MAX_LOG_PKEY_TABLE)
		return 0;

	return MLX5_MIN_PKEY_TABLE_SIZE << pkey;
}

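/* Unpack the firmware layout of the queried capabilities into the
 * driver's mlx5_general_caps, expanding log-encoded limits into
 * entry counts where appropriate.
 */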
static void fw2drv_caps(struct mlx5_caps *caps, void *out)
{
	struct mlx5_general_caps *gen = &caps->gen;

	gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz);
	gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz);
	gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp);
	gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz);
	gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs);
	gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz);
	gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq);
	gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz);
	gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey);
	gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq);
	gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection);
	gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz);
	gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size);
	gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size);
	gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc);
	gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc);
	gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp);
	gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp);
	gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt);
	gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size));
	gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay);
	gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports);
	gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg);
	gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support);
	gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22));
	pr_debug("flags = 0x%llx\n", gen->flags);
	gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz);
	gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz);
	gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf);
	gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size);
	gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq);
	gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq);
	gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc);
	gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg);
	gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd);
	gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd);
	gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz);
}

static const char *caps_opmod_str(u16 opmod)
{
	switch (opmod) {
	case HCA_CAP_OPMOD_GET_MAX:
		return "GET_MAX";
	case HCA_CAP_OPMOD_GET_CUR:
		return "GET_CUR";
	default:
		return "Invalid";
	}
}

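/* Query HCA capabilities from firmware; opmod selects between the
 * device maximum (HCA_CAP_OPMOD_GET_MAX) and the currently
 * configured values (HCA_CAP_OPMOD_GET_CUR).
 */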
int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
		       u16 opmod)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out;
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;
	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto query_ex;

	err = mlx5_cmd_status_to_err_v2(out);
	if (err) {
		mlx5_core_warn(dev, "query max hca cap failed, %d\n", err);
		goto query_ex;
	}
	mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod));
	fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct));

query_ex:
	kfree(out);
	return err;
}

static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
	int err;

	memset(out, 0, sizeof(out));

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	if (err)
		return err;

	err = mlx5_cmd_status_to_err_v2(out);

	return err;
}

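/* Read the maximum and current capabilities, apply the active
 * profile's overrides (QP table size, pkey table size, command
 * interface checksum) and program the result back with SET_HCA_CAP.
 */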
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	struct mlx5_caps *cur_caps = NULL;
	struct mlx5_caps *max_caps = NULL;
	int err = -ENOMEM;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		goto query_ex;

	max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL);
	if (!max_caps)
		goto query_ex;

	cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL);
	if (!cur_caps)
		goto query_ex;

	err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX);
	if (err)
		goto query_ex;

	err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR);
	if (err)
		goto query_ex;

	/* we limit the size of the pkey table to 128 entries for now */
	cur_caps->gen.pkey_table_size = 128;

	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		cur_caps->gen.log_max_qp = prof->log_max_qp;

	/* disable checksum */
	cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;

	copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct),
		       cur_caps);
	err = set_caps(dev, set_ctx, set_sz);

query_ex:
	kfree(cur_caps);
	kfree(max_caps);
	kfree(set_ctx);

	return err;
}

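/* Report the host's endianness to the device through the
 * HOST_ENDIANNESS access register.
 */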
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianess he_in;
	struct mlx5_reg_host_endianess he_out;
	int err;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in,  sizeof(he_in),
					&he_out, sizeof(he_out),
					MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}

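/* ENABLE_HCA/DISABLE_HCA bracket the rest of the configuration
 * commands; a nonzero status in the output mailbox is converted
 * into an errno.
 */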
static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
{
	int err;
	struct mlx5_enable_hca_mbox_in in;
	struct mlx5_enable_hca_mbox_out out;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	return 0;
}

static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
{
	int err;
	struct mlx5_disable_hca_mbox_in in;
	struct mlx5_disable_hca_mbox_out out;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DISABLE_HCA);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	return 0;
}

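/* Core bring-up sequence: map PCI resources and the initialization
 * segment, start the command interface, feed the firmware its boot
 * and init pages, program capabilities, run INIT_HCA, then bring up
 * the health poll, MSI-X, EQs and UARs. The error path unwinds each
 * step in reverse order.
 */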
static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
	struct mlx5_priv *priv = &dev->priv;
	int err;

	dev->pdev = pdev;
	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
	if (!priv->dbg_root)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		dev_err(&pdev->dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	dev->iseg_base = pci_resource_start(dev->pdev, 0);
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}
	dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		 fw_rev_min(dev), fw_rev_sub(dev));

	err = mlx5_cmd_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
		goto err_unmap;
	}

	mlx5_pagealloc_init(dev);

	err = mlx5_core_enable_hca(dev);
	if (err) {
		dev_err(&pdev->dev, "enable hca failed\n");
		goto err_pagealloc_cleanup;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate boot pages\n");
		goto err_disable_hca;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		dev_err(&pdev->dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap(dev);
	if (err) {
		dev_err(&pdev->dev, "handle_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev);
	if (err) {
		dev_err(&pdev->dev, "init hca failed\n");
		goto err_pagealloc_stop;
	}

	mlx5_start_health_poll(dev);

	err = mlx5_cmd_query_hca_cap(dev, &dev->caps);
	if (err) {
		dev_err(&pdev->dev, "query hca failed\n");
		goto err_stop_poll;
	}

	err = mlx5_cmd_query_adapter(dev);
	if (err) {
		dev_err(&pdev->dev, "query adapter failed\n");
		goto err_stop_poll;
	}

	err = mlx5_enable_msix(dev);
	if (err) {
		dev_err(&pdev->dev, "enable msix failed\n");
		goto err_stop_poll;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize eq\n");
		goto disable_msix;
	}

	err = mlx5_alloc_uuars(dev, &priv->uuari);
	if (err) {
		dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
		goto err_eq_cleanup;
	}

	err = mlx5_start_eqs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
		goto err_free_uar;
	}

	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	mlx5_init_cq_table(dev);
	mlx5_init_qp_table(dev);
	mlx5_init_srq_table(dev);
	mlx5_init_mr_table(dev);

	return 0;

err_free_uar:
	mlx5_free_uuars(dev, &priv->uuari);

err_eq_cleanup:
	mlx5_eq_cleanup(dev);

disable_msix:
	mlx5_disable_msix(dev);

err_stop_poll:
	mlx5_stop_health_poll(dev);
	if (mlx5_cmd_teardown_hca(dev)) {
		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
		return err;
	}

err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);

reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);

err_disable_hca:
	mlx5_core_disable_hca(dev);

err_pagealloc_cleanup:
	mlx5_pagealloc_cleanup(dev);
	mlx5_cmd_cleanup(dev);

err_unmap:
	iounmap(dev->iseg);

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);

err_disable:
	pci_disable_device(dev->pdev);

err_dbg:
	debugfs_remove(priv->dbg_root);
	return err;
}
EXPORT_SYMBOL(mlx5_dev_init);

static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_stop_eqs(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_eq_cleanup(dev);
	mlx5_disable_msix(dev);
	mlx5_stop_health_poll(dev);
	if (mlx5_cmd_teardown_hca(dev)) {
		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
		return;
	}
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev);
	mlx5_pagealloc_cleanup(dev);
	mlx5_cmd_cleanup(dev);
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	pci_disable_device(dev->pdev);
	debugfs_remove(priv->dbg_root);
}

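/* Attach a registered interface (such as mlx5_ib) to a core device
 * and track the context returned by its add() callback on the
 * device's ctx_list; a NULL context is simply dropped.
 */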
static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx) {
		pr_warn("mlx5_add_device: alloc context failed\n");
		return;
	}

	dev_ctx->intf    = intf;
	dev_ctx->context = intf->add(dev);

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);
	} else {
		kfree(dev_ctx);
	}
}

static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}

static int mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}

static void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&intf_mutex);
}

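/* Interfaces and core devices live on global lists protected by
 * intf_mutex; registering either side binds it to every peer that
 * is already present.
 */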
int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	mutex_lock(&intf_mutex);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);

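/* Fan a device event out to every attached interface that supplied
 * an event callback.
 */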
static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
			    unsigned long param)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

struct mlx5_core_event_handler {
	void (*event)(struct mlx5_core_dev *dev,
		      enum mlx5_dev_event event,
		      void *data);
};

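/* PCI probe: allocate the core device, validate the requested
 * profile (falling back to the default when prof_sel is out of
 * range), initialize the HCA and expose it to registered interfaces.
 */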
static int init_one(struct pci_dev *pdev,
		    const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(&pdev->dev, "kzalloc failed\n");
		return -ENOMEM;
	}
	priv = &dev->priv;

	pci_set_drvdata(pdev, dev);

	if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
		pr_warn("selected profile out of range, selecting default (%d)\n",
			MLX5_DEFAULT_PROF);
		prof_sel = MLX5_DEFAULT_PROF;
	}
	dev->profile = &profile[prof_sel];
	dev->event = mlx5_core_event;

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	err = mlx5_dev_init(dev, pdev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
		goto out;
	}

	err = mlx5_register_device(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
		goto out_init;
	}

	return 0;

out_init:
	mlx5_dev_cleanup(dev);
out:
	kfree(dev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);

	mlx5_unregister_device(dev);
	mlx5_dev_cleanup(dev);
	kfree(dev);
}

static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
	{ PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

static struct pci_driver mlx5_core_driver = {
	.name           = DRIVER_NAME,
	.id_table       = mlx5_core_pci_table,
	.probe          = init_one,
	.remove         = remove_one
};

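/* Module init: debugfs root first, then the shared workqueue and
 * health machinery, and finally the PCI driver registration.
 */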
static int __init init(void)
{
	int err;

	mlx5_register_debugfs();
	mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq");
	if (!mlx5_core_wq) {
		err = -ENOMEM;
		goto err_debug;
	}
	mlx5_health_init();

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_health;

	return 0;

err_health:
	mlx5_health_cleanup();
	destroy_workqueue(mlx5_core_wq);
err_debug:
	mlx5_unregister_debugfs();
	return err;
}

static void __exit cleanup(void)
{
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_health_cleanup();
	destroy_workqueue(mlx5_core_wq);
	mlx5_unregister_debugfs();
}

module_init(init);
module_exit(cleanup);