// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "mlx5_irq.h"
#include "lib/sf.h"
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif

#define MLX5_MAX_IRQ_NAME (32)
/* max irq_index is 2047, so four chars */
#define MLX5_MAX_IRQ_IDX_CHARS (4)

#define MLX5_SFS_PER_CTRL_IRQ 64
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
#define MLX5_IRQ_VEC_COMP_BASE_SF 2

#define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
#define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
#define MLX5_EQ_SHARE_IRQ_MIN_COMP (1)
#define MLX5_EQ_SHARE_IRQ_MIN_CTRL (4)
#define MLX5_EQ_REFS_PER_IRQ (2)

struct mlx5_irq {
        struct atomic_notifier_head nh;
        cpumask_var_t mask;
        char name[MLX5_MAX_IRQ_NAME];
        struct mlx5_irq_pool *pool;
        int refcount;
        u32 index;
        int irqn;
};

struct mlx5_irq_pool {
        char name[MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS];
        struct xa_limit xa_num_irqs;
        struct mutex lock; /* sync IRQs creations */
        struct xarray irqs;
        u32 max_threshold;
        u32 min_threshold;
        struct mlx5_core_dev *dev;
};

struct mlx5_irq_table {
        struct mlx5_irq_pool *pf_pool;
        struct mlx5_irq_pool *sf_ctrl_pool;
        struct mlx5_irq_pool *sf_comp_pool;
};
/**
 * mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
 * to be assigned to each VF.
 * @dev: PF to work on
 * @num_vfs: Number of enabled VFs
 */
int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs)
{
        int num_vf_msix, min_msix, max_msix;

        num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
        if (!num_vf_msix)
                return 0;

        min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
        max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);

        /* Limit maximum number of MSI-X vectors so the default configuration
         * has some available in the pool. This will allow the user to increase
         * the number of vectors in a VF without having to first size-down other
         * VFs.
         */
        return max(min(num_vf_msix / num_vfs, max_msix / 2), min_msix);
}

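/* Worked example (illustrative values, not read from any real device):
 * with num_total_dynamic_vf_msix = 1024, 16 enabled VFs,
 * min_dynamic_vf_msix_table_size = 2 and max_dynamic_vf_msix_table_size = 64,
 * the default is max(min(1024 / 16, 64 / 2), 2) = max(min(64, 32), 2) = 32
 * vectors per VF, leaving half of the per-VF maximum available in the pool.
 */
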
/**
 * mlx5_set_msix_vec_count - Set dynamically allocated MSI-X on the VF
 * @dev: PF to work on
 * @function_id: Internal PCI VF function ID
 * @msix_vec_count: Number of MSI-X vectors to set
 */
int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
                            int msix_vec_count)
{
        int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
        int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
        void *hca_cap = NULL, *query_cap = NULL, *cap;
        int num_vf_msix, min_msix, max_msix;
        int ret;

        num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
        if (!num_vf_msix)
                return 0;

        if (!MLX5_CAP_GEN(dev, vport_group_manager) || !mlx5_core_is_pf(dev))
                return -EOPNOTSUPP;

        min_msix = MLX5_CAP_GEN(dev, min_dynamic_vf_msix_table_size);
        max_msix = MLX5_CAP_GEN(dev, max_dynamic_vf_msix_table_size);

        if (msix_vec_count < min_msix)
                return -EINVAL;

        if (msix_vec_count > max_msix)
                return -EOVERFLOW;

        query_cap = kzalloc(query_sz, GFP_KERNEL);
        hca_cap = kzalloc(set_sz, GFP_KERNEL);
        if (!hca_cap || !query_cap) {
                ret = -ENOMEM;
                goto out;
        }

        ret = mlx5_vport_get_other_func_cap(dev, function_id, query_cap);
        if (ret)
                goto out;

        cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
        memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
               MLX5_UN_SZ_BYTES(hca_cap_union));
        MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count);

        MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
        MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
        MLX5_SET(set_hca_cap_in, hca_cap, function_id, function_id);

        MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
                 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
        ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
out:
        kfree(hca_cap);
        kfree(query_cap);
        return ret;
}

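/* Usage sketch (illustrative; not a caller that exists in this file): a
 * devlink/SR-IOV handler running on the PF could resize a VF's MSI-X table
 * like this, assuming "vf_function_id" is the VF's internal function ID
 * already known to the caller.
 *
 *        int err;
 *
 *        err = mlx5_set_msix_vec_count(dev, vf_function_id, 8);
 *        if (err)
 *                mlx5_core_warn(dev, "Failed to set VF MSI-X count, err %d\n", err);
 */
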
static void mlx5_system_free_irq(struct mlx5_irq *irq)
{
        /* free_irq() requires that the affinity hint and rmap are cleared
         * before calling it. This is why there is an asymmetry with set_rmap,
         * which should be called after alloc_irq but before request_irq.
         */
        irq_set_affinity_hint(irq->irqn, NULL);
        free_irq(irq->irqn, &irq->nh);
}

static void irq_release(struct mlx5_irq *irq)
{
        struct mlx5_irq_pool *pool = irq->pool;

        xa_erase(&pool->irqs, irq->index);
        mlx5_system_free_irq(irq);
        free_cpumask_var(irq->mask);
        kfree(irq);
}

static void irq_put(struct mlx5_irq *irq)
{
        struct mlx5_irq_pool *pool = irq->pool;

        mutex_lock(&pool->lock);
        irq->refcount--;
        if (!irq->refcount)
                irq_release(irq);
        mutex_unlock(&pool->lock);
}

static int irq_get_locked(struct mlx5_irq *irq)
{
        lockdep_assert_held(&irq->pool->lock);
        if (WARN_ON_ONCE(!irq->refcount))
                return 0;
        irq->refcount++;
        return 1;
}

static int irq_get(struct mlx5_irq *irq)
{
        int err;

        mutex_lock(&irq->pool->lock);
        err = irq_get_locked(irq);
        mutex_unlock(&irq->pool->lock);
        return err;
}

static irqreturn_t irq_int_handler(int irq, void *nh)
{
        atomic_notifier_call_chain(nh, 0, NULL);
        return IRQ_HANDLED;
}

static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
{
        snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx);
}

static void irq_set_name(char *name, int vecidx)
{
        if (vecidx == 0) {
                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx);
                return;
        }

        snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
                 vecidx - MLX5_IRQ_VEC_COMP_BASE);
}

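/* Naming example (the PCI address is illustrative): on a PF at 0000:08:00.0,
 * vector 0 is requested as "mlx5_async0@pci:0000:08:00.0"; assuming
 * MLX5_IRQ_VEC_COMP_BASE (defined in mlx5_irq.h) is 1, vector 1 becomes
 * "mlx5_comp0@pci:0000:08:00.0". SF pool vectors use the pool name as the
 * prefix instead, e.g. "mlx5_sf_comp<index>@pci:...".
 */
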
static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
{
        struct mlx5_core_dev *dev = pool->dev;
        char name[MLX5_MAX_IRQ_NAME];
        struct mlx5_irq *irq;
        int err;

        irq = kzalloc(sizeof(*irq), GFP_KERNEL);
        if (!irq)
                return ERR_PTR(-ENOMEM);
        irq->irqn = pci_irq_vector(dev->pdev, i);
        if (!pool->name[0])
                irq_set_name(name, i);
        else
                irq_sf_set_name(pool, name, i);
        ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
        snprintf(irq->name, MLX5_MAX_IRQ_NAME,
                 "%s@pci:%s", name, pci_name(dev->pdev));
        err = request_irq(irq->irqn, irq_int_handler, 0, irq->name,
                          &irq->nh);
        if (err) {
                mlx5_core_err(dev, "Failed to request irq. err = %d\n", err);
                goto err_req_irq;
        }
        if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
                mlx5_core_warn(dev, "zalloc_cpumask_var failed\n");
                err = -ENOMEM;
                goto err_cpumask;
        }
        irq->pool = pool;
        irq->refcount = 1;
        irq->index = i;
        err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
        if (err) {
                mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
                              irq->index, err);
                goto err_xa;
        }
        return irq;
err_xa:
        free_cpumask_var(irq->mask);
err_cpumask:
        free_irq(irq->irqn, &irq->nh);
err_req_irq:
        kfree(irq);
        return ERR_PTR(err);
}

int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
{
        int ret;

        ret = irq_get(irq);
        if (!ret)
                /* Something went very wrong here: we are enabling an EQ
                 * on a non-existing IRQ.
                 */
                return -ENOENT;
        ret = atomic_notifier_chain_register(&irq->nh, nb);
        if (ret)
                irq_put(irq);
        return ret;
}

int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
{
        int err = 0;

        err = atomic_notifier_chain_unregister(&irq->nh, nb);
        irq_put(irq);
        return err;
}

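/* Usage sketch (the handler and its container are hypothetical): consumers
 * hook into an IRQ through its notifier chain instead of calling
 * request_irq() themselves, which is what lets several EQs share one vector.
 *
 *        static int my_eq_int(struct notifier_block *nb, unsigned long type,
 *                             void *data)
 *        {
 *                // invoked from irq_int_handler() on every interrupt of the vector
 *                return NOTIFY_OK;
 *        }
 *
 *        my_eq->irq_nb.notifier_call = my_eq_int;
 *        err = mlx5_irq_attach_nb(irq, &my_eq->irq_nb);
 *        ...
 *        mlx5_irq_detach_nb(irq, &my_eq->irq_nb);
 */
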
struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
{
        return irq->mask;
}

int mlx5_irq_get_index(struct mlx5_irq *irq)
{
        return irq->index;
}

/* irq_pool API */

/* creating an irq from irq_pool */
static struct mlx5_irq *irq_pool_create_irq(struct mlx5_irq_pool *pool,
                                            struct cpumask *affinity)
{
        struct mlx5_irq *irq;
        u32 irq_index;
        int err;

        err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs,
                       GFP_KERNEL);
        if (err)
                return ERR_PTR(err);
        irq = irq_request(pool, irq_index);
        if (IS_ERR(irq))
                return irq;
        cpumask_copy(irq->mask, affinity);
        irq_set_affinity_hint(irq->irqn, irq->mask);
        return irq;
}

/* looking for the irq with the smallest refcount and the same affinity */
static struct mlx5_irq *irq_pool_find_least_loaded(struct mlx5_irq_pool *pool,
                                                   struct cpumask *affinity)
{
        int start = pool->xa_num_irqs.min;
        int end = pool->xa_num_irqs.max;
        struct mlx5_irq *irq = NULL;
        struct mlx5_irq *iter;
        unsigned long index;

        lockdep_assert_held(&pool->lock);
        xa_for_each_range(&pool->irqs, index, iter, start, end) {
                if (!cpumask_equal(iter->mask, affinity))
                        continue;
                if (iter->refcount < pool->min_threshold)
                        return iter;
                if (!irq || iter->refcount < irq->refcount)
                        irq = iter;
        }
        return irq;
}

/* requesting an irq from a given pool according to given affinity */
static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
                                                  struct cpumask *affinity)
{
        struct mlx5_irq *least_loaded_irq, *new_irq;

        mutex_lock(&pool->lock);
        least_loaded_irq = irq_pool_find_least_loaded(pool, affinity);
        if (least_loaded_irq &&
            least_loaded_irq->refcount < pool->min_threshold)
                goto out;
        new_irq = irq_pool_create_irq(pool, affinity);
        if (IS_ERR(new_irq)) {
                if (!least_loaded_irq) {
                        mlx5_core_err(pool->dev, "Didn't find a matching IRQ. err = %ld\n",
                                      PTR_ERR(new_irq));
                        mutex_unlock(&pool->lock);
                        return new_irq;
                }
                /* We failed to create a new IRQ for the requested affinity,
                 * so share an existing IRQ instead.
                 */
                goto out;
        }
        least_loaded_irq = new_irq;
        goto unlock;
out:
        irq_get_locked(least_loaded_irq);
        if (least_loaded_irq->refcount > pool->max_threshold)
                mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n",
                              least_loaded_irq->irqn, pool->name,
                              least_loaded_irq->refcount / MLX5_EQ_REFS_PER_IRQ);
unlock:
        mutex_unlock(&pool->lock);
        return least_loaded_irq;
}

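/* Threshold example, using the constants at the top of this file: a pool
 * created with MLX5_EQ_SHARE_IRQ_MIN_COMP / MLX5_EQ_SHARE_IRQ_MAX_COMP (e.g.
 * the SF completion pool in irq_pools_init()) has min_threshold = 1 *
 * MLX5_EQ_REFS_PER_IRQ = 2 and max_threshold = 8 * MLX5_EQ_REFS_PER_IRQ = 16.
 * An existing IRQ with a matching affinity is reused outright while its
 * refcount is below 2; otherwise a new IRQ is preferred and sharing is only a
 * fallback. Once a shared IRQ's refcount exceeds 16, the pool merely emits a
 * debug message noting that the IRQ is overloaded.
 */
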
/* requesting an irq from a given pool according to given index */
static struct mlx5_irq *
irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
                        struct cpumask *affinity)
{
        struct mlx5_irq *irq;

        mutex_lock(&pool->lock);
        irq = xa_load(&pool->irqs, vecidx);
        if (irq) {
                irq_get_locked(irq);
                goto unlock;
        }
        irq = irq_request(pool, vecidx);
        if (IS_ERR(irq) || !affinity)
                goto unlock;
        cpumask_copy(irq->mask, affinity);
        irq_set_affinity_hint(irq->irqn, irq->mask);
unlock:
        mutex_unlock(&pool->lock);
        return irq;
}

static struct mlx5_irq_pool *find_sf_irq_pool(struct mlx5_irq_table *irq_table,
                                              int i, struct cpumask *affinity)
{
        if (cpumask_empty(affinity) && i == MLX5_IRQ_EQ_CTRL)
                return irq_table->sf_ctrl_pool;
        return irq_table->sf_comp_pool;
}

/**
 * mlx5_irq_release - release an IRQ back to the system.
 * @irq: irq to be released.
 */
void mlx5_irq_release(struct mlx5_irq *irq)
{
        synchronize_irq(irq->irqn);
        irq_put(irq);
}

/**
 * mlx5_irq_request - request an IRQ for mlx5 device.
 * @dev: mlx5 device that is requesting the IRQ.
 * @vecidx: vector index of the IRQ. This argument is ignored if affinity is
 * provided.
 * @affinity: cpumask requested for this IRQ.
 *
 * This function returns a pointer to the IRQ, or ERR_PTR in case of an error.
 */
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
                                  struct cpumask *affinity)
{
        struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
        struct mlx5_irq_pool *pool;
        struct mlx5_irq *irq;

        if (mlx5_core_is_sf(dev)) {
                pool = find_sf_irq_pool(irq_table, vecidx, affinity);
                if (!pool)
                        /* we don't have IRQs for SFs, use the PF IRQs */
                        goto pf_irq;
                if (cpumask_empty(affinity) && !strcmp(pool->name, "mlx5_sf_comp"))
                        /* In case an SF user requests an IRQ by vecidx */
                        irq = irq_pool_request_vector(pool, vecidx, NULL);
                else
                        irq = irq_pool_request_affinity(pool, affinity);
                goto out;
        }
pf_irq:
        pool = irq_table->pf_pool;
        irq = irq_pool_request_vector(pool, vecidx, affinity);
out:
        if (IS_ERR(irq))
                return irq;
        mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
                      irq->irqn, cpumask_pr_args(affinity),
                      irq->refcount / MLX5_EQ_REFS_PER_IRQ);
        return irq;
}

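/* Usage sketch (the vector index and CPU are illustrative): an EQ creator
 * typically requests a vector together with an affinity hint and releases it
 * when the EQ is destroyed.
 *
 *        cpumask_var_t mask;
 *        struct mlx5_irq *irq;
 *
 *        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *                return -ENOMEM;
 *        cpumask_set_cpu(0, mask);
 *        irq = mlx5_irq_request(dev, 1, mask);
 *        free_cpumask_var(mask);
 *        if (IS_ERR(irq))
 *                return PTR_ERR(irq);
 *        ...
 *        mlx5_irq_release(irq);
 */
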
static struct mlx5_irq_pool *
irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
               u32 min_threshold, u32 max_threshold)
{
        struct mlx5_irq_pool *pool = kvzalloc(sizeof(*pool), GFP_KERNEL);

        if (!pool)
                return ERR_PTR(-ENOMEM);
        pool->dev = dev;
        mutex_init(&pool->lock);
        xa_init_flags(&pool->irqs, XA_FLAGS_ALLOC);
        pool->xa_num_irqs.min = start;
        pool->xa_num_irqs.max = start + size - 1;
        if (name)
                snprintf(pool->name, MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS,
                         "%s", name);
        pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
        pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
        mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
                      name, size, start);
        return pool;
}

static void irq_pool_free(struct mlx5_irq_pool *pool)
{
        struct mlx5_irq *irq;
        unsigned long index;

        /* There are cases in which we are destroying the irq_table before
         * freeing all the IRQs, fast teardown for example. Hence, free the irqs
         * which might not have been freed.
         */
        xa_for_each(&pool->irqs, index, irq)
                irq_release(irq);
        xa_destroy(&pool->irqs);
        mutex_destroy(&pool->lock);
        kvfree(pool);
}

static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pf_vec)
{
        struct mlx5_irq_table *table = dev->priv.irq_table;
        int num_sf_ctrl_by_msix;
        int num_sf_ctrl_by_sfs;
        int num_sf_ctrl;
        int err;

        /* init pf_pool */
        table->pf_pool = irq_pool_alloc(dev, 0, pf_vec, NULL,
                                        MLX5_EQ_SHARE_IRQ_MIN_COMP,
                                        MLX5_EQ_SHARE_IRQ_MAX_COMP);
        if (IS_ERR(table->pf_pool))
                return PTR_ERR(table->pf_pool);
        if (!mlx5_sf_max_functions(dev))
                return 0;
        if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
                mlx5_core_dbg(dev, "Not enough IRQs for SFs. SF may run at lower performance\n");
                return 0;
        }

        /* init sf_ctrl_pool */
        num_sf_ctrl_by_msix = DIV_ROUND_UP(sf_vec, MLX5_COMP_EQS_PER_SF);
        num_sf_ctrl_by_sfs = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
                                          MLX5_SFS_PER_CTRL_IRQ);
        num_sf_ctrl = min_t(int, num_sf_ctrl_by_msix, num_sf_ctrl_by_sfs);
        num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl);
        table->sf_ctrl_pool = irq_pool_alloc(dev, pf_vec, num_sf_ctrl,
                                             "mlx5_sf_ctrl",
                                             MLX5_EQ_SHARE_IRQ_MIN_CTRL,
                                             MLX5_EQ_SHARE_IRQ_MAX_CTRL);
        if (IS_ERR(table->sf_ctrl_pool)) {
                err = PTR_ERR(table->sf_ctrl_pool);
                goto err_pf;
        }
        /* init sf_comp_pool */
        table->sf_comp_pool = irq_pool_alloc(dev, pf_vec + num_sf_ctrl,
                                             sf_vec - num_sf_ctrl, "mlx5_sf_comp",
                                             MLX5_EQ_SHARE_IRQ_MIN_COMP,
                                             MLX5_EQ_SHARE_IRQ_MAX_COMP);
        if (IS_ERR(table->sf_comp_pool)) {
                err = PTR_ERR(table->sf_comp_pool);
                goto err_sf_ctrl;
        }
        return 0;
err_sf_ctrl:
        irq_pool_free(table->sf_ctrl_pool);
err_pf:
        irq_pool_free(table->pf_pool);
        return err;
}

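/* Sizing example (pf_vec, sf_vec, the SF count and the value of
 * MLX5_COMP_EQS_PER_SF, which is defined in lib/sf.h, are illustrative here):
 * with pf_vec = 16, sf_vec = 64, MLX5_COMP_EQS_PER_SF = 8 and 256 max SFs,
 * num_sf_ctrl_by_msix = DIV_ROUND_UP(64, 8) = 8 and num_sf_ctrl_by_sfs =
 * DIV_ROUND_UP(256, 64) = 4, so num_sf_ctrl = min(8, 4) = 4, already below
 * MLX5_IRQ_CTRL_SF_MAX. The ctrl pool then spans vectors 16..19 and the comp
 * pool vectors 20..79.
 */
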
static void irq_pools_destroy(struct mlx5_irq_table *table)
{
        if (table->sf_ctrl_pool) {
                irq_pool_free(table->sf_comp_pool);
                irq_pool_free(table->sf_ctrl_pool);
        }
        irq_pool_free(table->pf_pool);
}

static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool)
{
        struct mlx5_irq *irq;
        unsigned long index;

        xa_for_each(&pool->irqs, index, irq)
                mlx5_system_free_irq(irq);
}

static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table)
{
        if (table->sf_ctrl_pool) {
                mlx5_irq_pool_free_irqs(table->sf_comp_pool);
                mlx5_irq_pool_free_irqs(table->sf_ctrl_pool);
        }
        mlx5_irq_pool_free_irqs(table->pf_pool);
}

/* irq_table API */

int mlx5_irq_table_init(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *irq_table;

        if (mlx5_core_is_sf(dev))
                return 0;

        irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
        if (!irq_table)
                return -ENOMEM;

        dev->priv.irq_table = irq_table;
        return 0;
}

void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
{
        if (mlx5_core_is_sf(dev))
                return;

        kvfree(dev->priv.irq_table);
}

int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
{
        return table->pf_pool->xa_num_irqs.max - table->pf_pool->xa_num_irqs.min;
}

int mlx5_irq_table_create(struct mlx5_core_dev *dev)
{
        int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
                      MLX5_CAP_GEN(dev, max_num_eqs) :
                      1 << MLX5_CAP_GEN(dev, log_max_eq);
        int total_vec;
        int pf_vec;
        int err;

        if (mlx5_core_is_sf(dev))
                return 0;

        pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
                 MLX5_IRQ_VEC_COMP_BASE;
        pf_vec = min_t(int, pf_vec, num_eqs);
        if (pf_vec <= MLX5_IRQ_VEC_COMP_BASE)
                return -ENOMEM;

        total_vec = pf_vec;
        if (mlx5_sf_max_functions(dev))
                total_vec += MLX5_IRQ_CTRL_SF_MAX +
                        MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);

        total_vec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
                                          total_vec, PCI_IRQ_MSIX);
        if (total_vec < 0)
                return total_vec;
        pf_vec = min(pf_vec, total_vec);

        err = irq_pools_init(dev, total_vec - pf_vec, pf_vec);
        if (err)
                pci_free_irq_vectors(dev->pdev);

        return err;
}

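/* Vector budget example (the CPU count is illustrative and
 * MLX5_IRQ_VEC_COMP_BASE, defined in mlx5_irq.h, is assumed to be 1 here): a
 * single-port PF on a 16-CPU host asks for pf_vec = 1 * 16 + 1 = 17 vectors,
 * capped by num_eqs. With SFs disabled, total_vec = 17; if the PCI core grants
 * fewer MSI-X vectors, pf_vec shrinks to the granted count, and the remainder
 * (total_vec - pf_vec) is what irq_pools_init() receives as the SF share.
 */
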
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *table = dev->priv.irq_table;

        if (mlx5_core_is_sf(dev))
                return;

        /* There are cases where IRQs are still in use when we reach this
         * point. Hence, make sure all the IRQs are released.
         */
        irq_pools_destroy(table);
        pci_free_irq_vectors(dev->pdev);
}

void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev)
{
        struct mlx5_irq_table *table = dev->priv.irq_table;

        if (mlx5_core_is_sf(dev))
                return;

        mlx5_irq_pools_free_irqs(table);
        pci_free_irq_vectors(dev->pdev);
}

int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
{
        if (table->sf_comp_pool)
                return min_t(int, num_online_cpus(),
                             table->sf_comp_pool->xa_num_irqs.max -
                             table->sf_comp_pool->xa_num_irqs.min + 1);
        else
                return mlx5_irq_table_get_num_comp(table);
}

struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_MLX5_SF
        if (mlx5_core_is_sf(dev))
                return dev->priv.parent_mdev->priv.irq_table;
#endif
        return dev->priv.irq_table;
}