// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif

#define MLX5_MAX_IRQ_NAME (32)

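/* Per-vector state: an atomic notifier chain fanning the hard IRQ out to
 * the EQs attached to the vector, the affinity mask hinted for it
 * (completion vectors only) and the name passed to request_irq().
 */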
struct mlx5_irq {
	struct atomic_notifier_head nh;
	cpumask_var_t mask;
	char name[MLX5_MAX_IRQ_NAME];
};

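/* Per-device IRQ table: one mlx5_irq for each of the nvec MSI-X vectors,
 * plus the aRFS reverse cpumap when CONFIG_RFS_ACCEL is enabled.
 */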
struct mlx5_irq_table {
	struct mlx5_irq *irq;
	int nvec;
#ifdef CONFIG_RFS_ACCEL
	struct cpu_rmap *rmap;
#endif
};

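/* Allocate the (still empty) IRQ table; the actual vectors are allocated
 * later, in mlx5_irq_table_create(), once device capabilities can be read.
 */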
int mlx5_irq_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_irq_table *irq_table;

	irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
	if (!irq_table)
		return -ENOMEM;

	dev->priv.irq_table = irq_table;
	return 0;
}

void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
{
	kvfree(dev->priv.irq_table);
}

int mlx5_irq_get_num_comp(struct mlx5_irq_table *table)
{
	return table->nvec - MLX5_IRQ_VEC_COMP_BASE;
}

static struct mlx5_irq *mlx5_irq_get(struct mlx5_core_dev *dev, int vecidx)
{
	struct mlx5_irq_table *irq_table = dev->priv.irq_table;

	return &irq_table->irq[vecidx];
}

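/* EQs consume interrupts by registering a notifier block on a vector's
 * chain, so multiple EQs may share one MSI-X vector. A minimal sketch of
 * a caller (illustrative only; the names below are hypothetical):
 *
 *	nb->notifier_call = my_eq_int;
 *	err = mlx5_irq_attach_nb(irq_table, vecidx, nb);
 */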
int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
		       struct notifier_block *nb)
{
	struct mlx5_irq *irq;

	irq = &irq_table->irq[vecidx];
	return atomic_notifier_chain_register(&irq->nh, nb);
}

int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx,
		       struct notifier_block *nb)
{
	struct mlx5_irq *irq;

	irq = &irq_table->irq[vecidx];
	return atomic_notifier_chain_unregister(&irq->nh, nb);
}

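/* Shared hard IRQ handler: @nh is the vector's notifier head, so the
 * interrupt is simply dispatched to every notifier attached to it.
 */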
static irqreturn_t mlx5_irq_int_handler(int irq, void *nh)
{
	atomic_notifier_call_chain(nh, 0, NULL);
	return IRQ_HANDLED;
}

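/* Vector 0 carries the async/control EQs ("mlx5_async"); all remaining
 * vectors are completion vectors, numbered relative to
 * MLX5_IRQ_VEC_COMP_BASE ("mlx5_comp0", "mlx5_comp1", ...).
 */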
static void irq_set_name(char *name, int vecidx)
{
	if (vecidx == 0) {
		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async");
		return;
	}

	snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
		 vecidx - MLX5_IRQ_VEC_COMP_BASE);
}

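/* Request an IRQ for each of the @nvec vectors, naming them
 * "<role>@pci:<bdf>" so they are easy to spot in /proc/interrupts; on
 * failure, free the IRQs requested so far in reverse order.
 */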
static int request_irqs(struct mlx5_core_dev *dev, int nvec)
{
	char name[MLX5_MAX_IRQ_NAME];
	int err;
	int i;

	for (i = 0; i < nvec; i++) {
		struct mlx5_irq *irq = mlx5_irq_get(dev, i);
		int irqn = pci_irq_vector(dev->pdev, i);

		irq_set_name(name, i);
		ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
		snprintf(irq->name, MLX5_MAX_IRQ_NAME,
			 "%s@pci:%s", name, pci_name(dev->pdev));
		err = request_irq(irqn, mlx5_irq_int_handler, 0, irq->name,
				  &irq->nh);
		if (err) {
			mlx5_core_err(dev, "Failed to request irq\n");
			goto err_request_irq;
		}
	}
	return 0;

err_request_irq:
	while (i--) {
		struct mlx5_irq *irq = mlx5_irq_get(dev, i);
		int irqn = pci_irq_vector(dev->pdev, i);

		free_irq(irqn, &irq->nh);
	}
	return err;
}

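/* Free the aRFS reverse map. Per the comment in mlx5_irq_table_destroy(),
 * this must be done before free_irq(). No-op without CONFIG_RFS_ACCEL.
 */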
static void irq_clear_rmap(struct mlx5_core_dev *dev)
{
#ifdef CONFIG_RFS_ACCEL
	struct mlx5_irq_table *irq_table = dev->priv.irq_table;

	free_irq_cpu_rmap(irq_table->rmap);
#endif
}

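/* Build the aRFS reverse map with one entry per completion vector, so
 * that received flows can be steered back to the CPU consuming them.
 * Compiles down to "return 0" when CONFIG_RFS_ACCEL is disabled.
 */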
static int irq_set_rmap(struct mlx5_core_dev *mdev)
{
	int err = 0;
#ifdef CONFIG_RFS_ACCEL
	struct mlx5_irq_table *irq_table = mdev->priv.irq_table;
	int num_affinity_vec;
	int vecidx;

	num_affinity_vec = mlx5_irq_get_num_comp(irq_table);
	irq_table->rmap = alloc_irq_cpu_rmap(num_affinity_vec);
	if (!irq_table->rmap) {
		err = -ENOMEM;
		mlx5_core_err(mdev, "Failed to allocate cpu_rmap. err %d", err);
		goto err_out;
	}

	vecidx = MLX5_IRQ_VEC_COMP_BASE;
	for (; vecidx < irq_table->nvec; vecidx++) {
		err = irq_cpu_rmap_add(irq_table->rmap,
				       pci_irq_vector(mdev->pdev, vecidx));
		if (err) {
			mlx5_core_err(mdev, "irq_cpu_rmap_add failed. err %d",
				      err);
			goto err_irq_cpu_rmap_add;
		}
	}
	return 0;

err_irq_cpu_rmap_add:
	irq_clear_rmap(mdev);
err_out:
#endif
	return err;
}

/* Completion IRQ vectors */

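/* Hint completion vector @i to a single CPU chosen by
 * cpumask_local_spread(): CPUs on the device's NUMA node are used first,
 * remote CPUs once the local node is exhausted.
 */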
static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	int vecidx = MLX5_IRQ_VEC_COMP_BASE + i;
	struct mlx5_irq *irq;
	int irqn;

	irq = mlx5_irq_get(mdev, vecidx);
	irqn = pci_irq_vector(mdev->pdev, vecidx);
	if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
		mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
		return -ENOMEM;
	}

	cpumask_set_cpu(cpumask_local_spread(i, mdev->priv.numa_node),
			irq->mask);
	if (IS_ENABLED(CONFIG_SMP) &&
	    irq_set_affinity_hint(irqn, irq->mask))
		mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x",
			       irqn);

	return 0;
}

static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	int vecidx = MLX5_IRQ_VEC_COMP_BASE + i;
	struct mlx5_irq *irq;
	int irqn;

	irq = mlx5_irq_get(mdev, vecidx);
	irqn = pci_irq_vector(mdev->pdev, vecidx);
	irq_set_affinity_hint(irqn, NULL);
	free_cpumask_var(irq->mask);
}

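/* Set affinity hints for all completion vectors, unwinding the ones
 * already set if any allocation fails.
 */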
static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
{
	int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
	int err;
	int i;

	for (i = 0; i < nvec; i++) {
		err = set_comp_irq_affinity_hint(mdev, i);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	for (i--; i >= 0; i--)
		clear_comp_irq_affinity_hint(mdev, i);

	return err;
}

static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
{
	int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
	int i;

	for (i = 0; i < nvec; i++)
		clear_comp_irq_affinity_hint(mdev, i);
}

struct cpumask *
mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx)
{
	return irq_table->irq[vecidx].mask;
}

#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *irq_table)
{
	return irq_table->rmap;
}
#endif

static void unrequest_irqs(struct mlx5_core_dev *dev)
{
	struct mlx5_irq_table *table = dev->priv.irq_table;
	int i;

	for (i = 0; i < table->nvec; i++)
		free_irq(pci_irq_vector(dev->pdev, i),
			 &mlx5_irq_get(dev, i)->nh);
}

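/* Allocate the MSI-X vectors, request their IRQs, build the aRFS rmap and
 * hint affinity. The budget is one completion vector per online CPU per
 * port plus the async base, capped by the device's EQ limit; e.g. a
 * single-port device on an 8-CPU host (illustrative numbers) asks for
 * 1 * 8 + MLX5_IRQ_VEC_COMP_BASE vectors and may be granted as few as
 * MLX5_IRQ_VEC_COMP_BASE + 1 by pci_alloc_irq_vectors().
 */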
int mlx5_irq_table_create(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_irq_table *table = priv->irq_table;
	int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
		      MLX5_CAP_GEN(dev, max_num_eqs) :
		      1 << MLX5_CAP_GEN(dev, log_max_eq);
	int nvec;
	int err;

	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
	       MLX5_IRQ_VEC_COMP_BASE;
	nvec = min_t(int, nvec, num_eqs);
	if (nvec <= MLX5_IRQ_VEC_COMP_BASE)
		return -ENOMEM;

	table->irq = kcalloc(nvec, sizeof(*table->irq), GFP_KERNEL);
	if (!table->irq)
		return -ENOMEM;

	nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
				     nvec, PCI_IRQ_MSIX);
	if (nvec < 0) {
		err = nvec;
		goto err_free_irq;
	}

	table->nvec = nvec;

	err = irq_set_rmap(dev);
	if (err)
		goto err_set_rmap;

	err = request_irqs(dev, nvec);
	if (err)
		goto err_request_irqs;

	err = set_comp_irq_affinity_hints(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to alloc affinity hint cpumask\n");
		goto err_set_affinity;
	}

	return 0;

err_set_affinity:
	unrequest_irqs(dev);
err_request_irqs:
	irq_clear_rmap(dev);
err_set_rmap:
	pci_free_irq_vectors(dev->pdev);
err_free_irq:
	kfree(table->irq);
	return err;
}

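/* Tear down in reverse of mlx5_irq_table_create(): rmap and affinity
 * hints first, then the IRQs, the MSI-X vectors and finally the table.
 */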
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_irq_table *table = dev->priv.irq_table;
	int i;

	/* free_irq() requires that affinity hints and the rmap be cleared
	 * before it is called. This is why there is asymmetry with set_rmap,
	 * which should be called after alloc_irq but before request_irq.
	 */
	irq_clear_rmap(dev);
	clear_comp_irqs_affinity_hints(dev);
	for (i = 0; i < table->nvec; i++)
		free_irq(pci_irq_vector(dev->pdev, i),
			 &mlx5_irq_get(dev, i)->nh);
	pci_free_irq_vectors(dev->pdev);
	kfree(table->irq);
}