1 // SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
2 /*
3 * Copyright(c) 2015 - 2020 Intel Corporation.
4 */
5
6 #include <linux/topology.h>
7 #include <linux/cpumask.h>
8 #include <linux/module.h>
9 #include <linux/interrupt.h>
10 #include <linux/numa.h>
11
12 #include "hfi.h"
13 #include "affinity.h"
14 #include "sdma.h"
15 #include "trace.h"
16
17 struct hfi1_affinity_node_list node_affinity = {
18 .list = LIST_HEAD_INIT(node_affinity.list),
19 .lock = __MUTEX_INITIALIZER(node_affinity.lock)
20 };
21
22 /* Name of IRQ types, indexed by enum irq_type */
23 static const char * const irq_type_names[] = {
24 "SDMA",
25 "RCVCTXT",
26 "NETDEVCTXT",
27 "GENERAL",
28 "OTHER",
29 };
30
31 /* Per NUMA node count of HFI devices */
32 static unsigned int *hfi1_per_node_cntr;
33
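/*
* A struct cpu_mask_set tracks which CPUs from 'mask' have been handed
* out via the 'used' mask, plus a generation counter so the set can be
* reused (overloaded) once every CPU has been consumed.
*
* A minimal usage sketch (hypothetical caller, error handling omitted):
*
*	cpu = cpu_mask_set_get_first(set, diff);
*	... bind work to 'cpu' ...
*	cpu_mask_set_put(set, cpu);
*/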
34 static inline void init_cpu_mask_set(struct cpu_mask_set *set)
35 {
36 cpumask_clear(&set->mask);
37 cpumask_clear(&set->used);
38 set->gen = 0;
39 }
40
41 /* Increment generation of CPU set if needed */
42 static void _cpu_mask_set_gen_inc(struct cpu_mask_set *set)
43 {
44 if (cpumask_equal(&set->mask, &set->used)) {
45 /*
46 * We've used up all the CPUs, bump up the generation
47 * and reset the 'used' map
48 */
49 set->gen++;
50 cpumask_clear(&set->used);
51 }
52 }
53
54 static void _cpu_mask_set_gen_dec(struct cpu_mask_set *set)
55 {
56 if (cpumask_empty(&set->used) && set->gen) {
57 set->gen--;
58 cpumask_copy(&set->used, &set->mask);
59 }
60 }
61
62 /* Get the first CPU from the list of unused CPUs in a CPU set data structure */
63 static int cpu_mask_set_get_first(struct cpu_mask_set *set, cpumask_var_t diff)
64 {
65 int cpu;
66
67 if (!diff || !set)
68 return -EINVAL;
69
70 _cpu_mask_set_gen_inc(set);
71
72 /* Find out CPUs left in CPU mask */
73 cpumask_andnot(diff, &set->mask, &set->used);
74
75 cpu = cpumask_first(diff);
76 if (cpu >= nr_cpu_ids) /* empty */
77 cpu = -EINVAL;
78 else
79 cpumask_set_cpu(cpu, &set->used);
80
81 return cpu;
82 }
83
84 static void cpu_mask_set_put(struct cpu_mask_set *set, int cpu)
85 {
86 if (!set)
87 return;
88
89 cpumask_clear_cpu(cpu, &set->used);
90 _cpu_mask_set_gen_dec(set);
91 }
92
93 /* Initialize non-HT cpu cores mask */
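/*
* For example, on a hypothetical 8-CPU system where HT siblings are
* enumerated as (0,4), (1,5), (2,6), (3,7): possible = 8, ht = 2, so
* step 1 keeps CPUs 0-3 as the "real" cores and step 2 clears CPUs 4-7
* from real_cpu_mask.
*/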
94 void init_real_cpu_mask(void)
95 {
96 int possible, curr_cpu, i, ht;
97
98 cpumask_clear(&node_affinity.real_cpu_mask);
99
100 /* Start with cpu online mask as the real cpu mask */
101 cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);
102
103 /*
104 * Remove HT cores from the real cpu mask. Do this in two steps below.
105 */
106 possible = cpumask_weight(&node_affinity.real_cpu_mask);
107 ht = cpumask_weight(topology_sibling_cpumask(
108 cpumask_first(&node_affinity.real_cpu_mask)));
109 /*
110 * Step 1. Skip over the first N HT siblings and use them as the
111 * "real" cores. Assumes that HT cores are not enumerated in
112 * succession (except in the single core case).
113 */
114 curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
115 for (i = 0; i < possible / ht; i++)
116 curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
117 /*
118 * Step 2. Remove the remaining HT siblings. Use cpumask_next() to
119 * skip any gaps.
120 */
121 for (; i < possible; i++) {
122 cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
123 curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
124 }
125 }
126
127 int node_affinity_init(void)
128 {
129 int node;
130 struct pci_dev *dev = NULL;
131 const struct pci_device_id *ids = hfi1_pci_tbl;
132
133 cpumask_clear(&node_affinity.proc.used);
134 cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);
135
136 node_affinity.proc.gen = 0;
137 node_affinity.num_core_siblings =
138 cpumask_weight(topology_sibling_cpumask(
139 cpumask_first(&node_affinity.proc.mask)
140 ));
141 node_affinity.num_possible_nodes = num_possible_nodes();
142 node_affinity.num_online_nodes = num_online_nodes();
143 node_affinity.num_online_cpus = num_online_cpus();
144
145 /*
146 * The real cpu mask is part of the affinity struct but it has to be
147 * initialized early. It is needed to calculate the number of user
148 * contexts in set_up_context_variables().
149 */
150 init_real_cpu_mask();
151
152 hfi1_per_node_cntr = kcalloc(node_affinity.num_possible_nodes,
153 sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
154 if (!hfi1_per_node_cntr)
155 return -ENOMEM;
156
157 while (ids->vendor) {
158 dev = NULL;
159 while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
160 node = pcibus_to_node(dev->bus);
161 if (node < 0)
162 goto out;
163
164 hfi1_per_node_cntr[node]++;
165 }
166 ids++;
167 }
168
169 return 0;
170
171 out:
172 /*
173 * Invalid PCI NUMA node information found, note it, and populate
174 * our database 1:1.
175 */
176 pr_err("HFI: Invalid PCI NUMA node. Performance may be affected\n");
177 pr_err("HFI: System BIOS may need to be upgraded\n");
178 for (node = 0; node < node_affinity.num_possible_nodes; node++)
179 hfi1_per_node_cntr[node] = 1;
180
181 pci_dev_put(dev);
182
183 return 0;
184 }
185
186 static void node_affinity_destroy(struct hfi1_affinity_node *entry)
187 {
188 free_percpu(entry->comp_vect_affinity);
189 kfree(entry);
190 }
191
192 void node_affinity_destroy_all(void)
193 {
194 struct list_head *pos, *q;
195 struct hfi1_affinity_node *entry;
196
197 mutex_lock(&node_affinity.lock);
198 list_for_each_safe(pos, q, &node_affinity.list) {
199 entry = list_entry(pos, struct hfi1_affinity_node,
200 list);
201 list_del(pos);
202 node_affinity_destroy(entry);
203 }
204 mutex_unlock(&node_affinity.lock);
205 kfree(hfi1_per_node_cntr);
206 }
207
208 static struct hfi1_affinity_node *node_affinity_allocate(int node)
209 {
210 struct hfi1_affinity_node *entry;
211
212 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
213 if (!entry)
214 return NULL;
215 entry->node = node;
216 entry->comp_vect_affinity = alloc_percpu(u16);
217 INIT_LIST_HEAD(&entry->list);
218
219 return entry;
220 }
221
222 /*
223 * It appends an entry to the list.
224 * It *must* be called with node_affinity.lock held.
225 */
226 static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
227 {
228 list_add_tail(&entry->list, &node_affinity.list);
229 }
230
231 /* It must be called with node_affinity.lock held */
232 static struct hfi1_affinity_node *node_affinity_lookup(int node)
233 {
234 struct list_head *pos;
235 struct hfi1_affinity_node *entry;
236
237 list_for_each(pos, &node_affinity.list) {
238 entry = list_entry(pos, struct hfi1_affinity_node, list);
239 if (entry->node == node)
240 return entry;
241 }
242
243 return NULL;
244 }
245
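/*
* comp_vect_affinity is a per-CPU count of how many completion vectors are
* currently pinned to each CPU. per_cpu_affinity_get() picks the least
* loaded CPU in the given mask and increments its count;
* per_cpu_affinity_put_max() finds the most loaded CPU, decrements its
* count, and returns it so the caller can release that CPU.
*/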
246 static int per_cpu_affinity_get(cpumask_var_t possible_cpumask,
247 u16 __percpu *comp_vect_affinity)
248 {
249 int curr_cpu;
250 u16 cntr;
251 u16 prev_cntr;
252 int ret_cpu;
253
254 if (!possible_cpumask) {
255 ret_cpu = -EINVAL;
256 goto fail;
257 }
258
259 if (!comp_vect_affinity) {
260 ret_cpu = -EINVAL;
261 goto fail;
262 }
263
264 ret_cpu = cpumask_first(possible_cpumask);
265 if (ret_cpu >= nr_cpu_ids) {
266 ret_cpu = -EINVAL;
267 goto fail;
268 }
269
270 prev_cntr = *per_cpu_ptr(comp_vect_affinity, ret_cpu);
271 for_each_cpu(curr_cpu, possible_cpumask) {
272 cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu);
273
274 if (cntr < prev_cntr) {
275 ret_cpu = curr_cpu;
276 prev_cntr = cntr;
277 }
278 }
279
280 *per_cpu_ptr(comp_vect_affinity, ret_cpu) += 1;
281
282 fail:
283 return ret_cpu;
284 }
285
286 static int per_cpu_affinity_put_max(cpumask_var_t possible_cpumask,
287 u16 __percpu *comp_vect_affinity)
288 {
289 int curr_cpu;
290 int max_cpu;
291 u16 cntr;
292 u16 prev_cntr;
293
294 if (!possible_cpumask)
295 return -EINVAL;
296
297 if (!comp_vect_affinity)
298 return -EINVAL;
299
300 max_cpu = cpumask_first(possible_cpumask);
301 if (max_cpu >= nr_cpu_ids)
302 return -EINVAL;
303
304 prev_cntr = *per_cpu_ptr(comp_vect_affinity, max_cpu);
305 for_each_cpu(curr_cpu, possible_cpumask) {
306 cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu);
307
308 if (cntr > prev_cntr) {
309 max_cpu = curr_cpu;
310 prev_cntr = cntr;
311 }
312 }
313
314 *per_cpu_ptr(comp_vect_affinity, max_cpu) -= 1;
315
316 return max_cpu;
317 }
318
319 /*
320 * Non-interrupt CPUs are used first, then interrupt CPUs.
321 * Two already allocated cpu masks must be passed.
322 */
323 static int _dev_comp_vect_cpu_get(struct hfi1_devdata *dd,
324 struct hfi1_affinity_node *entry,
325 cpumask_var_t non_intr_cpus,
326 cpumask_var_t available_cpus)
327 __must_hold(&node_affinity.lock)
328 {
329 int cpu;
330 struct cpu_mask_set *set = dd->comp_vect;
331
332 lockdep_assert_held(&node_affinity.lock);
333 if (!non_intr_cpus) {
334 cpu = -1;
335 goto fail;
336 }
337
338 if (!available_cpus) {
339 cpu = -1;
340 goto fail;
341 }
342
343 /* Available CPUs for pinning completion vectors */
344 _cpu_mask_set_gen_inc(set);
345 cpumask_andnot(available_cpus, &set->mask, &set->used);
346
347 /* Available CPUs without SDMA engine interrupts */
348 cpumask_andnot(non_intr_cpus, available_cpus,
349 &entry->def_intr.used);
350
351 /* If there are non-interrupt CPUs available, use them first */
352 if (!cpumask_empty(non_intr_cpus))
353 cpu = cpumask_first(non_intr_cpus);
354 else /* Otherwise, use interrupt CPUs */
355 cpu = cpumask_first(available_cpus);
356
357 if (cpu >= nr_cpu_ids) { /* empty */
358 cpu = -1;
359 goto fail;
360 }
361 cpumask_set_cpu(cpu, &set->used);
362
363 fail:
364 return cpu;
365 }
366
367 static void _dev_comp_vect_cpu_put(struct hfi1_devdata *dd, int cpu)
368 {
369 struct cpu_mask_set *set = dd->comp_vect;
370
371 if (cpu < 0)
372 return;
373
374 cpu_mask_set_put(set, cpu);
375 }
376
377 /* _dev_comp_vect_mappings_destroy() is reentrant */
378 static void _dev_comp_vect_mappings_destroy(struct hfi1_devdata *dd)
379 {
380 int i, cpu;
381
382 if (!dd->comp_vect_mappings)
383 return;
384
385 for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
386 cpu = dd->comp_vect_mappings[i];
387 _dev_comp_vect_cpu_put(dd, cpu);
388 dd->comp_vect_mappings[i] = -1;
389 hfi1_cdbg(AFFINITY,
390 "[%s] Release CPU %d from completion vector %d",
391 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), cpu, i);
392 }
393
394 kfree(dd->comp_vect_mappings);
395 dd->comp_vect_mappings = NULL;
396 }
397
398 /*
399 * This function creates the table for looking up CPUs for completion vectors.
400 * num_comp_vectors needs to have been initialized before calling this function.
401 */
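/*
* The resulting table is indexed by completion vector: comp_vect_mappings[i]
* is the CPU that completion vector i is pinned to, which is what
* hfi1_comp_vect_mappings_lookup() returns for that vector.
*/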
402 static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
403 struct hfi1_affinity_node *entry)
404 __must_hold(&node_affinity.lock)
405 {
406 int i, cpu, ret;
407 cpumask_var_t non_intr_cpus;
408 cpumask_var_t available_cpus;
409
410 lockdep_assert_held(&node_affinity.lock);
411
412 if (!zalloc_cpumask_var(&non_intr_cpus, GFP_KERNEL))
413 return -ENOMEM;
414
415 if (!zalloc_cpumask_var(&available_cpus, GFP_KERNEL)) {
416 free_cpumask_var(non_intr_cpus);
417 return -ENOMEM;
418 }
419
420 dd->comp_vect_mappings = kcalloc(dd->comp_vect_possible_cpus,
421 sizeof(*dd->comp_vect_mappings),
422 GFP_KERNEL);
423 if (!dd->comp_vect_mappings) {
424 ret = -ENOMEM;
425 goto fail;
426 }
427 for (i = 0; i < dd->comp_vect_possible_cpus; i++)
428 dd->comp_vect_mappings[i] = -1;
429
430 for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
431 cpu = _dev_comp_vect_cpu_get(dd, entry, non_intr_cpus,
432 available_cpus);
433 if (cpu < 0) {
434 ret = -EINVAL;
435 goto fail;
436 }
437
438 dd->comp_vect_mappings[i] = cpu;
439 hfi1_cdbg(AFFINITY,
440 "[%s] Completion Vector %d -> CPU %d",
441 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
442 }
443
444 free_cpumask_var(available_cpus);
445 free_cpumask_var(non_intr_cpus);
446 return 0;
447
448 fail:
449 free_cpumask_var(available_cpus);
450 free_cpumask_var(non_intr_cpus);
451 _dev_comp_vect_mappings_destroy(dd);
452
453 return ret;
454 }
455
456 int hfi1_comp_vectors_set_up(struct hfi1_devdata *dd)
457 {
458 int ret;
459 struct hfi1_affinity_node *entry;
460
461 mutex_lock(&node_affinity.lock);
462 entry = node_affinity_lookup(dd->node);
463 if (!entry) {
464 ret = -EINVAL;
465 goto unlock;
466 }
467 ret = _dev_comp_vect_mappings_create(dd, entry);
468 unlock:
469 mutex_unlock(&node_affinity.lock);
470
471 return ret;
472 }
473
474 void hfi1_comp_vectors_clean_up(struct hfi1_devdata *dd)
475 {
476 _dev_comp_vect_mappings_destroy(dd);
477 }
478
479 int hfi1_comp_vect_mappings_lookup(struct rvt_dev_info *rdi, int comp_vect)
480 {
481 struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
482 struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
483
484 if (!dd->comp_vect_mappings)
485 return -EINVAL;
486 if (comp_vect >= dd->comp_vect_possible_cpus)
487 return -EINVAL;
488
489 return dd->comp_vect_mappings[comp_vect];
490 }
491
492 /*
493 * It assumes dd->comp_vect_possible_cpus is available.
494 */
495 static int _dev_comp_vect_cpu_mask_init(struct hfi1_devdata *dd,
496 struct hfi1_affinity_node *entry,
497 bool first_dev_init)
498 __must_hold(&node_affinity.lock)
499 {
500 int i, j, curr_cpu;
501 int possible_cpus_comp_vect = 0;
502 struct cpumask *dev_comp_vect_mask = &dd->comp_vect->mask;
503
504 lockdep_assert_held(&node_affinity.lock);
505 /*
506 * If there's only one CPU available for completion vectors, then
507 * there will only be one completion vector available. Otherwise,
508 * the number of completion vectors available will be the number of
509 * available CPUs divided by the number of devices in the
510 * local NUMA node.
511 */
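/*
* For example, with a hypothetical 13 completion vector CPUs on the node
* and 2 HFI devices, each device gets 13 / 2 = 6 CPUs, and the first
* device initialized also gets the leftover CPU, for a total of 7.
*/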
512 if (cpumask_weight(&entry->comp_vect_mask) == 1) {
513 possible_cpus_comp_vect = 1;
514 dd_dev_warn(dd,
515 "Number of kernel receive queues is too large for completion vector affinity to be effective\n");
516 } else {
517 possible_cpus_comp_vect +=
518 cpumask_weight(&entry->comp_vect_mask) /
519 hfi1_per_node_cntr[dd->node];
520
521 /*
522 * If the available completion vector CPUs don't divide
523 * evenly among devices, then the first device to be
524 * initialized gets an extra CPU.
525 */
526 if (first_dev_init &&
527 cpumask_weight(&entry->comp_vect_mask) %
528 hfi1_per_node_cntr[dd->node] != 0)
529 possible_cpus_comp_vect++;
530 }
531
532 dd->comp_vect_possible_cpus = possible_cpus_comp_vect;
533
534 /* Reserving CPUs for device completion vector */
535 for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
536 curr_cpu = per_cpu_affinity_get(&entry->comp_vect_mask,
537 entry->comp_vect_affinity);
538 if (curr_cpu < 0)
539 goto fail;
540
541 cpumask_set_cpu(curr_cpu, dev_comp_vect_mask);
542 }
543
544 hfi1_cdbg(AFFINITY,
545 "[%s] Completion vector affinity CPU set(s) %*pbl",
546 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi),
547 cpumask_pr_args(dev_comp_vect_mask));
548
549 return 0;
550
551 fail:
552 for (j = 0; j < i; j++)
553 per_cpu_affinity_put_max(&entry->comp_vect_mask,
554 entry->comp_vect_affinity);
555
556 return curr_cpu;
557 }
558
559 /*
560 * It assumes dd->comp_vect_possible_cpus is available.
561 */
562 static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
563 struct hfi1_affinity_node *entry)
564 __must_hold(&node_affinity.lock)
565 {
566 int i, cpu;
567
568 lockdep_assert_held(&node_affinity.lock);
569 if (!dd->comp_vect_possible_cpus)
570 return;
571
572 for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
573 cpu = per_cpu_affinity_put_max(&dd->comp_vect->mask,
574 entry->comp_vect_affinity);
575 /* Clearing CPU in device completion vector cpu mask */
576 if (cpu >= 0)
577 cpumask_clear_cpu(cpu, &dd->comp_vect->mask);
578 }
579
580 dd->comp_vect_possible_cpus = 0;
581 }
582
583 /*
584 * Interrupt affinity.
585 *
586 * The default (non-receive, i.e. SDMA) set starts as the node's CPUs
587 * with hyper-threaded siblings removed, minus the general/control CPU
588 * and each CPU handed to a receive context.
589 *
590 * Receive contexts get node-relative CPU 1 onward (node-relative CPU 0
591 * serves the general/control context), wrapping back to node-relative
592 * CPU 1 as necessary.
593 */
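/*
* For example, on a hypothetical node with real CPUs 0-13, one HFI device
* and 8 kernel receive queues: CPU 0 becomes the general/control context
* CPU, CPUs 1-7 serve the remaining receive contexts, and CPUs 8-13 are
* left in the default (SDMA) interrupt mask.
*/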
594 int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
595 {
596 struct hfi1_affinity_node *entry;
597 const struct cpumask *local_mask;
598 int curr_cpu, possible, i, ret;
599 bool new_entry = false;
600
601 local_mask = cpumask_of_node(dd->node);
602 if (cpumask_first(local_mask) >= nr_cpu_ids)
603 local_mask = topology_core_cpumask(0);
604
605 mutex_lock(&node_affinity.lock);
606 entry = node_affinity_lookup(dd->node);
607
608 /*
609 * If this is the first time this NUMA node's affinity is used,
610 * create an entry in the global affinity structure and initialize it.
611 */
612 if (!entry) {
613 entry = node_affinity_allocate(dd->node);
614 if (!entry) {
615 dd_dev_err(dd,
616 "Unable to allocate global affinity node\n");
617 ret = -ENOMEM;
618 goto fail;
619 }
620 new_entry = true;
621
622 init_cpu_mask_set(&entry->def_intr);
623 init_cpu_mask_set(&entry->rcv_intr);
624 cpumask_clear(&entry->comp_vect_mask);
625 cpumask_clear(&entry->general_intr_mask);
626 /* Use the "real" cpu mask of this node as the default */
627 cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
628 local_mask);
629
630 /* fill in the receive list */
631 possible = cpumask_weight(&entry->def_intr.mask);
632 curr_cpu = cpumask_first(&entry->def_intr.mask);
633
634 if (possible == 1) {
635 /* only one CPU, everyone will use it */
636 cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
637 cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
638 } else {
639 /*
640 * The general/control context will be the first CPU in
641 * the default list, so it is removed from the default
642 * list and added to the general interrupt list.
643 */
644 cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask);
645 cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
646 curr_cpu = cpumask_next(curr_cpu,
647 &entry->def_intr.mask);
648
649 /*
650 * Remove the remaining kernel receive queues from
651 * the default list and add them to the receive list.
652 */
653 for (i = 0;
654 i < (dd->n_krcv_queues - 1) *
655 hfi1_per_node_cntr[dd->node];
656 i++) {
657 cpumask_clear_cpu(curr_cpu,
658 &entry->def_intr.mask);
659 cpumask_set_cpu(curr_cpu,
660 &entry->rcv_intr.mask);
661 curr_cpu = cpumask_next(curr_cpu,
662 &entry->def_intr.mask);
663 if (curr_cpu >= nr_cpu_ids)
664 break;
665 }
666
667 /*
668 * If there ends up being 0 CPU cores leftover for SDMA
669 * engines, use the same CPU cores as general/control
670 * context.
671 */
672 if (cpumask_weight(&entry->def_intr.mask) == 0)
673 cpumask_copy(&entry->def_intr.mask,
674 &entry->general_intr_mask);
675 }
676
677 /* Determine completion vector CPUs for the entire node */
678 cpumask_and(&entry->comp_vect_mask,
679 &node_affinity.real_cpu_mask, local_mask);
680 cpumask_andnot(&entry->comp_vect_mask,
681 &entry->comp_vect_mask,
682 &entry->rcv_intr.mask);
683 cpumask_andnot(&entry->comp_vect_mask,
684 &entry->comp_vect_mask,
685 &entry->general_intr_mask);
686
687 /*
688 * If there ends up being 0 CPU cores leftover for completion
689 * vectors, use the same CPU core as the general/control
690 * context.
691 */
692 if (cpumask_weight(&entry->comp_vect_mask) == 0)
693 cpumask_copy(&entry->comp_vect_mask,
694 &entry->general_intr_mask);
695 }
696
697 ret = _dev_comp_vect_cpu_mask_init(dd, entry, new_entry);
698 if (ret < 0)
699 goto fail;
700
701 if (new_entry)
702 node_affinity_add_tail(entry);
703
704 dd->affinity_entry = entry;
705 mutex_unlock(&node_affinity.lock);
706
707 return 0;
708
709 fail:
710 if (new_entry)
711 node_affinity_destroy(entry);
712 mutex_unlock(&node_affinity.lock);
713 return ret;
714 }
715
716 void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
717 {
718 struct hfi1_affinity_node *entry;
719
720 mutex_lock(&node_affinity.lock);
721 if (!dd->affinity_entry)
722 goto unlock;
723 entry = node_affinity_lookup(dd->node);
724 if (!entry)
725 goto unlock;
726
727 /*
728 * Free device completion vector CPUs to be used by future
729 * completion vectors
730 */
731 _dev_comp_vect_cpu_mask_clean_up(dd, entry);
732 unlock:
733 dd->affinity_entry = NULL;
734 mutex_unlock(&node_affinity.lock);
735 }
736
737 /*
738 * Update the IRQ affinity hint for an SDMA MSI-X vector after it has
739 * been changed by the user via the /proc/irq interface. Only a single
740 * CPU in the mask is supported.
741 */
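/*
* For example, a hypothetical "echo 4 > /proc/irq/<irq>/smp_affinity"
* (CPU 2) on an SDMA vector reaches this function via
* hfi1_irq_notifier_notify(); the engine is rebound to CPU 2 and the old
* CPU is released from the node's def_intr set unless another SDMA
* vector still uses it.
*/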
742 static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
743 {
744 struct sdma_engine *sde = msix->arg;
745 struct hfi1_devdata *dd = sde->dd;
746 struct hfi1_affinity_node *entry;
747 struct cpu_mask_set *set;
748 int i, old_cpu;
749
750 if (cpu > num_online_cpus() || cpu == sde->cpu)
751 return;
752
753 mutex_lock(&node_affinity.lock);
754 entry = node_affinity_lookup(dd->node);
755 if (!entry)
756 goto unlock;
757
758 old_cpu = sde->cpu;
759 sde->cpu = cpu;
760 cpumask_clear(&msix->mask);
761 cpumask_set_cpu(cpu, &msix->mask);
762 dd_dev_dbg(dd, "IRQ: %u, type %s engine %u -> cpu: %d\n",
763 msix->irq, irq_type_names[msix->type],
764 sde->this_idx, cpu);
765 irq_set_affinity_hint(msix->irq, &msix->mask);
766
767 /*
768 * Set the new cpu in the hfi1_affinity_node and clean
769 * the old cpu if it is not used by any other IRQ
770 */
771 set = &entry->def_intr;
772 cpumask_set_cpu(cpu, &set->mask);
773 cpumask_set_cpu(cpu, &set->used);
774 for (i = 0; i < dd->msix_info.max_requested; i++) {
775 struct hfi1_msix_entry *other_msix;
776
777 other_msix = &dd->msix_info.msix_entries[i];
778 if (other_msix->type != IRQ_SDMA || other_msix == msix)
779 continue;
780
781 if (cpumask_test_cpu(old_cpu, &other_msix->mask))
782 goto unlock;
783 }
784 cpumask_clear_cpu(old_cpu, &set->mask);
785 cpumask_clear_cpu(old_cpu, &set->used);
786 unlock:
787 mutex_unlock(&node_affinity.lock);
788 }
789
790 static void hfi1_irq_notifier_notify(struct irq_affinity_notify *notify,
791 const cpumask_t *mask)
792 {
793 int cpu = cpumask_first(mask);
794 struct hfi1_msix_entry *msix = container_of(notify,
795 struct hfi1_msix_entry,
796 notify);
797
798 /* Only one CPU configuration supported currently */
799 hfi1_update_sdma_affinity(msix, cpu);
800 }
801
802 static void hfi1_irq_notifier_release(struct kref *ref)
803 {
804 /*
805 * This is required by affinity notifier. We don't have anything to
806 * free here.
807 */
808 }
809
810 static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)
811 {
812 struct irq_affinity_notify *notify = &msix->notify;
813
814 notify->irq = msix->irq;
815 notify->notify = hfi1_irq_notifier_notify;
816 notify->release = hfi1_irq_notifier_release;
817
818 if (irq_set_affinity_notifier(notify->irq, notify))
819 pr_err("Failed to register sdma irq affinity notifier for irq %d\n",
820 notify->irq);
821 }
822
823 static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
824 {
825 struct irq_affinity_notify *notify = &msix->notify;
826
827 if (irq_set_affinity_notifier(notify->irq, NULL))
828 pr_err("Failed to cleanup sdma irq affinity notifier for irq %d\n",
829 notify->irq);
830 }
831
832 /*
833 * Function sets the irq affinity for msix.
834 * It *must* be called with node_affinity.lock held.
835 */
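/*
* CPU set selection below: SDMA and netdev context interrupts draw from
* the node's def_intr set, kernel receive contexts from rcv_intr, and the
* general interrupt and the control context are pinned to the first CPU
* in general_intr_mask.
*/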
836 static int get_irq_affinity(struct hfi1_devdata *dd,
837 struct hfi1_msix_entry *msix)
838 {
839 cpumask_var_t diff;
840 struct hfi1_affinity_node *entry;
841 struct cpu_mask_set *set = NULL;
842 struct sdma_engine *sde = NULL;
843 struct hfi1_ctxtdata *rcd = NULL;
844 char extra[64];
845 int cpu = -1;
846
847 extra[0] = '\0';
848 cpumask_clear(&msix->mask);
849
850 entry = node_affinity_lookup(dd->node);
851
852 switch (msix->type) {
853 case IRQ_SDMA:
854 sde = (struct sdma_engine *)msix->arg;
855 scnprintf(extra, 64, "engine %u", sde->this_idx);
856 set = &entry->def_intr;
857 break;
858 case IRQ_GENERAL:
859 cpu = cpumask_first(&entry->general_intr_mask);
860 break;
861 case IRQ_RCVCTXT:
862 rcd = (struct hfi1_ctxtdata *)msix->arg;
863 if (rcd->ctxt == HFI1_CTRL_CTXT)
864 cpu = cpumask_first(&entry->general_intr_mask);
865 else
866 set = &entry->rcv_intr;
867 scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
868 break;
869 case IRQ_NETDEVCTXT:
870 rcd = (struct hfi1_ctxtdata *)msix->arg;
871 set = &entry->def_intr;
872 scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
873 break;
874 default:
875 dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
876 return -EINVAL;
877 }
878
879 /*
880 * The general and control contexts are placed on a particular
881 * CPU, which is set above. Skip accounting for it. Everything else
882 * finds its CPU here.
883 */
884 if (cpu == -1 && set) {
885 if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
886 return -ENOMEM;
887
888 cpu = cpu_mask_set_get_first(set, diff);
889 if (cpu < 0) {
890 free_cpumask_var(diff);
891 dd_dev_err(dd, "Failure to obtain CPU for IRQ\n");
892 return cpu;
893 }
894
895 free_cpumask_var(diff);
896 }
897
898 cpumask_set_cpu(cpu, &msix->mask);
899 dd_dev_info(dd, "IRQ: %u, type %s %s -> cpu: %d\n",
900 msix->irq, irq_type_names[msix->type],
901 extra, cpu);
902 irq_set_affinity_hint(msix->irq, &msix->mask);
903
904 if (msix->type == IRQ_SDMA) {
905 sde->cpu = cpu;
906 hfi1_setup_sdma_notifier(msix);
907 }
908
909 return 0;
910 }
911
912 int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
913 {
914 int ret;
915
916 mutex_lock(&node_affinity.lock);
917 ret = get_irq_affinity(dd, msix);
918 mutex_unlock(&node_affinity.lock);
919 return ret;
920 }
921
922 void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
923 struct hfi1_msix_entry *msix)
924 {
925 struct cpu_mask_set *set = NULL;
926 struct hfi1_affinity_node *entry;
927
928 mutex_lock(&node_affinity.lock);
929 entry = node_affinity_lookup(dd->node);
930
931 switch (msix->type) {
932 case IRQ_SDMA:
933 set = &entry->def_intr;
934 hfi1_cleanup_sdma_notifier(msix);
935 break;
936 case IRQ_GENERAL:
937 /* Don't do accounting for general contexts */
938 break;
939 case IRQ_RCVCTXT: {
940 struct hfi1_ctxtdata *rcd = msix->arg;
941
942 /* Don't do accounting for control contexts */
943 if (rcd->ctxt != HFI1_CTRL_CTXT)
944 set = &entry->rcv_intr;
945 break;
946 }
947 case IRQ_NETDEVCTXT:
948 set = &entry->def_intr;
949 break;
950 default:
951 mutex_unlock(&node_affinity.lock);
952 return;
953 }
954
955 if (set) {
956 cpumask_andnot(&set->used, &set->used, &msix->mask);
957 _cpu_mask_set_gen_dec(set);
958 }
959
960 irq_set_affinity_hint(msix->irq, NULL);
961 cpumask_clear(&msix->mask);
962 mutex_unlock(&node_affinity.lock);
963 }
964
965 /* This should be called with node_affinity.lock held */
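/*
* For example, with a hypothetical 32 online CPUs, 2 sibling threads per
* core and 2 online NUMA nodes, num_cores_per_socket is 8; hw_thread_no 0
* selects the first 16 CPUs (one thread per physical core) and
* hw_thread_no 1 shifts that window left by 16 to select their siblings.
*/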
966 static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
967 struct hfi1_affinity_node_list *affinity)
968 {
969 int possible, curr_cpu, i;
970 uint num_cores_per_socket = node_affinity.num_online_cpus /
971 affinity->num_core_siblings /
972 node_affinity.num_online_nodes;
973
974 cpumask_copy(hw_thread_mask, &affinity->proc.mask);
975 if (affinity->num_core_siblings > 0) {
976 /* Removing other siblings not needed for now */
977 possible = cpumask_weight(hw_thread_mask);
978 curr_cpu = cpumask_first(hw_thread_mask);
979 for (i = 0;
980 i < num_cores_per_socket * node_affinity.num_online_nodes;
981 i++)
982 curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
983
984 for (; i < possible; i++) {
985 cpumask_clear_cpu(curr_cpu, hw_thread_mask);
986 curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
987 }
988
989 /* Identifying correct HW threads within physical cores */
990 cpumask_shift_left(hw_thread_mask, hw_thread_mask,
991 num_cores_per_socket *
992 node_affinity.num_online_nodes *
993 hw_thread_no);
994 }
995 }
996
997 int hfi1_get_proc_affinity(int node)
998 {
999 int cpu = -1, ret, i;
1000 struct hfi1_affinity_node *entry;
1001 cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
1002 const struct cpumask *node_mask,
1003 *proc_mask = current->cpus_ptr;
1004 struct hfi1_affinity_node_list *affinity = &node_affinity;
1005 struct cpu_mask_set *set = &affinity->proc;
1006
1007 /*
1008 * check whether process/context affinity has already
1009 * been set
1010 */
1011 if (current->nr_cpus_allowed == 1) {
1012 hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
1013 current->pid, current->comm,
1014 cpumask_pr_args(proc_mask));
1015 /*
1016 * Mark the pre-set CPU as used. This is atomic so we don't
1017 * need the lock
1018 */
1019 cpu = cpumask_first(proc_mask);
1020 cpumask_set_cpu(cpu, &set->used);
1021 goto done;
1022 } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {
1023 hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
1024 current->pid, current->comm,
1025 cpumask_pr_args(proc_mask));
1026 goto done;
1027 }
1028
1029 /*
1030 * The process does not have a preset CPU affinity so find one to
1031 * recommend using the following algorithm:
1032 *
1033 * For each user process that is opening a context on HFI Y:
1034 * a) If all cores are filled, reinitialize the bitmask
1035 * b) Fill real cores first, then HT cores (First set of HT
1036 * cores on all physical cores, then second set of HT core,
1037 * and, so on) in the following order:
1038 *
1039 * 1. Same NUMA node as HFI Y and not running an IRQ
1040 * handler
1041 * 2. Same NUMA node as HFI Y and running an IRQ handler
1042 * 3. Different NUMA node to HFI Y and not running an IRQ
1043 * handler
1044 * 4. Different NUMA node to HFI Y and running an IRQ
1045 * handler
1046 * c) Mark core as filled in the bitmask. As user processes are
1047 * done, clear cores from the bitmask.
1048 */
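/*
* The cpumasks allocated below implement the search order above:
* intrs_mask collects the CPUs used by this node's interrupt handlers,
* hw_thread_mask narrows the search to one HW thread per physical core
* at a time, available_mask holds the current candidate CPUs, and diff
* holds intermediate results such as the candidates with interrupt
* CPUs removed.
*/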
1049
1050 ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
1051 if (!ret)
1052 goto done;
1053 ret = zalloc_cpumask_var(&hw_thread_mask, GFP_KERNEL);
1054 if (!ret)
1055 goto free_diff;
1056 ret = zalloc_cpumask_var(&available_mask, GFP_KERNEL);
1057 if (!ret)
1058 goto free_hw_thread_mask;
1059 ret = zalloc_cpumask_var(&intrs_mask, GFP_KERNEL);
1060 if (!ret)
1061 goto free_available_mask;
1062
1063 mutex_lock(&affinity->lock);
1064 /*
1065 * If we've used all available HW threads, clear the mask and start
1066 * overloading.
1067 */
1068 _cpu_mask_set_gen_inc(set);
1069
1070 /*
1071 * If NUMA node has CPUs used by interrupt handlers, include them in the
1072 * interrupt handler mask.
1073 */
1074 entry = node_affinity_lookup(node);
1075 if (entry) {
1076 cpumask_copy(intrs_mask, (entry->def_intr.gen ?
1077 &entry->def_intr.mask :
1078 &entry->def_intr.used));
1079 cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ?
1080 &entry->rcv_intr.mask :
1081 &entry->rcv_intr.used));
1082 cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask);
1083 }
1084 hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
1085 cpumask_pr_args(intrs_mask));
1086
1087 cpumask_copy(hw_thread_mask, &set->mask);
1088
1089 /*
1090 * If HT cores are enabled, identify which HW threads within the
1091 * physical cores should be used.
1092 */
1093 if (affinity->num_core_siblings > 0) {
1094 for (i = 0; i < affinity->num_core_siblings; i++) {
1095 find_hw_thread_mask(i, hw_thread_mask, affinity);
1096
1097 /*
1098 * If there's at least one available core for this HW
1099 * thread number, stop looking for a core.
1100 *
1101 * diff will always be non-empty at least once in this
1102 * loop as the used mask gets reset when
1103 * (set->mask == set->used) before this loop.
1104 */
1105 cpumask_andnot(diff, hw_thread_mask, &set->used);
1106 if (!cpumask_empty(diff))
1107 break;
1108 }
1109 }
1110 hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl",
1111 cpumask_pr_args(hw_thread_mask));
1112
1113 node_mask = cpumask_of_node(node);
1114 hfi1_cdbg(PROC, "Device on NUMA %u, CPUs %*pbl", node,
1115 cpumask_pr_args(node_mask));
1116
1117 /* Get cpumask of available CPUs on preferred NUMA */
1118 cpumask_and(available_mask, hw_thread_mask, node_mask);
1119 cpumask_andnot(available_mask, available_mask, &set->used);
1120 hfi1_cdbg(PROC, "Available CPUs on NUMA %u: %*pbl", node,
1121 cpumask_pr_args(available_mask));
1122
1123 /*
1124 * At first, we don't want to place processes on the same
1125 * CPUs as interrupt handlers. Then, CPUs running interrupt
1126 * handlers are used.
1127 *
1128 * 1) If diff is not empty, then there are CPUs not running
1129 * interrupt handlers available, so diff gets copied
1130 * over to available_mask.
1131 * 2) If diff is empty, then all CPUs not running interrupt
1132 * handlers are taken, so available_mask contains all
1133 * available CPUs running interrupt handlers.
1134 * 3) If available_mask is empty, then all CPUs on the
1135 * preferred NUMA node are taken, so other NUMA nodes are
1136 * used for process assignments using the same method as
1137 * the preferred NUMA node.
1138 */
1139 cpumask_andnot(diff, available_mask, intrs_mask);
1140 if (!cpumask_empty(diff))
1141 cpumask_copy(available_mask, diff);
1142
1143 /* If we don't have CPUs on the preferred node, use other NUMA nodes */
1144 if (cpumask_empty(available_mask)) {
1145 cpumask_andnot(available_mask, hw_thread_mask, &set->used);
1146 /* Excluding preferred NUMA cores */
1147 cpumask_andnot(available_mask, available_mask, node_mask);
1148 hfi1_cdbg(PROC,
1149 "Preferred NUMA node cores are taken, cores available in other NUMA nodes: %*pbl",
1150 cpumask_pr_args(available_mask));
1151
1152 /*
1153 * At first, we don't want to place processes on the same
1154 * CPUs as interrupt handlers.
1155 */
1156 cpumask_andnot(diff, available_mask, intrs_mask);
1157 if (!cpumask_empty(diff))
1158 cpumask_copy(available_mask, diff);
1159 }
1160 hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl",
1161 cpumask_pr_args(available_mask));
1162
1163 cpu = cpumask_first(available_mask);
1164 if (cpu >= nr_cpu_ids) /* empty */
1165 cpu = -1;
1166 else
1167 cpumask_set_cpu(cpu, &set->used);
1168
1169 mutex_unlock(&affinity->lock);
1170 hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);
1171
1172 free_cpumask_var(intrs_mask);
1173 free_available_mask:
1174 free_cpumask_var(available_mask);
1175 free_hw_thread_mask:
1176 free_cpumask_var(hw_thread_mask);
1177 free_diff:
1178 free_cpumask_var(diff);
1179 done:
1180 return cpu;
1181 }
1182
1183 void hfi1_put_proc_affinity(int cpu)
1184 {
1185 struct hfi1_affinity_node_list *affinity = &node_affinity;
1186 struct cpu_mask_set *set = &affinity->proc;
1187
1188 if (cpu < 0)
1189 return;
1190
1191 mutex_lock(&affinity->lock);
1192 cpu_mask_set_put(set, cpu);
1193 hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
1194 mutex_unlock(&affinity->lock);
1195 }
1196