Lines Matching +full:interrupt +full:-affinity in drivers/infiniband/hw/hfi1/affinity.c (Linux kernel)

2  * Copyright(c) 2015 - 2018 Intel Corporation.
24 * - Redistributions of source code must retain the above copyright
26 * - Redistributions in binary form must reproduce the above copyright
30 * - Neither the name of Intel Corporation nor the names of its
50 #include <linux/interrupt.h>
53 #include "affinity.h"
75 cpumask_clear(&set->mask); in init_cpu_mask_set()
76 cpumask_clear(&set->used); in init_cpu_mask_set()
77 set->gen = 0; in init_cpu_mask_set()
83 if (cpumask_equal(&set->mask, &set->used)) { in _cpu_mask_set_gen_inc()
88 set->gen++; in _cpu_mask_set_gen_inc()
89 cpumask_clear(&set->used); in _cpu_mask_set_gen_inc()
95 if (cpumask_empty(&set->used) && set->gen) { in _cpu_mask_set_gen_dec()
96 set->gen--; in _cpu_mask_set_gen_dec()
97 cpumask_copy(&set->used, &set->mask); in _cpu_mask_set_gen_dec()
107 return -EINVAL; in cpu_mask_set_get_first()
112 cpumask_andnot(diff, &set->mask, &set->used); in cpu_mask_set_get_first()
116 cpu = -EINVAL; in cpu_mask_set_get_first()
118 cpumask_set_cpu(cpu, &set->used); in cpu_mask_set_get_first()
128 cpumask_clear_cpu(cpu, &set->used); in cpu_mask_set_put()
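
The fragments above come from the driver's generation-counted CPU set helpers: a set hands out CPUs from mask that are not yet in used, and once every CPU has been handed out it bumps gen and clears used to start another pass. Below is a minimal userspace sketch of that bookkeeping, using a 64-bit bitmap in place of a cpumask; the struct and function names are illustrative, not the driver's API, and the generation-decrement path taken on release is omitted for brevity.

/* Userspace model of the mask/used/gen bookkeeping (illustrative only). */
#include <stdint.h>
#include <stdio.h>

struct cpu_set_model {
	uint64_t mask;		/* CPUs owned by the set (models set->mask) */
	uint64_t used;		/* CPUs currently handed out (models set->used) */
	unsigned int gen;	/* bumped each time the whole set has been consumed */
};

static int cpu_set_get_first(struct cpu_set_model *s)
{
	uint64_t avail;
	int cpu;

	if (s->mask && s->mask == s->used) {	/* all handed out: start a new pass */
		s->gen++;
		s->used = 0;
	}
	avail = s->mask & ~s->used;
	if (!avail)
		return -1;			/* empty set */
	cpu = __builtin_ctzll(avail);		/* lowest-numbered available CPU */
	s->used |= 1ULL << cpu;
	return cpu;
}

static void cpu_set_put(struct cpu_set_model *s, int cpu)
{
	s->used &= ~(1ULL << cpu);		/* hand the CPU back to the set */
}

int main(void)
{
	struct cpu_set_model s = { .mask = 0x0f };	/* CPUs 0-3 */

	for (int i = 0; i < 6; i++) {
		int cpu = cpu_set_get_first(&s);
		printf("got cpu %d, gen %u\n", cpu, s.gen);
	}
	cpu_set_put(&s, 0);
	return 0;
}

Built with gcc or clang, the loop hands out CPUs 0-3, then wraps to a new generation and starts again at CPU 0.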
132 /* Initialize non-HT cpu cores mask */
185 * The real cpu mask is part of the affinity struct but it has to be in node_affinity_init()
194 return -ENOMEM; in node_affinity_init()
196 while (ids->vendor) { in node_affinity_init()
198 while ((dev = pci_get_device(ids->vendor, ids->device, dev))) { in node_affinity_init()
199 node = pcibus_to_node(dev->bus); in node_affinity_init()
225 free_percpu(entry->comp_vect_affinity); in node_affinity_destroy()
252 entry->node = node; in node_affinity_allocate()
253 entry->comp_vect_affinity = alloc_percpu(u16); in node_affinity_allocate()
254 INIT_LIST_HEAD(&entry->list); in node_affinity_allocate()
265 list_add_tail(&entry->list, &node_affinity.list); in node_affinity_add_tail()
276 if (entry->node == node) in node_affinity_lookup()
292 ret_cpu = -EINVAL; in per_cpu_affinity_get()
297 ret_cpu = -EINVAL; in per_cpu_affinity_get()
303 ret_cpu = -EINVAL; in per_cpu_affinity_get()
332 return -EINVAL; in per_cpu_affinity_put_max()
335 return -EINVAL; in per_cpu_affinity_put_max()
339 return -EINVAL; in per_cpu_affinity_put_max()
351 *per_cpu_ptr(comp_vect_affinity, max_cpu) -= 1; in per_cpu_affinity_put_max()
357 * Non-interrupt CPUs are used first, then interrupt CPUs.
367 struct cpu_mask_set *set = dd->comp_vect; in _dev_comp_vect_cpu_get()
371 cpu = -1; in _dev_comp_vect_cpu_get()
376 cpu = -1; in _dev_comp_vect_cpu_get()
382 cpumask_andnot(available_cpus, &set->mask, &set->used); in _dev_comp_vect_cpu_get()
386 &entry->def_intr.used); in _dev_comp_vect_cpu_get()
388 /* If there are non-interrupt CPUs available, use them first */ in _dev_comp_vect_cpu_get()
391 else /* Otherwise, use interrupt CPUs */ in _dev_comp_vect_cpu_get()
395 cpu = -1; in _dev_comp_vect_cpu_get()
398 cpumask_set_cpu(cpu, &set->used); in _dev_comp_vect_cpu_get()
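
_dev_comp_vect_cpu_get() follows the ordering stated in the comment above: completion vectors land on CPUs that are not serving interrupts when any are free, and only then fall back to interrupt CPUs. A compressed userspace sketch of that preference, with bitmaps standing in for cpumasks and pick_comp_vect_cpu() as an illustrative name:

/* Illustrative selection order: prefer CPUs not used for interrupts. */
#include <stdint.h>
#include <stdio.h>

static int pick_comp_vect_cpu(uint64_t avail, uint64_t intr_used)
{
	uint64_t non_intr = avail & ~intr_used;		/* CPUs free of interrupt handlers */
	uint64_t pool = non_intr ? non_intr : avail;	/* otherwise fall back to interrupt CPUs */

	if (!pool)
		return -1;
	return __builtin_ctzll(pool);			/* first CPU in the chosen pool */
}

int main(void)
{
	/* CPUs 0-3 available, CPUs 0-1 already serving interrupts. */
	printf("picked cpu %d\n", pick_comp_vect_cpu(0x0f, 0x03));	/* -> 2 */
	return 0;
}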
406 struct cpu_mask_set *set = dd->comp_vect; in _dev_comp_vect_cpu_put()
419 if (!dd->comp_vect_mappings) in _dev_comp_vect_mappings_destroy()
422 for (i = 0; i < dd->comp_vect_possible_cpus; i++) { in _dev_comp_vect_mappings_destroy()
423 cpu = dd->comp_vect_mappings[i]; in _dev_comp_vect_mappings_destroy()
425 dd->comp_vect_mappings[i] = -1; in _dev_comp_vect_mappings_destroy()
426 hfi1_cdbg(AFFINITY, in _dev_comp_vect_mappings_destroy()
428 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), cpu, i); in _dev_comp_vect_mappings_destroy()
431 kfree(dd->comp_vect_mappings); in _dev_comp_vect_mappings_destroy()
432 dd->comp_vect_mappings = NULL; in _dev_comp_vect_mappings_destroy()
450 return -ENOMEM; in _dev_comp_vect_mappings_create()
454 return -ENOMEM; in _dev_comp_vect_mappings_create()
457 dd->comp_vect_mappings = kcalloc(dd->comp_vect_possible_cpus, in _dev_comp_vect_mappings_create()
458 sizeof(*dd->comp_vect_mappings), in _dev_comp_vect_mappings_create()
460 if (!dd->comp_vect_mappings) { in _dev_comp_vect_mappings_create()
461 ret = -ENOMEM; in _dev_comp_vect_mappings_create()
464 for (i = 0; i < dd->comp_vect_possible_cpus; i++) in _dev_comp_vect_mappings_create()
465 dd->comp_vect_mappings[i] = -1; in _dev_comp_vect_mappings_create()
467 for (i = 0; i < dd->comp_vect_possible_cpus; i++) { in _dev_comp_vect_mappings_create()
471 ret = -EINVAL; in _dev_comp_vect_mappings_create()
475 dd->comp_vect_mappings[i] = cpu; in _dev_comp_vect_mappings_create()
476 hfi1_cdbg(AFFINITY, in _dev_comp_vect_mappings_create()
477 "[%s] Completion Vector %d -> CPU %d", in _dev_comp_vect_mappings_create()
478 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu); in _dev_comp_vect_mappings_create()
499 entry = node_affinity_lookup(dd->node); in hfi1_comp_vectors_set_up()
501 ret = -EINVAL; in hfi1_comp_vectors_set_up()
521 if (!dd->comp_vect_mappings) in hfi1_comp_vect_mappings_lookup()
522 return -EINVAL; in hfi1_comp_vect_mappings_lookup()
523 if (comp_vect >= dd->comp_vect_possible_cpus) in hfi1_comp_vect_mappings_lookup()
524 return -EINVAL; in hfi1_comp_vect_mappings_lookup()
526 return dd->comp_vect_mappings[comp_vect]; in hfi1_comp_vect_mappings_lookup()
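
hfi1_comp_vect_mappings_lookup() is a bounds-checked read of the comp_vect_mappings array built above. A tiny standalone model, assuming a plain int array and using -22 to stand in for -EINVAL:

/* Model of the completion-vector -> CPU lookup: a bounds-checked array read. */
#include <stdio.h>

static int comp_vect_to_cpu(const int *map, int n, int comp_vect)
{
	if (!map || comp_vect < 0 || comp_vect >= n)
		return -22;		/* mirrors the -EINVAL checks above */
	return map[comp_vect];
}

int main(void)
{
	int map[] = { 2, 3, 4, 5 };	/* completion vector i -> CPU map[i] */

	printf("%d\n", comp_vect_to_cpu(map, 4, 1));	/* -> 3 */
	printf("%d\n", comp_vect_to_cpu(map, 4, 9));	/* -> -22 */
	return 0;
}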
530 * It assumes dd->comp_vect_possible_cpus is available.
539 struct cpumask *dev_comp_vect_mask = &dd->comp_vect->mask; in _dev_comp_vect_cpu_mask_init()
549 if (cpumask_weight(&entry->comp_vect_mask) == 1) { in _dev_comp_vect_cpu_mask_init()
552 … "Number of kernel receive queues is too large for completion vector affinity to be effective\n"); in _dev_comp_vect_cpu_mask_init()
555 cpumask_weight(&entry->comp_vect_mask) / in _dev_comp_vect_cpu_mask_init()
556 hfi1_per_node_cntr[dd->node]; in _dev_comp_vect_cpu_mask_init()
564 cpumask_weight(&entry->comp_vect_mask) % in _dev_comp_vect_cpu_mask_init()
565 hfi1_per_node_cntr[dd->node] != 0) in _dev_comp_vect_cpu_mask_init()
569 dd->comp_vect_possible_cpus = possible_cpus_comp_vect; in _dev_comp_vect_cpu_mask_init()
572 for (i = 0; i < dd->comp_vect_possible_cpus; i++) { in _dev_comp_vect_cpu_mask_init()
573 curr_cpu = per_cpu_affinity_get(&entry->comp_vect_mask, in _dev_comp_vect_cpu_mask_init()
574 entry->comp_vect_affinity); in _dev_comp_vect_cpu_mask_init()
581 hfi1_cdbg(AFFINITY, in _dev_comp_vect_cpu_mask_init()
582 "[%s] Completion vector affinity CPU set(s) %*pbl", in _dev_comp_vect_cpu_mask_init()
583 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), in _dev_comp_vect_cpu_mask_init()
590 per_cpu_affinity_put_max(&entry->comp_vect_mask, in _dev_comp_vect_cpu_mask_init()
591 entry->comp_vect_affinity); in _dev_comp_vect_cpu_mask_init()
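
The sizing logic above divides the node's completion-vector mask among the HFI devices counted in hfi1_per_node_cntr[]. A toy calculation under the assumption of two devices sharing a 12-CPU mask; the remainder check above is left out:

/* Toy calculation: split a node's comp_vect CPUs among its devices. */
#include <stdio.h>

int main(void)
{
	int comp_vect_cpus = 12;	/* stands in for cpumask_weight(&entry->comp_vect_mask) */
	int devs_on_node = 2;		/* stands in for hfi1_per_node_cntr[dd->node] */

	printf("CPUs per device: %d\n", comp_vect_cpus / devs_on_node);	/* -> 6 */
	return 0;
}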
597 * It assumes dd->comp_vect_possible_cpus is available.
606 if (!dd->comp_vect_possible_cpus) in _dev_comp_vect_cpu_mask_clean_up()
609 for (i = 0; i < dd->comp_vect_possible_cpus; i++) { in _dev_comp_vect_cpu_mask_clean_up()
610 cpu = per_cpu_affinity_put_max(&dd->comp_vect->mask, in _dev_comp_vect_cpu_mask_clean_up()
611 entry->comp_vect_affinity); in _dev_comp_vect_cpu_mask_clean_up()
614 cpumask_clear_cpu(cpu, &dd->comp_vect->mask); in _dev_comp_vect_cpu_mask_clean_up()
617 dd->comp_vect_possible_cpus = 0; in _dev_comp_vect_cpu_mask_clean_up()
621 * Interrupt affinity.
623 * non-rcv avail gets a default mask that
633 int node = pcibus_to_node(dd->pcidev->bus); in hfi1_dev_affinity_init()
647 dd->node = node; in hfi1_dev_affinity_init()
649 local_mask = cpumask_of_node(dd->node); in hfi1_dev_affinity_init()
654 entry = node_affinity_lookup(dd->node); in hfi1_dev_affinity_init()
657 * If this is the first time this NUMA node's affinity is used, in hfi1_dev_affinity_init()
658 * create an entry in the global affinity structure and initialize it. in hfi1_dev_affinity_init()
664 "Unable to allocate global affinity node\n"); in hfi1_dev_affinity_init()
665 ret = -ENOMEM; in hfi1_dev_affinity_init()
670 init_cpu_mask_set(&entry->def_intr); in hfi1_dev_affinity_init()
671 init_cpu_mask_set(&entry->rcv_intr); in hfi1_dev_affinity_init()
672 cpumask_clear(&entry->comp_vect_mask); in hfi1_dev_affinity_init()
673 cpumask_clear(&entry->general_intr_mask); in hfi1_dev_affinity_init()
675 cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask, in hfi1_dev_affinity_init()
679 possible = cpumask_weight(&entry->def_intr.mask); in hfi1_dev_affinity_init()
680 curr_cpu = cpumask_first(&entry->def_intr.mask); in hfi1_dev_affinity_init()
684 cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask); in hfi1_dev_affinity_init()
685 cpumask_set_cpu(curr_cpu, &entry->general_intr_mask); in hfi1_dev_affinity_init()
690 * list and added to the general interrupt list. in hfi1_dev_affinity_init()
692 cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask); in hfi1_dev_affinity_init()
693 cpumask_set_cpu(curr_cpu, &entry->general_intr_mask); in hfi1_dev_affinity_init()
695 &entry->def_intr.mask); in hfi1_dev_affinity_init()
702 i < (dd->n_krcv_queues - 1) * in hfi1_dev_affinity_init()
703 hfi1_per_node_cntr[dd->node]; in hfi1_dev_affinity_init()
706 &entry->def_intr.mask); in hfi1_dev_affinity_init()
708 &entry->rcv_intr.mask); in hfi1_dev_affinity_init()
710 &entry->def_intr.mask); in hfi1_dev_affinity_init()
720 if (cpumask_weight(&entry->def_intr.mask) == 0) in hfi1_dev_affinity_init()
721 cpumask_copy(&entry->def_intr.mask, in hfi1_dev_affinity_init()
722 &entry->general_intr_mask); in hfi1_dev_affinity_init()
726 cpumask_and(&entry->comp_vect_mask, in hfi1_dev_affinity_init()
728 cpumask_andnot(&entry->comp_vect_mask, in hfi1_dev_affinity_init()
729 &entry->comp_vect_mask, in hfi1_dev_affinity_init()
730 &entry->rcv_intr.mask); in hfi1_dev_affinity_init()
731 cpumask_andnot(&entry->comp_vect_mask, in hfi1_dev_affinity_init()
732 &entry->comp_vect_mask, in hfi1_dev_affinity_init()
733 &entry->general_intr_mask); in hfi1_dev_affinity_init()
740 if (cpumask_weight(&entry->comp_vect_mask) == 0) in hfi1_dev_affinity_init()
741 cpumask_copy(&entry->comp_vect_mask, in hfi1_dev_affinity_init()
742 &entry->general_intr_mask); in hfi1_dev_affinity_init()
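
hfi1_dev_affinity_init() carves the node's physical CPUs into the pools used later: one general-interrupt CPU, a block reserved for receive interrupts, the rest for SDMA/default interrupts, and a completion-vector mask that avoids the receive and general CPUs. A userspace model of that carve-up with a 64-bit bitmap; the counts and fallbacks are simplified and the names are illustrative:

/* Userspace model of the mask carve-up done at device affinity init. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t node_cpus = 0xff;		/* physical CPUs on the device's NUMA node */
	uint64_t general, rcv, def, comp_vect;
	int n_rcv = 3;				/* receive-interrupt CPUs to reserve (illustrative) */

	general = node_cpus & -node_cpus;	/* first CPU: general interrupt */
	def = node_cpus & ~general;

	rcv = 0;
	for (int i = 0; i < n_rcv && def; i++) {	/* peel CPUs off for receive interrupts */
		uint64_t bit = def & -def;
		rcv |= bit;
		def &= ~bit;
	}
	if (!def)				/* fallback: share the general-interrupt CPU */
		def = general;

	comp_vect = node_cpus & ~rcv & ~general;	/* completion vectors avoid rcv/general CPUs */
	if (!comp_vect)
		comp_vect = general;

	printf("general %#llx rcv %#llx def %#llx comp_vect %#llx\n",
	       (unsigned long long)general, (unsigned long long)rcv,
	       (unsigned long long)def, (unsigned long long)comp_vect);
	return 0;
}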
767 if (dd->node < 0) in hfi1_dev_affinity_clean_up()
771 entry = node_affinity_lookup(dd->node); in hfi1_dev_affinity_clean_up()
782 dd->node = -1; in hfi1_dev_affinity_clean_up()
786 * Function updates the irq affinity hint for msix after it has been changed
792 struct sdma_engine *sde = msix->arg; in hfi1_update_sdma_affinity()
793 struct hfi1_devdata *dd = sde->dd; in hfi1_update_sdma_affinity()
798 if (cpu > num_online_cpus() || cpu == sde->cpu) in hfi1_update_sdma_affinity()
802 entry = node_affinity_lookup(dd->node); in hfi1_update_sdma_affinity()
806 old_cpu = sde->cpu; in hfi1_update_sdma_affinity()
807 sde->cpu = cpu; in hfi1_update_sdma_affinity()
808 cpumask_clear(&msix->mask); in hfi1_update_sdma_affinity()
809 cpumask_set_cpu(cpu, &msix->mask); in hfi1_update_sdma_affinity()
810 dd_dev_dbg(dd, "IRQ: %u, type %s engine %u -> cpu: %d\n", in hfi1_update_sdma_affinity()
811 msix->irq, irq_type_names[msix->type], in hfi1_update_sdma_affinity()
812 sde->this_idx, cpu); in hfi1_update_sdma_affinity()
813 irq_set_affinity_hint(msix->irq, &msix->mask); in hfi1_update_sdma_affinity()
819 set = &entry->def_intr; in hfi1_update_sdma_affinity()
820 cpumask_set_cpu(cpu, &set->mask); in hfi1_update_sdma_affinity()
821 cpumask_set_cpu(cpu, &set->used); in hfi1_update_sdma_affinity()
822 for (i = 0; i < dd->num_msix_entries; i++) { in hfi1_update_sdma_affinity()
825 other_msix = &dd->msix_entries[i]; in hfi1_update_sdma_affinity()
826 if (other_msix->type != IRQ_SDMA || other_msix == msix) in hfi1_update_sdma_affinity()
829 if (cpumask_test_cpu(old_cpu, &other_msix->mask)) in hfi1_update_sdma_affinity()
832 cpumask_clear_cpu(old_cpu, &set->mask); in hfi1_update_sdma_affinity()
833 cpumask_clear_cpu(old_cpu, &set->used); in hfi1_update_sdma_affinity()
853 * This is required by affinity notifier. We don't have anything to in hfi1_irq_notifier_release()
860 struct irq_affinity_notify *notify = &msix->notify; in hfi1_setup_sdma_notifier()
862 notify->irq = msix->irq; in hfi1_setup_sdma_notifier()
863 notify->notify = hfi1_irq_notifier_notify; in hfi1_setup_sdma_notifier()
864 notify->release = hfi1_irq_notifier_release; in hfi1_setup_sdma_notifier()
866 if (irq_set_affinity_notifier(notify->irq, notify)) in hfi1_setup_sdma_notifier()
867 pr_err("Failed to register sdma irq affinity notifier for irq %d\n", in hfi1_setup_sdma_notifier()
868 notify->irq); in hfi1_setup_sdma_notifier()
873 struct irq_affinity_notify *notify = &msix->notify; in hfi1_cleanup_sdma_notifier()
875 if (irq_set_affinity_notifier(notify->irq, NULL)) in hfi1_cleanup_sdma_notifier()
876 pr_err("Failed to cleanup sdma irq affinity notifier for irq %d\n", in hfi1_cleanup_sdma_notifier()
877 notify->irq); in hfi1_cleanup_sdma_notifier()
881 * Function sets the irq affinity for msix.
893 int cpu = -1; in get_irq_affinity()
896 cpumask_clear(&msix->mask); in get_irq_affinity()
898 entry = node_affinity_lookup(dd->node); in get_irq_affinity()
900 switch (msix->type) { in get_irq_affinity()
902 sde = (struct sdma_engine *)msix->arg; in get_irq_affinity()
903 scnprintf(extra, 64, "engine %u", sde->this_idx); in get_irq_affinity()
904 set = &entry->def_intr; in get_irq_affinity()
907 cpu = cpumask_first(&entry->general_intr_mask); in get_irq_affinity()
910 rcd = (struct hfi1_ctxtdata *)msix->arg; in get_irq_affinity()
911 if (rcd->ctxt == HFI1_CTRL_CTXT) in get_irq_affinity()
912 cpu = cpumask_first(&entry->general_intr_mask); in get_irq_affinity()
914 set = &entry->rcv_intr; in get_irq_affinity()
915 scnprintf(extra, 64, "ctxt %u", rcd->ctxt); in get_irq_affinity()
918 dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type); in get_irq_affinity()
919 return -EINVAL; in get_irq_affinity()
927 if (cpu == -1 && set) { in get_irq_affinity()
929 return -ENOMEM; in get_irq_affinity()
941 cpumask_set_cpu(cpu, &msix->mask); in get_irq_affinity()
942 dd_dev_info(dd, "IRQ: %u, type %s %s -> cpu: %d\n", in get_irq_affinity()
943 msix->irq, irq_type_names[msix->type], in get_irq_affinity()
945 irq_set_affinity_hint(msix->irq, &msix->mask); in get_irq_affinity()
947 if (msix->type == IRQ_SDMA) { in get_irq_affinity()
948 sde->cpu = cpu; in get_irq_affinity()
973 entry = node_affinity_lookup(dd->node); in hfi1_put_irq_affinity()
975 switch (msix->type) { in hfi1_put_irq_affinity()
977 set = &entry->def_intr; in hfi1_put_irq_affinity()
984 rcd = (struct hfi1_ctxtdata *)msix->arg; in hfi1_put_irq_affinity()
986 if (rcd->ctxt != HFI1_CTRL_CTXT) in hfi1_put_irq_affinity()
987 set = &entry->rcv_intr; in hfi1_put_irq_affinity()
995 cpumask_andnot(&set->used, &set->used, &msix->mask); in hfi1_put_irq_affinity()
999 irq_set_affinity_hint(msix->irq, NULL); in hfi1_put_irq_affinity()
1000 cpumask_clear(&msix->mask); in hfi1_put_irq_affinity()
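
get_irq_affinity() and hfi1_put_irq_affinity() key off msix->type: SDMA engines draw from the default interrupt set, the general interrupt and the control receive context pin to the single general-interrupt CPU, and other receive contexts draw from the receive set. A simplified dispatch sketch; the enum and function names are illustrative, not the driver's:

/* Illustrative mapping of IRQ type to CPU pool (simplified, not the driver). */
#include <stdio.h>

enum irq_kind { IRQ_SDMA_K, IRQ_GENERAL_K, IRQ_RCVCTXT_K };

static const char *pool_for(enum irq_kind kind, int is_ctrl_ctxt)
{
	switch (kind) {
	case IRQ_SDMA_K:
		return "def_intr set";		/* SDMA engines draw from the default pool */
	case IRQ_GENERAL_K:
		return "general_intr CPU";	/* single shared general-interrupt CPU */
	case IRQ_RCVCTXT_K:
		return is_ctrl_ctxt ? "general_intr CPU" : "rcv_intr set";
	}
	return "invalid";			/* unknown type: the driver returns -EINVAL */
}

int main(void)
{
	printf("%s\n", pool_for(IRQ_RCVCTXT_K, 0));	/* -> rcv_intr set */
	printf("%s\n", pool_for(IRQ_RCVCTXT_K, 1));	/* -> general_intr CPU */
	return 0;
}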
1006 struct hfi1_affinity_node_list *affinity) in find_hw_thread_mask() argument
1010 affinity->num_core_siblings / in find_hw_thread_mask()
1013 cpumask_copy(hw_thread_mask, &affinity->proc.mask); in find_hw_thread_mask()
1014 if (affinity->num_core_siblings > 0) { in find_hw_thread_mask()
1038 int cpu = -1, ret, i; in hfi1_get_proc_affinity()
1042 *proc_mask = &current->cpus_allowed; in hfi1_get_proc_affinity()
1043 struct hfi1_affinity_node_list *affinity = &node_affinity; in hfi1_get_proc_affinity() local
1044 struct cpu_mask_set *set = &affinity->proc; in hfi1_get_proc_affinity()
1047 * check whether process/context affinity has already in hfi1_get_proc_affinity()
1051 hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl", in hfi1_get_proc_affinity()
1052 current->pid, current->comm, in hfi1_get_proc_affinity()
1055 * Mark the pre-set CPU as used. This is atomic so we don't in hfi1_get_proc_affinity()
1059 cpumask_set_cpu(cpu, &set->used); in hfi1_get_proc_affinity()
1061 } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) { in hfi1_get_proc_affinity()
1062 hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl", in hfi1_get_proc_affinity()
1063 current->pid, current->comm, in hfi1_get_proc_affinity()
1069 * The process does not have a preset CPU affinity so find one to in hfi1_get_proc_affinity()
1102 mutex_lock(&affinity->lock); in hfi1_get_proc_affinity()
1110 * If NUMA node has CPUs used by interrupt handlers, include them in the in hfi1_get_proc_affinity()
1111 * interrupt handler mask. in hfi1_get_proc_affinity()
1115 cpumask_copy(intrs_mask, (entry->def_intr.gen ? in hfi1_get_proc_affinity()
1116 &entry->def_intr.mask : in hfi1_get_proc_affinity()
1117 &entry->def_intr.used)); in hfi1_get_proc_affinity()
1118 cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ? in hfi1_get_proc_affinity()
1119 &entry->rcv_intr.mask : in hfi1_get_proc_affinity()
1120 &entry->rcv_intr.used)); in hfi1_get_proc_affinity()
1121 cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask); in hfi1_get_proc_affinity()
1126 cpumask_copy(hw_thread_mask, &set->mask); in hfi1_get_proc_affinity()
1132 if (affinity->num_core_siblings > 0) { in hfi1_get_proc_affinity()
1133 for (i = 0; i < affinity->num_core_siblings; i++) { in hfi1_get_proc_affinity()
1134 find_hw_thread_mask(i, hw_thread_mask, affinity); in hfi1_get_proc_affinity()
1142 * (set->mask == set->used) before this loop. in hfi1_get_proc_affinity()
1144 cpumask_andnot(diff, hw_thread_mask, &set->used); in hfi1_get_proc_affinity()
1158 cpumask_andnot(available_mask, available_mask, &set->used); in hfi1_get_proc_affinity()
1164 * CPUs as interrupt handlers. Then, CPUs running interrupt in hfi1_get_proc_affinity()
1168 * non-interrupt handlers available, so diff gets copied in hfi1_get_proc_affinity()
1170 * 2) If diff is empty, then all CPUs not running interrupt in hfi1_get_proc_affinity()
1172 * available CPUs running interrupt handlers. in hfi1_get_proc_affinity()
1184 cpumask_andnot(available_mask, hw_thread_mask, &set->used); in hfi1_get_proc_affinity()
1193 * CPUs as interrupt handlers. in hfi1_get_proc_affinity()
1204 cpu = -1; in hfi1_get_proc_affinity()
1206 cpumask_set_cpu(cpu, &set->used); in hfi1_get_proc_affinity()
1208 mutex_unlock(&affinity->lock); in hfi1_get_proc_affinity()
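
hfi1_get_proc_affinity() honors a pre-set single-CPU affinity if the process has one, and otherwise prefers CPUs on the device's node that are not running interrupt handlers, falling back to interrupt CPUs only when nothing else is free. A simplified userspace sketch of that priority, ignoring the hyper-thread sibling handling and cross-NUMA fallbacks done by the real code; names are illustrative:

/* Illustrative priority for placing a user process (not the driver's code). */
#include <stdint.h>
#include <stdio.h>

static int pick_proc_cpu(uint64_t proc_allowed, uint64_t node_cpus,
			 uint64_t intr_cpus, uint64_t *used)
{
	uint64_t pool;
	int cpu;

	if (__builtin_popcountll(proc_allowed) == 1) {	/* pre-pinned: honor and mark used */
		cpu = __builtin_ctzll(proc_allowed);
		*used |= 1ULL << cpu;
		return cpu;
	}
	pool = node_cpus & ~*used & ~intr_cpus;		/* prefer non-interrupt CPUs */
	if (!pool)
		pool = node_cpus & ~*used;		/* fall back to interrupt CPUs */
	if (!pool)
		return -1;
	cpu = __builtin_ctzll(pool);
	*used |= 1ULL << cpu;
	return cpu;
}

int main(void)
{
	uint64_t used = 0;

	/* 8 CPUs on the node, CPUs 0-1 run interrupt handlers, process not pinned. */
	printf("cpu %d\n", pick_proc_cpu(0xff, 0xff, 0x03, &used));	/* -> 2 */
	printf("cpu %d\n", pick_proc_cpu(0xff, 0xff, 0x03, &used));	/* -> 3 */
	return 0;
}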
1224 struct hfi1_affinity_node_list *affinity = &node_affinity; in hfi1_put_proc_affinity() local
1225 struct cpu_mask_set *set = &affinity->proc; in hfi1_put_proc_affinity()
1230 mutex_lock(&affinity->lock); in hfi1_put_proc_affinity()
1233 mutex_unlock(&affinity->lock); in hfi1_put_proc_affinity()