/kernel/linux/linux-5.10/kernel/irq/ |
D | cpuhotplug.c |
      59  const struct cpumask *affinity;  in migrate_one_irq() local
     109  affinity = irq_desc_get_pending_mask(desc);  in migrate_one_irq()
     111  affinity = irq_data_get_affinity_mask(d);  in migrate_one_irq()
     118  cpumask_copy(&available_cpus, affinity);  in migrate_one_irq()
     120  affinity = &available_cpus;  in migrate_one_irq()
     123  if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {  in migrate_one_irq()
     153  affinity = cpu_online_mask;  in migrate_one_irq()
     165  affinity = cpumask_of(cpumask_any(affinity));  in migrate_one_irq()
     167  affinity = cpu_online_mask;  in migrate_one_irq()
     178  err = irq_set_affinity_locked(d, affinity, false);  in migrate_one_irq()
     [all …]
|
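The cpuhotplug.c matches trace how migrate_one_irq() picks a new target when a CPU goes offline: it starts from the pending or current affinity mask, falls back to cpu_online_mask when nothing in the mask is still online, may collapse the mask to a single CPU, and reprograms the line with irq_set_affinity_locked(). A minimal sketch of that selection logic, compressing the real branches into one path (hypothetical helper name, kernel context assumed):

    /*
     * migrate_affinity_sketch(): simplified paraphrase of the fallback logic
     * shown above, not the kernel function itself. Assumes kernel context
     * where <linux/cpumask.h> and <linux/irq.h> are available.
     */
    #include <linux/cpumask.h>
    #include <linux/irq.h>

    static int migrate_affinity_sketch(struct irq_data *d,
                                       const struct cpumask *affinity)
    {
            /* No online CPU left in the requested mask? Use all online CPUs. */
            if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
                    affinity = cpu_online_mask;

            /* Collapse the mask to one arbitrary CPU out of it. */
            affinity = cpumask_of(cpumask_any(affinity));

            /* Reprogram the interrupt with the chosen mask. */
            return irq_set_affinity_locked(d, affinity, false);
    }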
D | irqdesc.c |
      57  if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,  in alloc_masks()
      64  free_cpumask_var(desc->irq_common_data.affinity);  in alloc_masks()
      74  free_cpumask_var(desc->irq_common_data.affinity);  in alloc_masks()
      82  const struct cpumask *affinity)  in desc_smp_init() argument
      84  if (!affinity)  in desc_smp_init()
      85  affinity = irq_default_affinity;  in desc_smp_init()
      86  cpumask_copy(desc->irq_common_data.affinity, affinity);  in desc_smp_init()
     100  desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }  in desc_smp_init() argument
     104  const struct cpumask *affinity, struct module *owner)  in desc_set_defaults() argument
     127  desc_smp_init(desc, node, affinity);  in desc_set_defaults()
     [all …]
|
D | msi.c |
      30  const struct irq_affinity_desc *affinity)  in alloc_msi_entry() argument
      41  if (affinity) {  in alloc_msi_entry()
      42  desc->affinity = kmemdup(affinity,  in alloc_msi_entry()
      43  nvec * sizeof(*desc->affinity), GFP_KERNEL);  in alloc_msi_entry()
      44  if (!desc->affinity) {  in alloc_msi_entry()
      55  kfree(entry->affinity);  in free_msi_entry()
     418  desc->affinity);  in __msi_domain_alloc_irqs()
|
D | devres.c |
     180  const struct irq_affinity_desc *affinity)  in __devm_irq_alloc_descs() argument
     189  base = __irq_alloc_descs(irq, from, cnt, node, owner, affinity);  in __devm_irq_alloc_descs()
|
/kernel/linux/linux-5.10/tools/testing/selftests/rseq/ |
D | basic_test.c |
      18  cpu_set_t affinity, test_affinity;  in test_cpu_pointer() local
      21  sched_getaffinity(0, sizeof(affinity), &affinity);  in test_cpu_pointer()
      24  if (CPU_ISSET(i, &affinity)) {  in test_cpu_pointer()
      35  sched_setaffinity(0, sizeof(affinity), &affinity);  in test_cpu_pointer()
|
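basic_test.c reads the calling thread's allowed CPUs with sched_getaffinity(), walks them with CPU_ISSET(), and re-pins the thread with sched_setaffinity() to check rseq's per-CPU pointer on each CPU it can run on. A standalone user-space sketch of that same loop (a hypothetical test program, not the selftest itself):

    /* Iterate over the CPUs this thread may run on, pin to each one in
     * turn, then restore the original affinity mask. */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            cpu_set_t affinity, single;
            int i;

            if (sched_getaffinity(0, sizeof(affinity), &affinity))
                    return 1;

            for (i = 0; i < CPU_SETSIZE; i++) {
                    if (!CPU_ISSET(i, &affinity))
                            continue;
                    CPU_ZERO(&single);
                    CPU_SET(i, &single);
                    if (sched_setaffinity(0, sizeof(single), &single) == 0)
                            printf("now pinned to CPU %d\n", i);
            }

            /* Put the original mask back before exiting. */
            return sched_setaffinity(0, sizeof(affinity), &affinity);
    }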
/kernel/linux/linux-5.10/tools/perf/util/ |
D | affinity.h |
       7  struct affinity {  struct
      13  void affinity__cleanup(struct affinity *a);  argument
      14  void affinity__set(struct affinity *a, int cpu);
      15  int affinity__setup(struct affinity *a);
|
D | affinity.c |
      24  int affinity__setup(struct affinity *a)  in affinity__setup()
      48  void affinity__set(struct affinity *a, int cpu)  in affinity__set()
      65  void affinity__cleanup(struct affinity *a)  in affinity__cleanup()
|
D | mmap.c |
      97  static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)  in perf_mmap__aio_bind() argument
     105  if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {  in perf_mmap__aio_bind()
     141  int cpu __maybe_unused, int affinity __maybe_unused)  in perf_mmap__aio_bind()
     175  ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);  in perf_mmap__aio_mmap()
     265  if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)  in perf_mmap__setup_affinity_mask()
     267  else if (mp->affinity == PERF_AFFINITY_CPU)  in perf_mmap__setup_affinity_mask()
     281  if (mp->affinity != PERF_AFFINITY_SYS &&  in mmap__mmap()
|
D | evlist.c |
     382  struct affinity affinity;  in evlist__disable() local
     386  if (affinity__setup(&affinity) < 0)  in evlist__disable()
     392  affinity__set(&affinity, cpu);  in evlist__disable()
     410  affinity__cleanup(&affinity);  in evlist__disable()
     423  struct affinity affinity;  in evlist__enable() local
     426  if (affinity__setup(&affinity) < 0)  in evlist__enable()
     430  affinity__set(&affinity, cpu);  in evlist__enable()
     440  affinity__cleanup(&affinity);  in evlist__enable()
     881  bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,  in evlist__mmap_ex() argument
     891  .affinity = affinity,  in evlist__mmap_ex()
     [all …]
|
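Together with the affinity.h and affinity.c entries above, the evlist.c matches show the usage pattern of perf's affinity helper: affinity__setup() once, affinity__set() before doing work on each CPU so the calling thread migrates next to the target CPU, and affinity__cleanup() at the end. A hedged sketch of that pattern, based only on the signatures listed above and assuming it is compiled inside tools/perf; the loop body is a placeholder for the per-CPU event operations:

    /* Pattern visible in evlist__disable()/evlist__enable(). */
    #include "util/affinity.h"

    static int walk_cpus_with_affinity(const int *cpus, int nr_cpus)
    {
            struct affinity affinity;
            int i;

            if (affinity__setup(&affinity) < 0)
                    return -1;

            for (i = 0; i < nr_cpus; i++) {
                    /* Move this thread close to the target CPU first, so the
                     * following per-CPU operations stay node-local. */
                    affinity__set(&affinity, cpus[i]);
                    /* ... per-CPU event operations would go here ... */
            }

            affinity__cleanup(&affinity);
            return 0;
    }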
/kernel/linux/linux-5.10/tools/virtio/ringtest/ |
D | run-on-all.sh |
      20  "$@" --host-affinity $HOST_AFFINITY --guest-affinity $cpu
      24  "$@" --host-affinity $HOST_AFFINITY
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/ |
D | affinity.c |
    1008  struct hfi1_affinity_node_list *affinity)  in find_hw_thread_mask() argument
    1012  affinity->num_core_siblings /  in find_hw_thread_mask()
    1015  cpumask_copy(hw_thread_mask, &affinity->proc.mask);  in find_hw_thread_mask()
    1016  if (affinity->num_core_siblings > 0) {  in find_hw_thread_mask()
    1045  struct hfi1_affinity_node_list *affinity = &node_affinity;  in hfi1_get_proc_affinity() local
    1046  struct cpu_mask_set *set = &affinity->proc;  in hfi1_get_proc_affinity()
    1104  mutex_lock(&affinity->lock);  in hfi1_get_proc_affinity()
    1134  if (affinity->num_core_siblings > 0) {  in hfi1_get_proc_affinity()
    1135  for (i = 0; i < affinity->num_core_siblings; i++) {  in hfi1_get_proc_affinity()
    1136  find_hw_thread_mask(i, hw_thread_mask, affinity);  in hfi1_get_proc_affinity()
     [all …]
|
/kernel/linux/linux-5.10/Documentation/core-api/irq/ |
D | irq-affinity.rst |
       2  SMP IRQ affinity
      14  IRQ affinity then the value will not change from the default of all cpus.
      16  /proc/irq/default_smp_affinity specifies default affinity mask that applies
      17  to all non-active IRQs. Once IRQ is allocated/activated its affinity bitmask
|
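irq-affinity.rst documents the /proc interface: /proc/irq/<IRQ>/smp_affinity holds the per-IRQ CPU bitmask, and /proc/irq/default_smp_affinity the default applied to IRQs that have not been activated yet. A small illustrative helper that writes such a mask; the IRQ number 19 and the mask 0xf (CPUs 0-3) are made-up example values, and the write requires root:

    /* Illustrative only: set a hypothetical IRQ 19's affinity to CPUs 0-3. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/irq/19/smp_affinity", "w");

            if (!f) {
                    perror("open smp_affinity");
                    return 1;
            }
            fprintf(f, "f\n");      /* hex bitmask: CPUs 0-3 */
            return fclose(f) ? 1 : 0;
    }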
D | index.rst |
       9  irq-affinity
|
/kernel/linux/linux-5.10/arch/arm64/kernel/ |
D | setup.c |
     114  u32 i, affinity, fs[4], bits[4], ls;  in smp_build_mpidr_hash() local
     128  affinity = MPIDR_AFFINITY_LEVEL(mask, i);  in smp_build_mpidr_hash()
     134  ls = fls(affinity);  in smp_build_mpidr_hash()
     135  fs[i] = affinity ? ffs(affinity) - 1 : 0;  in smp_build_mpidr_hash()
|
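smp_build_mpidr_hash() works per affinity level of MPIDR_EL1, where Aff0 occupies bits [7:0], Aff1 bits [15:8], Aff2 bits [23:16] and Aff3 bits [39:32], and uses ffs()/fls() to measure how many bits each level actually uses. A standalone sketch of extracting those fields by their architectural positions (this is not the kernel's MPIDR_AFFINITY_LEVEL macro, and the MPIDR value is a made-up example):

    /* Extract the four MPIDR affinity fields:
     * Aff0 [7:0], Aff1 [15:8], Aff2 [23:16], Aff3 [39:32]. */
    #include <stdint.h>
    #include <stdio.h>

    static uint8_t mpidr_affinity_level(uint64_t mpidr, unsigned int level)
    {
            /* Aff3 sits above the reserved bits at bit 32; Aff0-2 are packed
             * into the low 24 bits, 8 bits per level. */
            unsigned int shift = (level == 3) ? 32 : level * 8;

            return (mpidr >> shift) & 0xff;
    }

    int main(void)
    {
            uint64_t mpidr = 0x0000000081000102ULL;  /* example value only */
            unsigned int i;

            for (i = 0; i < 4; i++)
                    printf("Aff%u = %u\n", i, mpidr_affinity_level(mpidr, i));
            return 0;
    }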
/kernel/linux/linux-5.10/tools/testing/selftests/bpf/ |
D | bench.c |
      19  .affinity = false,
     180  env.affinity = true;  in parse_arg()
     183  env.affinity = true;  in parse_arg()
     191  env.affinity = true;  in parse_arg()
     390  if (env.affinity)  in setup_benchmark()
     409  if (env.affinity)  in setup_benchmark()
|
/kernel/linux/linux-5.10/arch/alpha/kernel/ |
D | sys_dp264.c |
     136  cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)  in cpu_set_irq_affinity() argument
     142  if (cpumask_test_cpu(cpu, &affinity))  in cpu_set_irq_affinity()
     151  dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,  in dp264_set_affinity() argument
     155  cpu_set_irq_affinity(d->irq, *affinity);  in dp264_set_affinity()
     163  clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,  in clipper_set_affinity() argument
     167  cpu_set_irq_affinity(d->irq - 16, *affinity);  in clipper_set_affinity()
|
D | sys_titan.c |
     135  titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)  in titan_cpu_set_irq_affinity() argument
     140  if (cpumask_test_cpu(cpu, &affinity))  in titan_cpu_set_irq_affinity()
     149  titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,  in titan_set_irq_affinity() argument
     154  titan_cpu_set_irq_affinity(irq - 16, *affinity);  in titan_set_irq_affinity()
|
/kernel/liteos_a/arch/arm/gic/ |
D | gic_v3.c |
     128  UINT64 affinity = MpidrToAffinity(NextCpu(0, cpuMask));  in HalIrqSetAffinity() local
     131  GIC_REG_64(GICD_IROUTER(irq)) = affinity;  in HalIrqSetAffinity()
     358  UINT64 affinity;  in HalIrqInit() local
     395  affinity = MpidrToAffinity(AARCH64_SYSREG_READ(mpidr_el1));  in HalIrqInit()
     397  GIC_REG_64(GICD_IROUTER(i)) = affinity;  in HalIrqInit()
|
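In gic_v3.c, HalIrqSetAffinity() and HalIrqInit() route interrupts by writing an MPIDR-derived affinity value into GICD_IROUTER<n>. The GICv3 router register packs Aff3 into bits [39:32] and Aff2/Aff1/Aff0 into bits [23:16], [15:8] and [7:0], with bit 31 (Interrupt_Routing_Mode) selecting targeted versus any-participating-CPU delivery. A sketch of that packing based on the register layout only; MpidrToAffinity's actual body is not shown in the listing:

    /* Pack MPIDR affinity fields into the GICD_IROUTER<n> layout, leaving
     * Interrupt_Routing_Mode (bit 31) at 0, i.e. route to the given CPU. */
    #include <stdint.h>

    static uint64_t mpidr_to_irouter(uint64_t mpidr)
    {
            uint64_t aff3 = (mpidr >> 32) & 0xff;
            uint64_t aff2 = (mpidr >> 16) & 0xff;
            uint64_t aff1 = (mpidr >> 8) & 0xff;
            uint64_t aff0 = mpidr & 0xff;

            return (aff3 << 32) | (aff2 << 16) | (aff1 << 8) | aff0;
    }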
/kernel/linux/linux-5.10/drivers/irqchip/ |
D | irq-bcm7038-l1.c |
      51  u8 affinity[MAX_WORDS * IRQS_PER_WORD];  member
     185  __bcm7038_l1_unmask(d, intc->affinity[d->hwirq]);  in bcm7038_l1_unmask()
     195  __bcm7038_l1_mask(d, intc->affinity[d->hwirq]);  in bcm7038_l1_mask()
     213  was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] &  in bcm7038_l1_set_affinity()
     215  __bcm7038_l1_mask(d, intc->affinity[hw]);  in bcm7038_l1_set_affinity()
     216  intc->affinity[hw] = first_cpu;  in bcm7038_l1_set_affinity()
|
/kernel/linux/linux-5.10/include/ras/ |
D | ras_event.h |
     182  __field(u8, affinity)
     187  __entry->affinity = proc->affinity_level;
     189  __entry->affinity = ~0;
     206  __entry->affinity, __entry->mpidr, __entry->midr,
|
/kernel/linux/linux-5.10/include/linux/ |
D | irq.h |
     152  cpumask_var_t affinity;  member
     722  const struct cpumask *affinity);
     724  struct cpumask *affinity);
     881  return d ? d->common->affinity : NULL;  in irq_get_affinity_mask()
     886  return d->common->affinity;  in irq_data_get_affinity_mask()
     908  return d->common->affinity;  in irq_data_get_effective_affinity_mask()
     916  const struct irq_affinity_desc *affinity);
     920  const struct irq_affinity_desc *affinity);
|
D | cpu_rmap.h |
      40  const struct cpumask *affinity);
|
/kernel/linux/linux-5.10/Documentation/ia64/ |
D | irq-redir.rst |
       2  IRQ affinity on IA64 platforms
      10  that described in Documentation/core-api/irq/irq-affinity.rst for i386 systems.
      77  For systems like the NEC AzusA we get IRQ node-affinity for free. This
|
/kernel/linux/linux-5.10/arch/arm/mach-vexpress/ |
D | dcscb_setup.S |
      14  cmp r0, #0  @ check affinity level
|
/kernel/linux/patches/linux-5.10/imx8mm_patch/patches/drivers/ |
D | 0025_linux_drivers_irqchip.patch |
      69  + u64 affinity;
      80  + * set the affinity of the SPI here.
      81  + * This allows to set the affinity to only the interrupts
      84  + affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
      85  + gic_write_irouter(affinity, base + GICD_IROUTER + irq * 8);
      96  u64 affinity;
     124  + * do not set the affinity to all interrupts as this
     136  gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
|