// SPDX-License-Identifier: GPL-2.0
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>
#include <linux/cpumask.h>
#include <linux/sched/isolation.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	/*
	 * The cpumask_empty() check is a workaround for interrupt chips,
	 * which do not implement effective affinity, but the architecture has
	 * enabled the config switch. Use the general affinity mask instead.
	 */
	if (cpumask_empty(m))
		m = irq_data_get_affinity_mask(d);

	/*
	 * Sanity check. If the mask is not empty when excluding the outgoing
	 * CPU then it must contain at least one online CPU. The outgoing CPU
	 * has been removed from the online mask already.
	 */
	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If this happens then there was a missed IRQ fixup at some
		 * point. Warn about it and enforce fixup.
		 */
		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
			cpumask_pr_args(m), d->irq, cpu);
		return true;
	}
#endif
	return cpumask_test_cpu(cpu, m);
}

static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;
#ifdef CONFIG_CPU_ISOLATION_OPT
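	/* Scratch cpumask for computing a non-isolated target set */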
	struct cpumask available_cpus;
#endif

	/*
	 * IRQ chip might be already torn down, but the irq descriptor is
	 * still in the radix tree. Also if the chip has no affinity setter,
	 * nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move required, if:
	 * - Interrupt is per cpu
	 * - Interrupt is not started
	 * - Affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete any pending irq move cleanup. If this interrupt was
	 * moved in hard irq context, then the vectors need to be cleaned
	 * up. It can't wait until this interrupt actually happens and this
	 * CPU was involved.
	 */
	irq_force_complete_move(desc);

	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last change of the affinity does not get lost. If
	 * there is no move pending or the pending mask does not contain
	 * any online CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

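	/*
	 * Exclude isolated CPUs from the target mask, so the interrupt is
	 * not migrated onto a CPU which is shielded from interrupts.
	 */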
#ifdef CONFIG_CPU_ISOLATION_OPT
	cpumask_copy(&available_cpus, affinity);
	cpumask_andnot(&available_cpus, &available_cpus, cpu_isolated_mask);
	affinity = &available_cpus;
#endif

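	/* Is there no online CPU left in the candidate affinity mask? */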
	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
#ifdef CONFIG_CPU_ISOLATION_OPT
		const struct cpumask *default_affinity;
#endif

		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown_and_deactivate(desc);
			return false;
		}

#ifdef CONFIG_CPU_ISOLATION_OPT
		default_affinity = desc->affinity_hint ? : irq_default_affinity;
		/*
		 * The order of preference for selecting a fallback CPU is:
		 *
		 * (1) online and un-isolated CPU from default affinity
		 * (2) online and un-isolated CPU
		 * (3) online CPU
		 */
		cpumask_andnot(&available_cpus, cpu_online_mask,
			       cpu_isolated_mask);
		if (cpumask_intersects(&available_cpus, default_affinity))
			cpumask_and(&available_cpus, &available_cpus,
				    default_affinity);
		else if (cpumask_empty(&available_cpus))
			affinity = cpu_online_mask;

		/*
		 * We are overriding the affinity with all online and
		 * un-isolated CPUs. The irq_set_affinity_locked() call
		 * below notifies the PM QoS affinity listeners of this
		 * mask, which results in the CPU_DMA_LATENCY QoS request
		 * being applied to all CPUs in the mask. But the low
		 * level irqchip driver sets the affinity of an irq to
		 * only one CPU. So pick only one CPU from the prepared
		 * mask while overriding the user affinity.
		 */
		affinity = cpumask_of(cpumask_any(affinity));
#else
		affinity = cpu_online_mask;
#endif
		brokeaff = true;
	}
	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
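	/*
	 * Under CONFIG_CPU_ISOLATION_OPT use the full locked variant so
	 * that affinity notifiers (e.g. the PM QoS listener mentioned
	 * above) observe the override.
	 */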
#ifdef CONFIG_CPU_ISOLATION_OPT
	err = irq_set_affinity_locked(d, affinity, false);
#else
	err = irq_do_set_affinity(d, affinity, false);
#endif
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed (%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

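	/* Re-enable the interrupt if it was masked for the migration */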
	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}

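/*
 * Check whether @cpu is a housekeeping CPU which should take over managed
 * interrupts whose effective affinity currently extends beyond the
 * housekeeping set.
 */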
static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)
{
	const struct cpumask *hk_mask;

	if (!housekeeping_enabled(HK_FLAG_MANAGED_IRQ))
		return false;

	hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
	if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask))
		return false;

	return cpumask_test_cpu(cpu, hk_mask);
}

static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

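	/*
	 * Only managed interrupts with an installed action, a valid chip
	 * and the upcoming CPU in their affinity mask need a fixup.
	 */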
	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

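	/* Restart a managed interrupt which was shut down on CPU offline */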
	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around unless the
	 * isolation mechanism requests to move it to an upcoming
	 * housekeeping CPU.
	 */
	if (!irqd_is_single_target(data) || hk_should_isolate(data, cpu))
		irq_set_affinity_locked(data, affinity, false);
}

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu: Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();

	return 0;
}