// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kvm_host.h>

#include <asm/irq_remapping.h>
#include <asm/cpu.h>

#include "lapic.h"
#include "irq.h"
#include "posted_intr.h"
#include "trace.h"
#include "vmx.h"

/*
 * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we
 * can find which vCPU should be woken up.
 */
static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
static DEFINE_PER_CPU(raw_spinlock_t, blocked_vcpu_on_cpu_lock);

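/* The posted-interrupt descriptor lives in the VMX-specific vCPU state. */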
static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
	return &(to_vmx(vcpu)->pi_desc);
}

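/*
 * Called when a vCPU is loaded on a (possibly new) physical CPU: point the
 * descriptor's notification destination (NDST) at that CPU's APIC ID and
 * clear suppress-notification (SN) so notifications reach the right CPU.
 */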
void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
	struct pi_desc old, new;
	unsigned int dest;

	/*
	 * In case of hot-plug or hot-unplug, we may have to undo
	 * vmx_vcpu_pi_put even if there is no assigned device.  And we
	 * always keep PI.NDST up to date for simplicity: it makes the
	 * code easier, and CPU migration is not a fast path.
	 */
	if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
		return;

	/*
	 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
	 * PI.NDST: pi_post_block() is the one expected to change PI.NDST and
	 * the wakeup handler expects the vCPU to be on the blocked_vcpu_list
	 * that matches PI.NDST.  Otherwise, a vCPU may not be woken up
	 * correctly.
	 */
	if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || vcpu->cpu == cpu) {
		pi_clear_sn(pi_desc);
		goto after_clear_sn;
	}

	/* The full case.  */
	do {
		old.control = new.control = pi_desc->control;

		dest = cpu_physical_id(cpu);

		if (x2apic_enabled())
			new.ndst = dest;
		else
			new.ndst = (dest << 8) & 0xFF00;

		new.sn = 0;
	} while (cmpxchg64(&pi_desc->control, old.control,
			   new.control) != old.control);

after_clear_sn:

	/*
	 * Clear SN before reading the bitmap.  The VT-d firmware
	 * writes the bitmap and reads SN atomically (5.2.3 in the
	 * spec), so it doesn't really have a memory barrier that
	 * pairs with this, but we cannot do that and we need one.
	 */
	smp_mb__after_atomic();

	if (!pi_is_pir_empty(pi_desc))
		pi_set_on(pi_desc);
}

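/*
 * VT-d posted interrupts require an in-kernel irqchip with APICv enabled,
 * an assigned device, and interrupt-posting support in the IOMMU.
 */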
static bool vmx_can_use_vtd_pi(struct kvm *kvm)
{
	return irqchip_in_kernel(kvm) && enable_apicv &&
		kvm_arch_has_assigned_device(kvm) &&
		irq_remapping_cap(IRQ_POSTING_CAP);
}

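/*
 * Suppress notification (SN) while a preempted vCPU is scheduled out:
 * interrupts posted in the meantime are still recorded in the PIR and
 * are picked up when the vCPU is loaded again in vmx_vcpu_pi_load().
 */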
void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

	if (!vmx_can_use_vtd_pi(vcpu->kvm))
		return;

	/* Set SN when the vCPU is preempted */
	if (vcpu->preempted)
		pi_set_sn(pi_desc);
}

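/*
 * Undo the blocking setup: switch NV from the wakeup vector back to the
 * normal notification vector and take the vCPU off the per-CPU list used
 * by the wakeup handler.  Called with interrupts disabled.
 */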
static void __pi_post_block(struct kvm_vcpu *vcpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
	struct pi_desc old, new;
	unsigned int dest;

	do {
		old.control = new.control = pi_desc->control;
		WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
		     "Wakeup handler not enabled while the VCPU is blocked\n");

		dest = cpu_physical_id(vcpu->cpu);

		if (x2apic_enabled())
			new.ndst = dest;
		else
			new.ndst = (dest << 8) & 0xFF00;

		/* set 'NV' to 'notification vector' */
		new.nv = POSTED_INTR_VECTOR;
	} while (cmpxchg64(&pi_desc->control, old.control,
			   new.control) != old.control);

	if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
		raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
		list_del(&vcpu->blocked_vcpu_list);
		raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
		vcpu->pre_pcpu = -1;
	}
}

/*
 * This routine does the following things for a vCPU that is about to block,
 * if VT-d PI is enabled:
 * - Store the vCPU on the per-CPU wakeup list, so that when an interrupt
 *   arrives we can find the right vCPU to wake up.
 * - Change the posted-interrupt descriptor as follows:
 *      'NDST' <-- vcpu->pre_pcpu
 *      'NV' <-- POSTED_INTR_WAKEUP_VECTOR
 * - If 'ON' gets set during this process (meaning at least one interrupt
 *   has been posted for this vCPU), the vCPU cannot block; return 1 in
 *   that case, otherwise return 0.
 */
int pi_pre_block(struct kvm_vcpu *vcpu)
{
	unsigned int dest;
	struct pi_desc old, new;
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

	if (!vmx_can_use_vtd_pi(vcpu->kvm))
		return 0;

	WARN_ON(irqs_disabled());
	local_irq_disable();
	if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
		vcpu->pre_pcpu = vcpu->cpu;
		raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
		list_add_tail(&vcpu->blocked_vcpu_list,
			      &per_cpu(blocked_vcpu_on_cpu,
				       vcpu->pre_pcpu));
		raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
	}

	do {
		old.control = new.control = pi_desc->control;

		WARN((pi_desc->sn == 1),
		     "Warning: SN field of posted-interrupts "
		     "is set before blocking\n");

		/*
		 * Since the vCPU can be preempted during this process,
		 * vcpu->cpu may differ from pre_pcpu.  We need to set
		 * pre_pcpu as the destination of the wakeup notification
		 * event, so that the wakeup handler can find the right
		 * vCPU to wake up if an interrupt arrives while the vCPU
		 * is blocked.
		 */
		dest = cpu_physical_id(vcpu->pre_pcpu);

		if (x2apic_enabled())
			new.ndst = dest;
		else
			new.ndst = (dest << 8) & 0xFF00;

		/* set 'NV' to 'wakeup vector' */
		new.nv = POSTED_INTR_WAKEUP_VECTOR;
	} while (cmpxchg64(&pi_desc->control, old.control,
			   new.control) != old.control);

	/* We should not block the vCPU if an interrupt is posted for it.  */
	if (pi_test_on(pi_desc) == 1)
		__pi_post_block(vcpu);

	local_irq_enable();
	return (vcpu->pre_pcpu == -1);
}

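/*
 * Undo pi_pre_block() once the vCPU is done blocking: restore the normal
 * notification vector and remove the vCPU from the wakeup list.
 */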
void pi_post_block(struct kvm_vcpu *vcpu)
{
	if (vcpu->pre_pcpu == -1)
		return;

	WARN_ON(irqs_disabled());
	local_irq_disable();
	__pi_post_block(vcpu);
	local_irq_enable();
}

/*
 * Handler for POSTED_INTR_WAKEUP_VECTOR: kick any blocked vCPU on this CPU
 * that has an outstanding posted interrupt (ON set).
 */
void pi_wakeup_handler(void)
{
	struct kvm_vcpu *vcpu;
	int cpu = smp_processor_id();

	raw_spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
	list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
			blocked_vcpu_list) {
		struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

		if (pi_test_on(pi_desc) == 1)
			kvm_vcpu_kick(vcpu);
	}
	raw_spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
}

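/* Initialize the per-CPU list and lock used to track blocked vCPUs. */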
void __init pi_init_cpu(int cpu)
{
	INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
	raw_spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
}

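/*
 * A posted interrupt is pending if ON is set, or if notifications are
 * suppressed (SN set) and the PIR is non-empty: while SN is set, interrupts
 * can be posted into the PIR without ON being raised.
 */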
bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);

	return pi_test_on(pi_desc) ||
		(pi_test_sn(pi_desc) && !pi_is_pir_empty(pi_desc));
}


/*
 * pi_update_irte - set IRTE for Posted-Interrupts
 *
 * @kvm: kvm
 * @host_irq: host irq of the interrupt
 * @guest_irq: gsi of the interrupt
 * @set: set or unset PI
 * returns 0 on success, < 0 on failure
 */
int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
		   bool set)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_lapic_irq irq;
	struct kvm_vcpu *vcpu;
	struct vcpu_data vcpu_info;
	int idx, ret = 0;

	if (!vmx_can_use_vtd_pi(kvm))
		return 0;

	idx = srcu_read_lock(&kvm->irq_srcu);
	irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
	if (guest_irq >= irq_rt->nr_rt_entries ||
	    hlist_empty(&irq_rt->map[guest_irq])) {
		pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
			     guest_irq, irq_rt->nr_rt_entries);
		goto out;
	}

	hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
		if (e->type != KVM_IRQ_ROUTING_MSI)
			continue;
		/*
		 * VT-d PI cannot post multicast/broadcast interrupts to a
		 * vCPU, so we keep using interrupt remapping for those
		 * kinds of interrupts.
		 *
		 * For lowest-priority interrupts, we only support those
		 * with a single CPU as the destination, e.g. the user
		 * configures the interrupt via /proc/irq or uses
		 * irqbalance to make it single-CPU.
		 *
		 * Full lowest-priority interrupt support will be added later.
		 *
		 * In addition, only generic interrupts can be injected via
		 * the PI mechanism; refuse to route others through it.
		 */

		kvm_set_msi_irq(kvm, e, &irq);
		if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
		    !kvm_irq_is_postable(&irq)) {
			/*
			 * Make sure the IRTE is in remapped mode if
			 * we don't handle it in posted mode.
			 */
			ret = irq_set_vcpu_affinity(host_irq, NULL);
			if (ret < 0) {
				printk(KERN_INFO
				   "failed to fall back to remapped mode, irq: %u\n",
				   host_irq);
				goto out;
			}

			continue;
		}

		vcpu_info.pi_desc_addr = __pa(&to_vmx(vcpu)->pi_desc);
		vcpu_info.vector = irq.vector;

		trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
				vcpu_info.vector, vcpu_info.pi_desc_addr, set);

		if (set)
			ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
		else
			ret = irq_set_vcpu_affinity(host_irq, NULL);

		if (ret < 0) {
			printk(KERN_INFO "%s: failed to update PI IRTE\n",
					__func__);
			goto out;
		}
	}

	ret = 0;
out:
	srcu_read_unlock(&kvm->irq_srcu, idx);
	return ret;
}