/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#ifndef _KVM_PPC_BOOK3S_XIVE_H
#define _KVM_PPC_BOOK3S_XIVE_H

#ifdef CONFIG_KVM_XICS
#include "book3s_xics.h"

/*
 * State for one guest irq source.
 *
 * For each guest source we allocate a HW interrupt in the XIVE
 * which we use for all SW triggers. It will be unused for
 * pass-through, but it's easier to keep it around as the same
 * guest interrupt can alternatively be emulated or passed through
 * if a physical device is hot unplugged and replaced with an
 * emulated one.
 *
 * This state structure is very similar to the XICS one, with
 * additional XIVE specific tracking.
 */
struct kvmppc_xive_irq_state {
	bool valid;			/* Interrupt entry is valid */

	u32 number;			/* Guest IRQ number */
	u32 ipi_number;			/* XIVE IPI HW number */
	struct xive_irq_data ipi_data;	/* XIVE IPI associated data */
	u32 pt_number;			/* XIVE Pass-through number if any */
	struct xive_irq_data *pt_data;	/* XIVE Pass-through associated data */

	/* Targeting as set by guest */
	u8 guest_priority;		/* Guest set priority */
	u8 saved_priority;		/* Saved priority when masking */

	/* Actual targeting */
	u32 act_server;			/* Actual server */
	u8 act_priority;		/* Actual priority */

	/* Various state bits */
	bool in_eoi;			/* Synchronize with H_EOI */
	bool old_p;			/* P bit state when masking */
	bool old_q;			/* Q bit state when masking */
	bool lsi;			/* Level-sensitive interrupt */
	bool asserted;			/* Only for emulated LSI: current state */

	/* State saved for migration */
	bool in_queue;
	bool saved_p;
	bool saved_q;
	u8 saved_scan_prio;
};

/* Select the "right" interrupt (IPI vs. passthrough) */
static inline void kvmppc_xive_select_irq(struct kvmppc_xive_irq_state *state,
					  u32 *out_hw_irq,
					  struct xive_irq_data **out_xd)
{
	if (state->pt_number) {
		if (out_hw_irq)
			*out_hw_irq = state->pt_number;
		if (out_xd)
			*out_xd = state->pt_data;
	} else {
		if (out_hw_irq)
			*out_hw_irq = state->ipi_number;
		if (out_xd)
			*out_xd = &state->ipi_data;
	}
}
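
/*
 * Editor's illustration, not part of the original header: a sketch of
 * the typical caller pattern. kvmppc_xive_select_irq() hides whether a
 * source is currently backed by its XIVE IPI or by a pass-through HW
 * interrupt; callers operate on whichever is live. The helper name
 * below is hypothetical, and xive_native_configure_irq() is assumed to
 * take (hw_irq, target VP, priority, sw_irq) as in the kernel's XIVE
 * native API.
 */
static inline int xive_example_retarget(struct kvmppc_xive_irq_state *state,
					u32 vp_id, u8 prio)
{
	u32 hw_num;

	/* Pick the pass-through HW interrupt if set, else the IPI */
	kvmppc_xive_select_irq(state, &hw_num, NULL);

	/* Route the selected HW interrupt to the new target (sketch) */
	return xive_native_configure_irq(hw_num, vp_id, prio, state->number);
}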

/*
 * This corresponds to an "ICS" in XICS terminology; we use it
 * as a means of breaking up source information into multiple structures.
 */
struct kvmppc_xive_src_block {
	arch_spinlock_t lock;
	u16 id;
	struct kvmppc_xive_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS];
};

struct kvmppc_xive {
	struct kvm *kvm;
	struct kvm_device *dev;
	struct dentry *dentry;

	/* VP block associated with the VM */
	u32	vp_base;

	/* Blocks of sources */
	struct kvmppc_xive_src_block *src_blocks[KVMPPC_XICS_MAX_ICS_ID + 1];
	u32	max_sbid;

	/*
	 * For state save, we lazily scan the queues on the first interrupt
	 * being migrated. We don't have a clean way to reset those flags,
	 * so we keep track of the number of valid sources and how many of
	 * them have been migrated; we reset when all of them have been
	 * processed.
	 */
	u32	src_count;
	u32	saved_src_count;

	/*
	 * Some irqs are delayed on restore until the source is created;
	 * keep track here of how many of them there are.
	 */
	u32	delayed_irqs;

	/* Which queues (priorities) are in use by the guest */
	u8	qmap;

	/* Queue orders */
	u32	q_order;
	u32	q_page_order;
};
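
/*
 * Editor's illustration, not part of the original header: the lazy-scan
 * bookkeeping described above boils down to comparing the two counters.
 * saved_src_count advances as each valid source is saved for migration;
 * once it catches up with src_count, every source has been processed
 * and the scan state can be reset for a later migration pass. The
 * helper name is hypothetical.
 */
static inline bool xive_example_save_complete(struct kvmppc_xive *xive)
{
	/* All valid sources saved: safe to reset the lazy-scan state */
	return xive->saved_src_count == xive->src_count;
}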

#define KVMPPC_XIVE_Q_COUNT	8

struct kvmppc_xive_vcpu {
	struct kvmppc_xive	*xive;
	struct kvm_vcpu		*vcpu;
	bool			valid;

	/* Server number. This is the HW CPU ID from a guest perspective */
	u32			server_num;

	/*
	 * HW VP corresponding to this VCPU. This is the base of the VP
	 * block plus the server number.
	 */
	u32			vp_id;
	u32			vp_chip_id;
	u32			vp_cam;

	/* IPI used for sending ... IPIs */
	u32			vp_ipi;
	struct xive_irq_data	vp_ipi_data;

	/* Local emulation state */
	uint8_t			cppr;	/* Guest CPPR */
	uint8_t			hw_cppr;	/* Hardware CPPR */
	uint8_t			mfrr;
	uint8_t			pending;

	/* Each VP has 8 queues, though we only provision some */
	struct xive_q		queues[KVMPPC_XIVE_Q_COUNT];
	u32			esc_virq[KVMPPC_XIVE_Q_COUNT];
	char			*esc_virq_names[KVMPPC_XIVE_Q_COUNT];

	/* Stash a delayed irq on restore from migration (see set_icp) */
	u32			delayed_irq;

	/* Stats */
	u64			stat_rm_h_xirr;
	u64			stat_rm_h_ipoll;
	u64			stat_rm_h_cppr;
	u64			stat_rm_h_eoi;
	u64			stat_rm_h_ipi;
	u64			stat_vm_h_xirr;
	u64			stat_vm_h_ipoll;
	u64			stat_vm_h_cppr;
	u64			stat_vm_h_eoi;
	u64			stat_vm_h_ipi;
};

static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num)
			return vcpu;
	}
	return NULL;
}

static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmppc_xive *xive,
		u32 irq, u16 *source)
{
	u32 bid = irq >> KVMPPC_XICS_ICS_SHIFT;
	u16 src = irq & KVMPPC_XICS_SRC_MASK;

	if (source)
		*source = src;
	if (bid > KVMPPC_XICS_MAX_ICS_ID)
		return NULL;
	return xive->src_blocks[bid];
}
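
/*
 * Editor's illustration, not part of the original header: combining the
 * two lookup helpers above to resolve a guest irq number down to the
 * vcpu it is currently routed to. The helper name is hypothetical; the
 * fields used (valid, act_server) are the ones defined in
 * struct kvmppc_xive_irq_state above.
 */
static inline struct kvm_vcpu *
xive_example_irq_target(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 src;

	/* Locate the source block and index for this guest irq */
	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb)
		return NULL;
	state = &sb->irq_state[src];
	if (!state->valid)
		return NULL;

	/* act_server is the server the interrupt is actually routed to */
	return kvmppc_xive_find_server(xive->kvm, state->act_server);
}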

/*
 * The mapping between guest priorities and host priorities is as
 * follows.
 *
 * Guest requests for priorities 0...6 are honored. A guest request
 * for anything higher results in a priority of 7 being applied.
 *
 * However, when the XIRR is returned via H_XIRR, 7 is translated to
 * 0xb in order to match AIX expectations.
 *
 * A similar mapping is done for CPPR values.
 */
static inline u8 xive_prio_from_guest(u8 prio)
{
	if (prio == 0xff || prio < 8)
		return prio;
	return 7;
}

static inline u8 xive_prio_to_guest(u8 prio)
{
	if (prio == 0xff || prio < 7)
		return prio;
	return 0xb;
}
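
/*
 * Editor's illustration, not part of the original header: worked
 * examples of the mapping implemented by the two helpers above.
 *
 *   xive_prio_from_guest(5)    == 5	guest 0...6 honored as-is
 *   xive_prio_from_guest(0xa)  == 7	anything above 6 is clamped to 7
 *   xive_prio_from_guest(0xff) == 0xff	0xff (masked) passes through
 *
 *   xive_prio_to_guest(5)      == 5	host 0...6 returned as-is
 *   xive_prio_to_guest(7)      == 0xb	host 7 reported as 0xb (AIX)
 *   xive_prio_to_guest(0xff)   == 0xff	0xff (masked) passes through
 */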

/*
 * Read one entry from an event queue page. An entry is valid when its
 * generation (top) bit differs from the current toggle; the toggle is
 * flipped each time the index wraps around the queue.
 */
static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle)
{
	u32 cur;

	if (!qpage)
		return 0;
	cur = be32_to_cpup(qpage + *idx);

	/* Generation bit matches the toggle: no new entry, queue is empty */
	if ((cur >> 31) == *toggle)
		return 0;

	/* Consume the entry, flipping the toggle on wrap-around */
	*idx = (*idx + 1) & msk;
	if (*idx == 0)
		(*toggle) ^= 1;

	/* The low 31 bits carry the event data */
	return cur & 0x7fffffff;
}
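
/*
 * Editor's sketch, not part of the original header: draining pending
 * entries from one of the vcpu queues with __xive_read_eq(). This
 * assumes struct xive_q exposes qpage/msk/idx/toggle fields matching
 * the helper's parameters, as the XIVE native code does; the helper
 * name and the pr_devel() reporting are illustrative only.
 */
static inline void xive_example_drain_eq(struct xive_q *q)
{
	u32 data;

	/* __xive_read_eq() returns 0 once the queue is empty */
	while ((data = __xive_read_eq(q->qpage, q->msk,
				      &q->idx, &q->toggle)) != 0)
		pr_devel("EQ entry: 0x%x\n", data);
}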

extern unsigned long xive_rm_h_xirr(struct kvm_vcpu *vcpu);
extern unsigned long xive_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
			 unsigned long mfrr);
extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);

extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
			      unsigned long mfrr);
extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);

#endif /* CONFIG_KVM_XICS */
#endif /* _KVM_PPC_BOOK3S_XIVE_H */