/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef __ASM_ARM_KVM_VGIC_H
#define __ASM_ARM_KVM_VGIC_H

#include <linux/kernel.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define VGIC_NR_IRQS_LEGACY	256
#define VGIC_NR_SGIS		16
#define VGIC_NR_PPIS		16
#define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)

#define VGIC_V2_MAX_LRS		(1 << 6)
#define VGIC_V3_MAX_LRS		16
#define VGIC_MAX_IRQS		1024

/* Sanity checks... */
#if (KVM_MAX_VCPUS > 8)
#error	Invalid number of CPU interfaces
#endif

#if (VGIC_NR_IRQS_LEGACY & 31)
#error "VGIC_NR_IRQS_LEGACY must be a multiple of 32"
#endif

#if (VGIC_NR_IRQS_LEGACY > VGIC_MAX_IRQS)
#error "VGIC_NR_IRQS_LEGACY must be <= 1024"
#endif

/*
 * The GIC distributor registers describing interrupts have two parts:
 * - 32 per-CPU interrupts (SGI + PPI)
 * - a bunch of shared interrupts (SPI)
 */
struct vgic_bitmap {
	/*
	 * - One UL per VCPU for private interrupts (assumes UL is at
	 *   least 32 bits)
	 * - As many UL as necessary for shared interrupts.
	 *
	 * The private interrupts are accessed via the "private"
	 * field, one UL per vcpu (the state for vcpu n is in
	 * private[n]). The shared interrupts are accessed via the
	 * "shared" pointer (IRQn state is at bit n-32 in the bitmap).
	 */
	unsigned long *private;
	unsigned long *shared;
};
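
/*
 * Illustrative sketch, not part of the original interface: reading the
 * state of one IRQ from a vgic_bitmap, following the layout described
 * above.  The helper name is hypothetical, and it assumes test_bit()
 * from <linux/bitops.h> is visible here.
 */
static inline int vgic_bitmap_example_get(struct vgic_bitmap *x,
					  int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, &x->private[cpuid]);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
}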

struct vgic_bytemap {
	/*
	 * - 8 u32 per VCPU for private interrupts
	 * - As many u32 as necessary for shared interrupts.
	 *
	 * The private interrupts are accessed via the "private"
	 * field (the state for vcpu n is in private[n*8] to
	 * private[n*8 + 7]). The shared interrupts are accessed via
	 * the "shared" pointer (IRQn state is at byte (n-32)%4 of the
	 * shared[(n-32)/4] word).
	 */
	u32 *private;
	u32 *shared;
};
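
/*
 * Illustrative sketch, not part of the original interface: fetching the
 * byte assigned to one IRQ in a vgic_bytemap (e.g. its priority),
 * following the layout described above.  The helper name is hypothetical.
 */
static inline u8 vgic_bytemap_example_get(struct vgic_bytemap *x,
					  int cpuid, int irq)
{
	u32 word;

	if (irq < VGIC_NR_PRIVATE_IRQS)
		word = x->private[cpuid * 8 + irq / 4];
	else
		word = x->shared[(irq - VGIC_NR_PRIVATE_IRQS) / 4];

	/* Byte k of the word sits in bits [8k+7:8k]. */
	return (word >> ((irq % 4) * 8)) & 0xff;
}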

struct kvm_vcpu;

enum vgic_type {
	VGIC_V2,		/* Good ol' GICv2 */
	VGIC_V3,		/* New fancy GICv3 */
};

#define LR_STATE_PENDING	(1 << 0)
#define LR_STATE_ACTIVE		(1 << 1)
#define LR_STATE_MASK		(3 << 0)
#define LR_EOI_INT		(1 << 2)

struct vgic_lr {
	u16	irq;
	u8	source;
	u8	state;
};
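
/*
 * Illustrative sketch, hypothetical helpers: the LR_* flags above are
 * combined in vgic_lr.state.  For example, a list register describing a
 * pending interrupt that should signal maintenance on EOI could be built
 * and inspected like this.
 */
static inline struct vgic_lr vgic_lr_example_pending(u16 irq, u8 source)
{
	struct vgic_lr lr = {
		.irq	= irq,
		.source	= source,
		.state	= LR_STATE_PENDING | LR_EOI_INT,
	};

	return lr;
}

static inline bool vgic_lr_example_is_active(struct vgic_lr lr)
{
	return (lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE;
}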

struct vgic_vmcr {
	u32	ctlr;
	u32	abpr;
	u32	bpr;
	u32	pmr;
};

struct vgic_ops {
	struct vgic_lr	(*get_lr)(const struct kvm_vcpu *, int);
	void	(*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
	void	(*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
	u64	(*get_elrsr)(const struct kvm_vcpu *vcpu);
	u64	(*get_eisr)(const struct kvm_vcpu *vcpu);
	void	(*clear_eisr)(struct kvm_vcpu *vcpu);
	u32	(*get_interrupt_status)(const struct kvm_vcpu *vcpu);
	void	(*enable_underflow)(struct kvm_vcpu *vcpu);
	void	(*disable_underflow)(struct kvm_vcpu *vcpu);
	void	(*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
	void	(*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
	void	(*enable)(struct kvm_vcpu *vcpu);
};
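
/*
 * Illustrative sketch, hypothetical wrapper: common code is expected to go
 * through the vgic_ops function table so the same paths can drive either
 * the GICv2 or the GICv3 backend.
 */
static inline struct vgic_lr vgic_ops_example_get_lr(const struct vgic_ops *ops,
						     const struct kvm_vcpu *vcpu,
						     int lr)
{
	/* Dispatch to whichever backend was probed (v2 or v3). */
	return ops->get_lr(vcpu, lr);
}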

struct vgic_params {
	/* vgic type */
	enum vgic_type	type;
	/* Physical address of vgic virtual cpu interface */
	phys_addr_t	vcpu_base;
	/* Number of list registers */
	u32		nr_lr;
	/* Maintenance interrupt number */
	unsigned int	maint_irq;
	/* Virtual control interface base address */
	void __iomem	*vctrl_base;
};

struct vgic_dist {
#ifdef CONFIG_KVM_ARM_VGIC
	spinlock_t		lock;
	bool			in_kernel;
	bool			ready;

	int			nr_cpus;
	int			nr_irqs;

	/* Virtual control interface mapping */
	void __iomem		*vctrl_base;

	/* Distributor and vcpu interface mapping in the guest */
	phys_addr_t		vgic_dist_base;
	phys_addr_t		vgic_cpu_base;

	/* Distributor enabled */
	u32			enabled;

	/* Interrupt enabled (one bit per IRQ) */
	struct vgic_bitmap	irq_enabled;

	/* Level-triggered interrupt external input is asserted */
	struct vgic_bitmap	irq_level;

	/* Interrupt state is pending on the distributor */
	struct vgic_bitmap	irq_pending;

	/*
	 * Tracks writes to GICD_ISPENDRn and GICD_ICPENDRn for level-triggered
	 * interrupts.  Essentially holds the state of the flip-flop in
	 * Figure 4-10 on page 4-101 in ARM IHI 0048B.b.
	 * Once set, it is only cleared for level-triggered interrupts on
	 * guest ACKs (when we queue it) or writes to GICD_ICPENDRn.
	 */
	struct vgic_bitmap	irq_soft_pend;

	/* Level-triggered interrupt queued on VCPU interface */
	struct vgic_bitmap	irq_queued;

	/* Interrupt priority. Not used yet. */
	struct vgic_bytemap	irq_priority;

	/* Level/edge triggered */
	struct vgic_bitmap	irq_cfg;

	/*
	 * Source CPU per SGI and target CPU:
	 *
	 * Each byte represents an SGI observable on a VCPU, each bit of
	 * this byte indicating whether the corresponding VCPU has
	 * generated this interrupt. This is a GICv2 feature only.
	 *
	 * For VCPUn (n < 8), irq_sgi_sources[n*16] to [n*16 + 15] are
	 * the SGIs observable on VCPUn (see the indexing sketch after
	 * this structure).
	 */
	u8			*irq_sgi_sources;

	/*
	 * Target CPU for each SPI:
	 *
	 * Array of available SPIs, each byte indicating the target
	 * VCPU for that SPI. IRQn (n >= 32) is at irq_spi_cpu[n-32].
	 */
	u8			*irq_spi_cpu;

	/*
	 * Reverse lookup of irq_spi_cpu for faster pending computation:
	 *
	 * Array of bitmaps, one per VCPU, describing whether IRQn is
	 * routed to a particular VCPU.
	 */
	struct vgic_bitmap	*irq_spi_target;

	/* Bitmap indicating which CPU has something pending */
	unsigned long		*irq_pending_on_cpu;
#endif
};
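
#ifdef CONFIG_KVM_ARM_VGIC
/*
 * Illustrative sketch, hypothetical helpers: indexing into the per-SGI
 * source bytes and the per-SPI target bytes of struct vgic_dist, following
 * the layouts described in the comments above.
 */
static inline u8 vgic_dist_example_sgi_sources(struct vgic_dist *dist,
					       int vcpu_id, int sgi)
{
	/* SGIs observable on VCPUn live in irq_sgi_sources[n*16 .. n*16+15]. */
	return dist->irq_sgi_sources[vcpu_id * VGIC_NR_SGIS + sgi];
}

static inline u8 vgic_dist_example_spi_target(struct vgic_dist *dist, int irq)
{
	/* SPI n (n >= 32) lives at irq_spi_cpu[n - 32]. */
	return dist->irq_spi_cpu[irq - VGIC_NR_PRIVATE_IRQS];
}
#endif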

struct vgic_v2_cpu_if {
	u32		vgic_hcr;
	u32		vgic_vmcr;
	u32		vgic_misr;	/* Saved only */
	u64		vgic_eisr;	/* Saved only */
	u64		vgic_elrsr;	/* Saved only */
	u32		vgic_apr;
	u32		vgic_lr[VGIC_V2_MAX_LRS];
};

struct vgic_v3_cpu_if {
#ifdef CONFIG_ARM_GIC_V3
	u32		vgic_hcr;
	u32		vgic_vmcr;
	u32		vgic_misr;	/* Saved only */
	u32		vgic_eisr;	/* Saved only */
	u32		vgic_elrsr;	/* Saved only */
	u32		vgic_ap0r[4];
	u32		vgic_ap1r[4];
	u64		vgic_lr[VGIC_V3_MAX_LRS];
#endif
};

struct vgic_cpu {
#ifdef CONFIG_KVM_ARM_VGIC
	/* per IRQ to LR mapping */
	u8		*vgic_irq_lr_map;

	/* Pending interrupts on this VCPU */
	DECLARE_BITMAP(pending_percpu, VGIC_NR_PRIVATE_IRQS);
	unsigned long	*pending_shared;

	/* Bitmap of used/free list registers */
	DECLARE_BITMAP(lr_used, VGIC_V2_MAX_LRS);

	/* Number of list registers on this CPU */
	int		nr_lr;

	/* CPU vif control registers for world switch */
	union {
		struct vgic_v2_cpu_if	vgic_v2;
		struct vgic_v3_cpu_if	vgic_v3;
	};
#endif
};

#define LR_EMPTY	0xff

#define INT_STATUS_EOI		(1 << 0)
#define INT_STATUS_UNDERFLOW	(1 << 1)

struct kvm;
struct kvm_vcpu;
struct kvm_run;
struct kvm_exit_mmio;

#ifdef CONFIG_KVM_ARM_VGIC
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_create(struct kvm *kvm);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio);

#define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k)	((k)->arch.vgic.ready)

int vgic_v2_probe(struct device_node *vgic_node,
		  const struct vgic_ops **ops,
		  const struct vgic_params **params);
#ifdef CONFIG_ARM_GIC_V3
int vgic_v3_probe(struct device_node *vgic_node,
		  const struct vgic_ops **ops,
		  const struct vgic_params **params);
#else
static inline int vgic_v3_probe(struct device_node *vgic_node,
				const struct vgic_ops **ops,
				const struct vgic_params **params)
{
	return -ENODEV;
}
#endif

#else
static inline int kvm_vgic_hyp_init(void)
{
	return 0;
}

static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
{
	return 0;
}

static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	return -ENXIO;
}

static inline int kvm_vgic_map_resources(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_vgic_create(struct kvm *kvm)
{
	return 0;
}

static inline void kvm_vgic_destroy(struct kvm *kvm)
{
}

static inline void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
}

static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {}

static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid,
				      unsigned int irq_num, bool level)
{
	return 0;
}

static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
				    struct kvm_exit_mmio *mmio)
{
	return false;
}

static inline int irqchip_in_kernel(struct kvm *kvm)
{
	return 0;
}

static inline bool vgic_initialized(struct kvm *kvm)
{
	return true;
}
#endif

#endif