/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction
 * used to support software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong. go again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
				int rs, unsigned int bytes,
				int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
				(stt)->size, (ioba), (npages)) ?        \
				H_PARAMETER : H_SUCCESS)
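
/*
 * Illustrative sketch, not part of the original header: a TCE hcall
 * handler would typically validate the I/O bus address range before
 * touching the table. The handler shape below is an assumption, shown
 * only to demonstrate the kvmppc_ioba_validate() return convention.
 */
#if 0	/* example only */
static long example_h_put_tce(struct kvmppc_spapr_tce_table *stt,
			      unsigned long ioba, unsigned long npages)
{
	long ret = kvmppc_ioba_validate(stt, ioba, npages);

	if (ret != H_SUCCESS)	/* out-of-range ioba/npages -> H_PARAMETER */
		return ret;
	/* ... update the TCE table ... */
	return H_SUCCESS;
}
#endif
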
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
	u64	xive_timaval[2];
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     const struct kvm_userspace_memory_region *mem,
				     enum kvm_mr_change change);
	void (*commit_memory_region)(struct kvm *kvm,
				     const struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			   unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*free_memslot)(struct kvm_memory_slot *slot);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
	int (*enable_svm)(struct kvm *kvm);
	int (*svm_off)(struct kvm *kvm);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				enum instruction_fetch_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}
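
/*
 * Illustrative sketch, not part of the original header: an emulation
 * path would fetch the last guest instruction like this, bailing out
 * to the caller on anything other than EMULATE_DONE. The surrounding
 * function shape is an assumption for illustration only.
 */
#if 0	/* example only */
static int example_emulate_step(struct kvm_vcpu *vcpu)
{
	u32 inst;
	int ret = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);

	if (ret != EMULATE_DONE)	/* e.g. EMULATE_AGAIN after a fetch failure */
		return ret;
	/* ... decode and emulate 'inst' ... */
	return EMULATE_DONE;
}
#endif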

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extracts inst bits, numbered according to the spec's big-endian bit
 * ordering: bit 0 is the leftmost (most significant) bit. Both the msb
 * and lsb endpoints are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces inst bits with ordering according to spec.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
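
/*
 * Illustrative sketch, not part of the original header: for a 32-bit
 * instruction held in the low word of the u64, spec bit n corresponds
 * to bit n + 32 here, so the primary opcode (spec bits 0-5) comes out
 * as (inst >> 26) & 0x3f.
 */
#if 0	/* example only */
static u32 example_primary_opcode(u32 inst)
{
	/* spec bits 0-5 of the 32-bit instruction, offset by 32 */
	return kvmppc_get_field((u64)inst, 0 + 32, 5 + 32);
}
#endif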

#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
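
/*
 * Illustrative sketch, not part of the original header: the ONE_REG id
 * encodes the value size, so a get_one_reg handler packs a register
 * into the union with get_reg_val(), and a set_one_reg handler unpacks
 * it with set_reg_val(). GPR 0 is an arbitrary choice for illustration.
 */
#if 0	/* example only */
static int example_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
			       union kvmppc_one_reg *val)
{
	/* one_reg_size(id) selects which union field is filled in */
	*val = get_reg_val(id, kvmppc_get_gpr(vcpu, 0));
	return 0;
}

static int example_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
			       union kvmppc_one_reg *val)
{
	kvmppc_set_gpr(vcpu, 0, set_reg_val(id, *val));
	return 0;
}
#endif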

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

/*
 * To avoid the need to unnecessarily exit fully to the host kernel, an IPI to
 * a CPU thread that's running/napping inside of a guest is by default regarded
 * as a request to wake the CPU (if needed) and continue execution within the
 * guest, potentially to process new state like externally-generated
 * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
 *
 * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
 * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
 * target CPU's PACA. To avoid unnecessary exits to the host, this flag should
 * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
 * the receiving side prior to processing the IPI work.
 *
 * NOTE:
 *
 * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
 * This is to guard against sequences such as the following:
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *      105: smp_muxed_ipi_set_message():
 *      105:   smp_mb()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    --105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |  42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    |  42: // returns to executing guest
 *    |      // RE-ORDERED STORE COMPLETES
 *    ->105:   message[CALL_FUNCTION] = 1
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 *
 * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
 * to guard against sequences such as the following (as well as to create
 * a read-side pairing with the barrier in kvmppc_set_host_ipi()):
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    -- 42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    | 105: smp_muxed_ipi_set_message():
 *    | 105:   smp_mb()
 *    | 105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |      // RE-ORDERED STORE COMPLETES
 *    -> 42:   kvmppc_clear_host_ipi(42)
 *       42: // returns to executing guest
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 */
static inline void kvmppc_set_host_ipi(int cpu)
{
	/*
	 * order stores of IPI messages vs. setting of host_ipi flag
	 *
	 * pairs with the barrier in kvmppc_clear_host_ipi()
	 */
	smp_mb();
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
	/*
	 * order clearing of host_ipi flag vs. processing of IPI messages
	 *
	 * pairs with the barrier in kvmppc_set_host_ipi()
	 */
	smp_mb();
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
					struct kvm_nested_guest *nested);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu)
{}

static inline void kvmppc_clear_host_ipi(int cpu)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
			struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {};
static inline void kvmppc_free_host_rm_ops(void) {};
static inline void kvmppc_free_pimap(struct kvm *kvm) {};
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * Below, the first "xive" is the "eXternal Interrupt Virtualization Engine",
 * i.e. the new P9 interrupt controller, while the second "xive" is the
 * legacy "eXternal Interrupt Vector Entry", which is the configuration of
 * an interrupt on the "xics" interrupt controller on P8 and earlier. These
 * two functions consume or produce a legacy "XIVE" state from the
 * new "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
extern void kvmppc_xive_init_module(void);
extern void kvmppc_xive_exit_module(void);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_native_init_module(void);
extern void kvmppc_xive_native_exit_module(void);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern bool kvmppc_xive_native_supported(void);

#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
static inline void kvmppc_xive_init_module(void) { }
static inline void kvmppc_xive_exit_module(void) { }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
			  struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_native_init_module(void) { }
static inline void kvmppc_xive_native_exit_module(void) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return -ENOENT; }

#endif /* CONFIG_KVM_XIVE */

#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations we want to set up while running in real
 * mode in the guest operating on the xics.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
		struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
		struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif
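
/*
 * Illustrative sketch, not part of the original header: for instance,
 * SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0) below generates
 * kvmppc_get_srr0()/kvmppc_set_srr0(). On BOOKE-HV these access the
 * guest SPR directly via mfspr/mtspr; elsewhere they read and write
 * vcpu->arch.shared->srr0 with the guest's endianness.
 */
#if 0	/* example only */
static void example_advance_srr0(struct kvm_vcpu *vcpu)
{
	/* step the guest's saved PC past one 4-byte instruction */
	kvmppc_set_srr0(vcpu, kvmppc_get_srr0(vcpu) + 4);
}
#endif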

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->msr = cpu_to_be64(val);
	else
	       vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
	       return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
	       return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Call this after prepare_to_enter. This function restores the lazy EE
 * and IRQ-disabled tracking state to normal mode, without actually
 * enabling interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */