/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_SYS_REGS_LOCAL_H__
#define __ARM64_KVM_SYS_REGS_LOCAL_H__

#include <linux/bsearch.h>

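/*
 * Pack Op0/Op1/CRn/CRm/Op2 (from either a sys_reg_desc or a
 * sys_reg_params) into the sys_reg() encoding used as the sort and
 * lookup key for the descriptor tables.
 */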
#define reg_to_encoding(x)						\
	sys_reg((u32)(x)->Op0, (u32)(x)->Op1,				\
		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)

struct sys_reg_params {
	u8	Op0;
	u8	Op1;
	u8	CRn;
	u8	CRm;
	u8	Op2;
	u64	regval;
	bool	is_write;
};

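/*
 * Decode the ISS of a trapped AArch64 MRS/MSR access from ESR_EL2:
 * Op0 lives in bits [21:20], Op2 in [19:17], Op1 in [16:14],
 * CRn in [13:10], CRm in [4:1]; bit [0] is the direction,
 * set for reads and clear for writes.
 */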
#define esr_sys64_to_params(esr)                                               \
	((struct sys_reg_params){ .Op0 = ((esr) >> 20) & 3,                    \
				  .Op1 = ((esr) >> 14) & 0x7,                  \
				  .CRn = ((esr) >> 10) & 0xf,                  \
				  .CRm = ((esr) >> 1) & 0xf,                   \
				  .Op2 = ((esr) >> 17) & 0x7,                  \
				  .is_write = !((esr) & 1) })

struct sys_reg_desc {
	/* Sysreg string for debug */
	const char *name;

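	/*
	 * How the register maps onto its AArch32 copro alias (set via
	 * the AA32() initialiser below): directly, or as the low/high
	 * half of the 64-bit value.
	 */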
	enum {
		AA32_DIRECT,
		AA32_LO,
		AA32_HI,
	} aarch32_map;

	/* MRS/MSR instruction which accesses it. */
	u8	Op0;
	u8	Op1;
	u8	CRn;
	u8	CRm;
	u8	Op2;

	/* Trapped access from guest, if non-NULL. */
	bool (*access)(struct kvm_vcpu *,
		       struct sys_reg_params *,
		       const struct sys_reg_desc *);

	/* Initialization for vcpu. */
	void (*reset)(struct kvm_vcpu *, const struct sys_reg_desc *);

	/* Index into sys_reg[], or 0 if we don't need to save it. */
	int reg;

	/* Value (usually reset value) */
	u64 val;

	/* Custom get/set_user functions, fallback to generic if NULL */
	int (*get_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			const struct kvm_one_reg *reg, void __user *uaddr);
	int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			const struct kvm_one_reg *reg, void __user *uaddr);

	/* Return mask of REG_* runtime visibility overrides */
	unsigned int (*visibility)(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd);
};

#define REG_HIDDEN		(1 << 0) /* hidden from userspace and guest */
#define REG_RAZ			(1 << 1) /* RAZ from userspace and guest */

static __printf(2, 3)
inline void print_sys_reg_msg(const struct sys_reg_params *p,
			      char *fmt, ...)
{
	va_list va;

	va_start(va, fmt);
	/* Look, we even formatted it for you to paste into the table! */
	kvm_pr_unimpl("%pV { Op0(%2u), Op1(%2u), CRn(%2u), CRm(%2u), Op2(%2u), func_%s },\n",
		      &(struct va_format){ fmt, &va },
		      p->Op0, p->Op1, p->CRn, p->CRm, p->Op2, p->is_write ? "write" : "read");
	va_end(va);
}

static inline void print_sys_reg_instr(const struct sys_reg_params *p)
{
	/* GCC warns on an empty format string */
	print_sys_reg_msg(p, "%s", "");
}

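/*
 * Trivial trap handlers: complete a guest write without side effects,
 * or make a read return zero.
 */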
static inline bool ignore_write(struct kvm_vcpu *vcpu,
				const struct sys_reg_params *p)
{
	return true;
}

static inline bool read_zero(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p)
{
	p->regval = 0;
	return true;
}

/* Reset functions */
static inline void reset_unknown(struct kvm_vcpu *vcpu,
				 const struct sys_reg_desc *r)
{
	BUG_ON(!r->reg);
	BUG_ON(r->reg >= NR_SYS_REGS);
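	/* Recognisable poison value for registers that reset to UNKNOWN */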
	__vcpu_sys_reg(vcpu, r->reg) = 0x1de7ec7edbadc0deULL;
}

static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	BUG_ON(!r->reg);
	BUG_ON(r->reg >= NR_SYS_REGS);
	__vcpu_sys_reg(vcpu, r->reg) = r->val;
}

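/* Registers without a visibility hook are always fully visible */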
static inline bool sysreg_hidden(const struct kvm_vcpu *vcpu,
				 const struct sys_reg_desc *r)
{
	if (likely(!r->visibility))
		return false;

	return r->visibility(vcpu, r) & REG_HIDDEN;
}

static inline bool sysreg_visible_as_raz(const struct kvm_vcpu *vcpu,
					 const struct sys_reg_desc *r)
{
	if (likely(!r->visibility))
		return false;

	return r->visibility(vcpu, r) & REG_RAZ;
}

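/*
 * Order two descriptors by encoding (Op0, Op1, CRn, CRm, Op2); the
 * register tables are kept sorted in this order.
 */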
static inline int cmp_sys_reg(const struct sys_reg_desc *i1,
			      const struct sys_reg_desc *i2)
{
	BUG_ON(i1 == i2);
	if (!i1)
		return 1;
	else if (!i2)
		return -1;
	if (i1->Op0 != i2->Op0)
		return i1->Op0 - i2->Op0;
	if (i1->Op1 != i2->Op1)
		return i1->Op1 - i2->Op1;
	if (i1->CRn != i2->CRn)
		return i1->CRn - i2->CRn;
	if (i1->CRm != i2->CRm)
		return i1->CRm - i2->CRm;
	return i1->Op2 - i2->Op2;
}

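/* bsearch comparator: key is a packed encoding, elt is a table entry */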
static inline int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_encoding(r);
}

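/*
 * Binary-search a descriptor table for the register named by @params;
 * the table must be sorted by encoding for the lookup to work.
 */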
static inline const struct sys_reg_desc *
find_reg(const struct sys_reg_params *params, const struct sys_reg_desc table[],
	 unsigned int num)
{
	unsigned long pval = reg_to_encoding(params);

	return __inline_bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}

static inline u64 calculate_mpidr(const struct kvm_vcpu *vcpu)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
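	/* Bit 31 of MPIDR_EL1 is RES1 */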
	mpidr |= (1ULL << 31);

	return mpidr;
}

const struct sys_reg_desc *find_reg_by_id(u64 id,
					  struct sys_reg_params *params,
					  const struct sys_reg_desc table[],
					  unsigned int num);

#define AA32(_x)	.aarch32_map = AA32_##_x
#define Op0(_x) 	.Op0 = _x
#define Op1(_x) 	.Op1 = _x
#define CRn(_x)		.CRn = _x
#define CRm(_x) 	.CRm = _x
#define Op2(_x) 	.Op2 = _x

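/*
 * Fill in the name and encoding fields of a sys_reg_desc from a SYS_*
 * definition. An illustrative (not necessarily verbatim) table entry:
 *
 *	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
 */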
#define SYS_DESC(reg)					\
	.name = #reg,					\
	Op0(sys_reg_Op0(reg)), Op1(sys_reg_Op1(reg)),	\
	CRn(sys_reg_CRn(reg)), CRm(sys_reg_CRm(reg)),	\
	Op2(sys_reg_Op2(reg))

#endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */