// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/err.h>

#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/rtas.h>
#include <asm/xive.h>

#ifdef CONFIG_KVM_XICS
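/*
 * In-kernel handlers for the RTAS interrupt control calls, backed by
 * either the XICS or XIVE emulation depending on which controller the
 * host is using. A status of -3 (the conventional RTAS "parameter
 * error" code) is returned to the guest for bad arguments or handler
 * failures.
 */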
static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
{
	u32 irq, server, priority;
	int rc;

	if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) {
		rc = -3;
		goto out;
	}

	irq = be32_to_cpu(args->args[0]);
	server = be32_to_cpu(args->args[1]);
	priority = be32_to_cpu(args->args[2]);

	if (xics_on_xive())
		rc = kvmppc_xive_set_xive(vcpu->kvm, irq, server, priority);
	else
		rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
	if (rc)
		rc = -3;
out:
	args->rets[0] = cpu_to_be32(rc);
}

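/* "ibm,get-xive": return the current server and priority for an interrupt. */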
static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
{
	u32 irq, server, priority;
	int rc;

	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) {
		rc = -3;
		goto out;
	}

	irq = be32_to_cpu(args->args[0]);

	server = priority = 0;
	if (xics_on_xive())
		rc = kvmppc_xive_get_xive(vcpu->kvm, irq, &server, &priority);
	else
		rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
	if (rc) {
		rc = -3;
		goto out;
	}

	args->rets[1] = cpu_to_be32(server);
	args->rets[2] = cpu_to_be32(priority);
out:
	args->rets[0] = cpu_to_be32(rc);
}

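/* "ibm,int-off": mask (disable delivery of) an interrupt source. */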
static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
{
	u32 irq;
	int rc;

	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
		rc = -3;
		goto out;
	}

	irq = be32_to_cpu(args->args[0]);

	if (xics_on_xive())
		rc = kvmppc_xive_int_off(vcpu->kvm, irq);
	else
		rc = kvmppc_xics_int_off(vcpu->kvm, irq);
	if (rc)
		rc = -3;
out:
	args->rets[0] = cpu_to_be32(rc);
}

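/* "ibm,int-on": re-enable delivery for an interrupt source. */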
static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
{
	u32 irq;
	int rc;

	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
		rc = -3;
		goto out;
	}

	irq = be32_to_cpu(args->args[0]);

	if (xics_on_xive())
		rc = kvmppc_xive_int_on(vcpu->kvm, irq);
	else
		rc = kvmppc_xics_int_on(vcpu->kvm, irq);
	if (rc)
		rc = -3;
out:
	args->rets[0] = cpu_to_be32(rc);
}
#endif /* CONFIG_KVM_XICS */

struct rtas_handler {
	void (*handler)(struct kvm_vcpu *vcpu, struct rtas_args *args);
	char *name;
};

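/*
 * All RTAS calls the kernel can handle directly. A guest token is bound
 * to one of these entries by name via kvm_vm_ioctl_rtas_define_token().
 */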
static struct rtas_handler rtas_handlers[] = {
#ifdef CONFIG_KVM_XICS
	{ .name = "ibm,set-xive", .handler = kvm_rtas_set_xive },
	{ .name = "ibm,get-xive", .handler = kvm_rtas_get_xive },
	{ .name = "ibm,int-off",  .handler = kvm_rtas_int_off },
	{ .name = "ibm,int-on",   .handler = kvm_rtas_int_on },
#endif
};

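/* A token the guest has bound to a handler, kept on kvm->arch.rtas_tokens. */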
struct rtas_token_definition {
	struct list_head list;
	struct rtas_handler *handler;
	u64 token;
};

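/*
 * Compare RTAS call names, bounded by the size of the name field in
 * struct kvm_rtas_token_args, since the name copied from userspace
 * need not be NUL terminated.
 */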
static int rtas_name_matches(char *s1, char *s2)
{
	struct kvm_rtas_token_args args;
	return !strncmp(s1, s2, sizeof(args.name));
}

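/* Drop any existing token bound to the handler named @name. */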
static int rtas_token_undefine(struct kvm *kvm, char *name)
{
	struct rtas_token_definition *d, *tmp;

	lockdep_assert_held(&kvm->arch.rtas_token_lock);

	list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
		if (rtas_name_matches(d->handler->name, name)) {
			list_del(&d->list);
			kfree(d);
			return 0;
		}
	}

	/* It's not an error to undefine an undefined token */
	return 0;
}

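/*
 * Bind @token to the handler matching @name. Fails with -EEXIST if the
 * token is already defined, and -ENOENT if no handler has that name.
 */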
static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
{
	struct rtas_token_definition *d;
	struct rtas_handler *h = NULL;
	bool found;
	int i;

	lockdep_assert_held(&kvm->arch.rtas_token_lock);

	list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
		if (d->token == token)
			return -EEXIST;
	}

	found = false;
	for (i = 0; i < ARRAY_SIZE(rtas_handlers); i++) {
		h = &rtas_handlers[i];
		if (rtas_name_matches(h->name, name)) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->handler = h;
	d->token = token;

	list_add_tail(&d->list, &kvm->arch.rtas_tokens);

	return 0;
}

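/*
 * Handle the KVM_PPC_RTAS_DEFINE_TOKEN vm ioctl: a non-zero token binds
 * the token to the named call, a zero token undefines it.
 */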
int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
{
	struct kvm_rtas_token_args args;
	int rc;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;

	mutex_lock(&kvm->arch.rtas_token_lock);

	if (args.token)
		rc = rtas_token_define(kvm, args.name, args.token);
	else
		rc = rtas_token_undefine(kvm, args.name);

	mutex_unlock(&kvm->arch.rtas_token_lock);

	return rc;
}

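/*
 * Handle the H_RTAS hypercall: copy the guest's RTAS argument block in,
 * dispatch to the handler bound to args.token, and copy the results
 * back to guest memory.
 */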
int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
{
	struct rtas_token_definition *d;
	struct rtas_args args;
	rtas_arg_t *orig_rets;
	gpa_t args_phys;
	int rc;

	/*
	 * r4 contains the guest physical address of the RTAS args
	 * Mask off the top 4 bits since this is a guest real address
	 */
	args_phys = kvmppc_get_gpr(vcpu, 4) & KVM_PAM;

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	rc = kvm_read_guest(vcpu->kvm, args_phys, &args, sizeof(args));
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	if (rc)
		goto fail;

	/*
	 * args->rets is a pointer into args->args. Now that we've
	 * copied args we need to fix it up to point into our copy,
	 * not the guest args. We also need to save the original
	 * value so we can restore it on the way out.
	 */
	orig_rets = args.rets;
	if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) {
		/*
		 * Don't overflow our args array: ensure there is room for
		 * at least rets[0] (even if the call specifies 0 nret).
		 *
		 * Each handler must then check for the correct nargs and nret
		 * values, but they may always return failure in rets[0].
		 */
		rc = -EINVAL;
		goto fail;
	}
	args.rets = &args.args[be32_to_cpu(args.nargs)];

	mutex_lock(&vcpu->kvm->arch.rtas_token_lock);

	rc = -ENOENT;
	list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
		if (d->token == be32_to_cpu(args.token)) {
			d->handler->handler(vcpu, &args);
			rc = 0;
			break;
		}
	}

	mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);

	if (rc == 0) {
		args.rets = orig_rets;
		rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
		if (rc)
			goto fail;
	}

	return rc;

fail:
	/*
	 * We only get here if the guest has called RTAS with a bogus
	 * args pointer or nargs/nret values that would overflow the
	 * array. That means we can't get to the args, and so we can't
	 * fail the RTAS call. So fail right out to userspace, which
	 * should kill the guest.
	 *
	 * SLOF should actually pass the hcall return value from the
	 * rtas handler call in r3, so enter_rtas could be modified to
	 * return a failure indication in r3 and we could return such
	 * errors to the guest rather than failing to host userspace.
	 * However old guests that don't test for failure could then
	 * continue silently after errors, so for now we won't do this.
	 */
	return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_rtas_hcall);

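/* Release every token definition; called when the VM is being torn down. */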
void kvmppc_rtas_tokens_free(struct kvm *kvm)
{
	struct rtas_token_definition *d, *tmp;

	list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
		list_del(&d->list);
		kfree(d);
	}
}