/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>

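/*
 * The KVM "magic page" is a page shared between guest and host that
 * mirrors frequently used register state (MSR, SPRGs, SRR0/1, ...).
 * The guest maps it at effective address -4096, so privileged register
 * accesses can be rewritten into plain loads and stores from that page
 * instead of trapping into the hypervisor every time.
 */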
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x)	(KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x))

#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4

static bool kvm_patching_worked = true;
static char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;

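/*
 * Replace one instruction in place; the icache flush makes the new
 * instruction visible before it can be executed.
 */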
static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}

static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}

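/*
 * Emit a relative branch at 'inst'.  'addr' is a signed displacement
 * and must fit into the offset field of the b instruction
 * (KVM_INST_B_MASK); callers check against KVM_INST_B_MAX first.
 */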
static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels the interrupt handlers and our code
	   can be in different regions, so we don't patch them */

	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

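/*
 * Dole out space from the kvm_tmp buffer for the rewritten code
 * fragments.  There is no way to free an allocation; whatever is left
 * over is handed back to the page allocator by kvm_free_tmp() at the
 * end.
 */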
static u32 *kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
		       kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}

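/*
 * The kvm_emulate_* symbols are assembly templates (kvm_emul.S).  Each
 * exports its length in words plus the word offsets of the
 * instructions that need fixups.  Rewriting an instruction means: copy
 * the template into kvm_tmp, fix up the branch back to the instruction
 * following the original one, fill in the register operands, stash the
 * original instruction in the template, then overwrite the original
 * instruction with a branch into the copy.
 */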
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_BOOKE

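/*
 * On BookE, external interrupts are masked with wrtee/wrteei rather
 * than mtmsr, so those instructions get the same template treatment.
 */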
extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

static void kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

#ifdef CONFIG_PPC_BOOK3S_32

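/*
 * 32-bit Book3S uses segment registers; mtsrin picks the SR from the
 * top four bits of rb, so both rt and rb have to be patched into the
 * template.
 */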
extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

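/*
 * Runs on every CPU via on_each_cpu(): asks the host to map the magic
 * page at -4096.  The host returns its feature bits in out[0]; they
 * gate the optional patchings (MAS*, SPRG4-7, ESR, PIR) below.
 */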
static void kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8];
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE;

	kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);

	*features = out[0];
}

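/*
 * Look at one kernel instruction and rewrite it if we know a faster
 * paravirtual equivalent.  For example, an
 *
 *	mfmsr	rN
 *
 * becomes a load from the shared page:
 *
 *	ld	rN, magic_var(msr)	(lwz on 32-bit)
 *
 * Plain SPR reads/writes turn into loads/stores, tlbsync becomes a
 * nop, and state-changing instructions (mtmsr[d], wrtee[i], mtsrin)
 * are redirected into a patched template in kvm_tmp.
 */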
static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}

extern u32 kvm_template_start[];
extern u32 kvm_template_end[];

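/*
 * Map the magic page, then walk the kernel text word by word and patch
 * every instruction we recognize.  The template code between
 * kvm_template_start and kvm_template_end is skipped so we don't patch
 * the patch code itself.
 */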
static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 tmp;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
	       kvm_patching_worked ? "worked" : "failed");
}

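/*
 * Issue a hypercall through kvm_hypercall_start: the hypercall number
 * goes in r11 and up to eight arguments in r3-r10; the return code
 * comes back in r3 and up to eight return values in r4-r11.
 */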
unsigned long kvm_hypercall(unsigned long *in,
			    unsigned long *out,
			    unsigned long nr)
{
	unsigned long register r0 asm("r0");
	unsigned long register r3 asm("r3") = in[0];
	unsigned long register r4 asm("r4") = in[1];
	unsigned long register r5 asm("r5") = in[2];
	unsigned long register r6 asm("r6") = in[3];
	unsigned long register r7 asm("r7") = in[4];
	unsigned long register r8 asm("r8") = in[5];
	unsigned long register r9 asm("r9") = in[6];
	unsigned long register r10 asm("r10") = in[7];
	unsigned long register r11 asm("r11") = nr;
	unsigned long register r12 asm("r12");

	asm volatile("bl	kvm_hypercall_start"
		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
		       "=r"(r12)
		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
		       "r"(r9), "r"(r10), "r"(r11)
		     : "memory", "cc", "xer", "ctr", "lr");

	out[0] = r4;
	out[1] = r5;
	out[2] = r6;
	out[3] = r7;
	out[4] = r8;
	out[5] = r9;
	out[6] = r10;
	out[7] = r11;

	return r3;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);

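/*
 * Read the hypercall instruction sequence from the "hcall-instructions"
 * property of the /hypervisor device tree node and patch it over the
 * kvm_hypercall_start stub (at most four instructions).
 */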
static int kvm_para_setup(void)
{
	extern u32 kvm_hypercall_start;
	struct device_node *hyper_node;
	u32 *insts;
	int len, i;

	hyper_node = of_find_node_by_path("/hypervisor");
	if (!hyper_node)
		return -1;

	insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len);
	if (!insts)
		return -1;
	if (len % 4)
		return -1;
	if (len > (4 * 4))
		return -1;

	for (i = 0; i < (len / 4); i++)
		kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]);

	return 0;
}

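/*
 * Give the unused tail of kvm_tmp back to the page allocator.  The
 * array sits in BSS, so each page has to be un-reserved and have its
 * refcount reinitialized before free_page() will accept it.
 */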
static __init void kvm_free_tmp(void)
{
	unsigned long start, end;

	start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK;
	end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;

	/* Free the tmp space we don't need */
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}

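/*
 * Boot-time probe: if we are running as a KVM guest, install the
 * host's hypercall sequence and, when offered, switch to the magic
 * page.  Leftover kvm_tmp space is freed in every case.
 */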
static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		goto free_tmp;

	if (kvm_para_setup())
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

free_tmp:
	kvm_free_tmp();

	return 0;
}

postcore_initcall(kvm_guest_init);