// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <trace/events/kvm.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/loongarch.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>
#include "trace.h"

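/*
 * Emulate a trapped CPUCFG instruction: valid indexes are served from the
 * vCPU's cached cpucfg[] values, the hypervisor range returns the KVM
 * signature and the paravirt feature bits, and any other index reads as 0.
 */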
static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int rd, rj;
	unsigned int index, ret;

	if (inst.reg2_format.opcode != cpucfg_op)
		return EMULATE_FAIL;

	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	++vcpu->stat.cpucfg_exits;
	index = vcpu->arch.gprs[rj];

	/*
	 * Per LoongArch Reference Manual 2.2.10.5, the return value is 0
	 * for an undefined CPUCFG index.
	 *
	 * Disable preemption since the hardware GCSR is accessed.
	 */
	preempt_disable();
	switch (index) {
	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
		vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index];
		break;
	case CPUCFG_KVM_SIG:
		/* CPUCFG emulation between 0x40000000 -- 0x400000ff */
		vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE;
		break;
	case CPUCFG_KVM_FEATURE:
		ret = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
		vcpu->arch.gprs[rd] = ret;
		break;
	default:
		vcpu->arch.gprs[rd] = 0;
		break;
	}
	preempt_enable();

	return EMULATE_DONE;
}

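/*
 * Read a guest CSR that is emulated in software. Only CSRs flagged as
 * SW_GCSR are backed by the vCPU's loongarch_csrs copy; anything else is
 * unsupported and reads as 0.
 */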
static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
{
	unsigned long val = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * From LoongArch Reference Manual Volume 1 Chapter 4.2.1:
	 * for an undefined CSR id, the return value is 0.
	 */
	if (get_gcsr_flag(csrid) & SW_GCSR)
		val = kvm_read_sw_gcsr(csr, csrid);
	else
		pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return val;
}

static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		kvm_write_sw_gcsr(csr, csrid, val);
	} else
		pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}

static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
				      unsigned long csr_mask, unsigned long val)
{
	unsigned long old = 0;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	if (get_gcsr_flag(csrid) & SW_GCSR) {
		old = kvm_read_sw_gcsr(csr, csrid);
		val = (old & ~csr_mask) | (val & csr_mask);
		kvm_write_sw_gcsr(csr, csrid, val);
		old = old & csr_mask;
	} else
		pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc);

	return old;
}

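/*
 * Emulate a trapped CSR instruction (csrrd/csrwr/csrxchg). Accesses to the
 * PMU CSR range are deferred via a KVM_REQ_PMU request with the PC rewound,
 * so the instruction is replayed once the PMU context is available.
 */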
static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
{
	unsigned int rd, rj, csrid;
	unsigned long csr_mask, val = 0;

	/*
	 * CSR value mask imm
	 * rj = 0 means csrrd
	 * rj = 1 means csrwr
	 * rj != 0,1 means csrxchg
	 */
	rd = inst.reg2csr_format.rd;
	rj = inst.reg2csr_format.rj;
	csrid = inst.reg2csr_format.csr;

	if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= vcpu->arch.max_pmu_csrid) {
		if (kvm_guest_has_pmu(&vcpu->arch)) {
			vcpu->arch.pc -= 4;
			kvm_make_request(KVM_REQ_PMU, vcpu);
			return EMULATE_DONE;
		}
	}

	/* Process CSR ops */
	switch (rj) {
	case 0: /* process csrrd */
		val = kvm_emu_read_csr(vcpu, csrid);
		vcpu->arch.gprs[rd] = val;
		break;
	case 1: /* process csrwr */
		val = vcpu->arch.gprs[rd];
		val = kvm_emu_write_csr(vcpu, csrid, val);
		vcpu->arch.gprs[rd] = val;
		break;
	default: /* process csrxchg */
		val = vcpu->arch.gprs[rd];
		csr_mask = vcpu->arch.gprs[rj];
		val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
		vcpu->arch.gprs[rd] = val;
	}

	return EMULATE_DONE;
}

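/*
 * Decode a trapped IOCSR access and describe it in run->iocsr_io so that
 * userspace can complete it (EMULATE_DO_IOCSR). For writes the GPR value is
 * copied into the data buffer here; for reads the target GPR is remembered
 * in io_gpr and filled in later by kvm_complete_iocsr_read().
 */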
int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int ret;
	unsigned long val;
	u32 addr, rd, rj, opcode;

	/*
	 * Each IOCSR access size and direction has its own opcode
	 */
	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	opcode = inst.reg2_format.opcode;
	addr = vcpu->arch.gprs[rj];
	ret = EMULATE_DO_IOCSR;
	run->iocsr_io.phys_addr = addr;
	run->iocsr_io.is_write = 0;

	/* LoongArch is little-endian */
	switch (opcode) {
	case iocsrrdb_op:
		run->iocsr_io.len = 1;
		break;
	case iocsrrdh_op:
		run->iocsr_io.len = 2;
		break;
	case iocsrrdw_op:
		run->iocsr_io.len = 4;
		break;
	case iocsrrdd_op:
		run->iocsr_io.len = 8;
		break;
	case iocsrwrb_op:
		run->iocsr_io.len = 1;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrh_op:
		run->iocsr_io.len = 2;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrw_op:
		run->iocsr_io.len = 4;
		run->iocsr_io.is_write = 1;
		break;
	case iocsrwrd_op:
		run->iocsr_io.len = 8;
		run->iocsr_io.is_write = 1;
		break;
	default:
		ret = EMULATE_FAIL;
		break;
	}

	if (ret == EMULATE_DO_IOCSR) {
		if (run->iocsr_io.is_write) {
			val = vcpu->arch.gprs[rd];
			memcpy(run->iocsr_io.data, &val, run->iocsr_io.len);
		}
		vcpu->arch.io_gpr = rd;
	}

	return ret;
}

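/*
 * Called once userspace has completed an IOCSR read exit: copy the returned
 * data into the GPR recorded in io_gpr, sign-extending by access size.
 */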
int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	switch (run->iocsr_io.len) {
	case 1:
		*gpr = *(s8 *)run->iocsr_io.data;
		break;
	case 2:
		*gpr = *(s16 *)run->iocsr_io.data;
		break;
	case 4:
		*gpr = *(s32 *)run->iocsr_io.data;
		break;
	case 8:
		*gpr = *(s64 *)run->iocsr_io.data;
		break;
	default:
		kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n",
			run->iocsr_io.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

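/*
 * Emulate the IDLE instruction: account the exit and block the vCPU until
 * it becomes runnable again (e.g. an interrupt is pending).
 */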
int kvm_emu_idle(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.idle_exits;
	trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE);

	if (!kvm_arch_vcpu_runnable(vcpu))
		kvm_vcpu_halt(vcpu);

	return EMULATE_DONE;
}

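/*
 * Decode the instruction that raised the GSPR exception (from BADI) and
 * dispatch it to the CPUCFG, CSR, cache, IOCSR or IDLE emulators. The PC is
 * advanced up front and rolled back if emulation fails.
 */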
static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
	unsigned long curr_pc;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;

	/* Fetch the instruction */
	inst.word = vcpu->arch.badi;
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	trace_kvm_exit_gspr(vcpu, inst.word);
	er = EMULATE_FAIL;
	switch (((inst.word >> 24) & 0xff)) {
	case 0x0: /* CPUCFG GSPR */
		er = kvm_emu_cpucfg(vcpu, inst);
		break;
	case 0x4: /* CSR{RD,WR,XCHG} GSPR */
		er = kvm_handle_csr(vcpu, inst);
		break;
	case 0x6: /* Cache, Idle and IOCSR GSPR */
		switch (((inst.word >> 22) & 0x3ff)) {
		case 0x18: /* Cache GSPR */
			er = EMULATE_DONE;
			trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE);
			break;
		case 0x19: /* Idle/IOCSR GSPR */
			switch (((inst.word >> 15) & 0x1ffff)) {
			case 0xc90: /* IOCSR GSPR */
				er = kvm_emu_iocsr(inst, run, vcpu);
				break;
			case 0xc91: /* Idle GSPR */
				er = kvm_emu_idle(vcpu);
				break;
			default:
				er = EMULATE_FAIL;
				break;
			}
			break;
		default:
			er = EMULATE_FAIL;
			break;
		}
		break;
	default:
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		kvm_arch_vcpu_dump_regs(vcpu);
		vcpu->arch.pc = curr_pc;
	}

	return er;
}

/*
 * Trigger GSPR:
 * 1) Execute CPUCFG instruction;
 * 2) Execute CACOP/IDLE instructions;
 * 3) Access to unimplemented CSRs/IOCSRs.
 */
static int kvm_handle_gspr(struct kvm_vcpu *vcpu)
{
	int ret = RESUME_GUEST;
	enum emulation_result er = EMULATE_DONE;

	er = kvm_trap_handle_gspr(vcpu);

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else if (er == EMULATE_DO_IOCSR) {
		vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		ret = RESUME_GUEST;
	}

	return ret;
}

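/*
 * Decode a trapped load (ld.*, ldptr.*, ldx.*) that targets MMIO space and
 * fill in run->mmio for a KVM_EXIT_MMIO read. The destination GPR and the
 * signedness of the load are recorded for kvm_complete_mmio_read().
 */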
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int ret;
	unsigned int op8, opcode, rd;
	struct kvm_run *run = vcpu->run;

	run->mmio.phys_addr = vcpu->arch.badv;
	vcpu->mmio_needed = 2;	/* signed */
	op8 = (inst.word >> 24) & 0xff;
	ret = EMULATE_DO_MMIO;

	switch (op8) {
	case 0x24 ... 0x27:	/* ldptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case ldptrw_op:
			run->mmio.len = 4;
			break;
		case ldptrd_op:
			run->mmio.len = 8;
			break;
		default:
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* ld.b/h/w/d, ld.bu/hu/wu process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;

		switch (opcode) {
		case ldb_op:
			run->mmio.len = 1;
			break;
		case ldbu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 1;
			break;
		case ldh_op:
			run->mmio.len = 2;
			break;
		case ldhu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 2;
			break;
		case ldw_op:
			run->mmio.len = 4;
			break;
		case ldwu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 4;
			break;
		case ldd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* ldx.b/h/w/d, ldx.bu/hu/wu process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case ldxb_op:
			run->mmio.len = 1;
			break;
		case ldxbu_op:
			run->mmio.len = 1;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxh_op:
			run->mmio.len = 2;
			break;
		case ldxhu_op:
			run->mmio.len = 2;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxw_op:
			run->mmio.len = 4;
			break;
		case ldxwu_op:
			run->mmio.len = 4;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxd_op:
			run->mmio.len = 8;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		/* Set for kvm_complete_mmio_read() use */
		vcpu->arch.io_gpr = rd;
		run->mmio.is_write = 0;
		vcpu->mmio_is_write = 0;
		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, run->mmio.len,
			       run->mmio.phys_addr, NULL);
	} else {
		kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
			inst.word, vcpu->arch.pc, vcpu->arch.badv);
		kvm_arch_vcpu_dump_regs(vcpu);
		vcpu->mmio_needed = 0;
	}

	return ret;
}

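/*
 * Called once userspace has completed an MMIO read exit: advance the PC past
 * the load and write the returned data, sign- or zero-extended as recorded
 * in mmio_needed, into the saved destination GPR.
 */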
int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];

	/* Update with new PC */
	update_pc(&vcpu->arch);
	switch (run->mmio.len) {
	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s8 *)run->mmio.data;
		else
			*gpr = *(u8 *)run->mmio.data;
		break;
	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s16 *)run->mmio.data;
		else
			*gpr = *(u16 *)run->mmio.data;
		break;
	case 4:
		if (vcpu->mmio_needed == 2)
			*gpr = *(s32 *)run->mmio.data;
		else
			*gpr = *(u32 *)run->mmio.data;
		break;
	case 8:
		*gpr = *(s64 *)run->mmio.data;
		break;
	default:
		kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
			run->mmio.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	trace_kvm_mmio(KVM_TRACE_MMIO_READ, run->mmio.len,
		       run->mmio.phys_addr, run->mmio.data);

	return er;
}

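/*
 * Decode a trapped store (st.*, stptr.*, stx.*) that targets MMIO space,
 * copy the source GPR value into run->mmio.data and request a KVM_EXIT_MMIO
 * write exit to userspace.
 */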
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
{
	int ret;
	unsigned int rd, op8, opcode;
	unsigned long curr_pc, rd_val = 0;
	struct kvm_run *run = vcpu->run;
	void *data = run->mmio.data;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to roll back the PC
	 */
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	op8 = (inst.word >> 24) & 0xff;
	run->mmio.phys_addr = vcpu->arch.badv;
	ret = EMULATE_DO_MMIO;
	switch (op8) {
	case 0x24 ... 0x27:	/* stptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case stptrw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stptrd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x28 ... 0x2e:	/* st.b/h/w/d process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;
		rd_val = vcpu->arch.gprs[rd];

		switch (opcode) {
		case stb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = rd_val;
			break;
		case sth_op:
			run->mmio.len = 2;
			*(unsigned short *)data = rd_val;
			break;
		case stw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = rd_val;
			break;
		case std_op:
			run->mmio.len = 8;
			*(unsigned long *)data = rd_val;
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	case 0x38:	/* stx.b/h/w/d process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case stxb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = vcpu->arch.gprs[rd];
			break;
		case stxh_op:
			run->mmio.len = 2;
			*(unsigned short *)data = vcpu->arch.gprs[rd];
			break;
		case stxw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stxd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			ret = EMULATE_FAIL;
			break;
		}
		break;
	default:
		ret = EMULATE_FAIL;
	}

	if (ret == EMULATE_DO_MMIO) {
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, run->mmio.len,
			       run->mmio.phys_addr, data);
	} else {
		/* Roll back the PC if emulation was unsuccessful */
		vcpu->arch.pc = curr_pc;
		kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n",
			inst.word, vcpu->arch.pc, vcpu->arch.badv);
		kvm_arch_vcpu_dump_regs(vcpu);
	}

	return ret;
}

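/*
 * Common handler for guest read/write faults: addresses beyond the maximum
 * guest physical address size get an ADE exception, faults backed by a
 * memslot are resolved by kvm_handle_mm_fault(), and everything else is
 * emulated as MMIO.
 */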
static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write)
{
	int ret;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;
	unsigned long badv = vcpu->arch.badv;

	/* Inject an ADE exception if the address exceeds the max GPA size */
	if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) {
		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
		return RESUME_GUEST;
	}

	ret = kvm_handle_mm_fault(vcpu, badv, write);
	if (ret) {
		/* Treat as MMIO */
		inst.word = vcpu->arch.badi;
		if (write) {
			er = kvm_emu_mmio_write(vcpu, inst);
		} else {
			/* A code fetch fault doesn't count as an MMIO */
			if (kvm_is_ifetch_fault(&vcpu->arch)) {
				kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF);
				return RESUME_GUEST;
			}

			er = kvm_emu_mmio_read(vcpu, inst);
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM);
		ret = RESUME_GUEST;
	}

	return ret;
}

static int kvm_handle_read_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, false);
}

static int kvm_handle_write_fault(struct kvm_vcpu *vcpu)
{
	return kvm_handle_rdwr_fault(vcpu, true);
}

/**
 * kvm_handle_fpu_disabled() - Guest used FPU while it is disabled in the host
 * @vcpu:	Virtual CPU context.
 *
 * Handle the case where the guest attempts to use the FPU when it hasn't
 * been allowed by the root context.
 */
static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (!kvm_guest_has_fpu(&vcpu->arch)) {
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);
		return RESUME_GUEST;
	}

	/*
	 * If guest FPU not present, the FPU operation should have been
	 * treated as a reserved instruction!
	 * If FPU already in use, we shouldn't get this at all.
	 */
	if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
		kvm_err("%s internal error\n", __func__);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_fpu(vcpu);

	return RESUME_GUEST;
}

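/*
 * Handle the KVM_HCALL_FUNC_NOTIFY hypercall: the guest registers the
 * physical address of its steal-time area (A1 selects the feature, A2
 * carries the address and valid bit), which triggers a steal-time update.
 */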
static long kvm_save_notify(struct kvm_vcpu *vcpu)
{
	unsigned long id, data;

	id = kvm_read_reg(vcpu, LOONGARCH_GPR_A1);
	data = kvm_read_reg(vcpu, LOONGARCH_GPR_A2);
	switch (id) {
	case BIT(KVM_FEATURE_STEAL_TIME):
		if (data & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
			return KVM_HCALL_INVALID_PARAMETER;

		vcpu->arch.st.guest_addr = data;
		if (!(data & KVM_STEAL_PHYS_VALID))
			return 0;

		vcpu->arch.st.last_steal = current->sched_info.run_delay;
		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
		return 0;
	default:
		return KVM_HCALL_INVALID_CODE;
	}

	return KVM_HCALL_INVALID_CODE;
}

/*
 * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use LSX when it is disabled in the root
 * context.
 */
static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lsx(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

/*
 * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use LASX when it is disabled in the root
 * context.
 */
static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lasx(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

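/*
 * kvm_handle_lbt_disabled() - Guest used LBT while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use LBT when it is disabled in the root
 * context.
 */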
static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
{
	if (kvm_own_lbt(vcpu))
		kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

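/*
 * Handle the paravirt IPI hypercall: A1/A2 carry a 128-bit bitmap of
 * destination vCPU ids relative to the base id in A3. Each set bit gets a
 * SWI0 interrupt queued and the target vCPU kicked.
 */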
static int kvm_send_pv_ipi(struct kvm_vcpu *vcpu)
{
	unsigned int min, cpu, i;
	unsigned long ipi_bitmap;
	struct kvm_vcpu *dest;

	min = kvm_read_reg(vcpu, LOONGARCH_GPR_A3);
	for (i = 0; i < 2; i++, min += BITS_PER_LONG) {
		ipi_bitmap = kvm_read_reg(vcpu, LOONGARCH_GPR_A1 + i);
		if (!ipi_bitmap)
			continue;

		cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG);
		while (cpu < BITS_PER_LONG) {
			dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min);
			cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, cpu + 1);
			if (!dest)
				continue;

			/* Send SWI0 to dest vcpu to emulate IPI interrupt */
			kvm_queue_irq(dest, INT_SWI0);
			kvm_vcpu_kick(dest);
		}
	}

	return 0;
}

/*
 * Hypercall emulation always returns to the guest; the caller should
 * check the return value.
 */
static void kvm_handle_service(struct kvm_vcpu *vcpu)
{
	long ret = KVM_HCALL_INVALID_CODE;
	unsigned long func = kvm_read_reg(vcpu, LOONGARCH_GPR_A0);

	switch (func) {
	case KVM_HCALL_FUNC_IPI:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_IPI)) {
			kvm_send_pv_ipi(vcpu);
			ret = KVM_HCALL_SUCCESS;
		}
		break;
	case KVM_HCALL_FUNC_NOTIFY:
		if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME))
			ret = kvm_save_notify(vcpu);
		break;
	default:
		break;
	}

	kvm_write_reg(vcpu, LOONGARCH_GPR_A0, ret);
}

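/*
 * Top-level handler for the hypercall exception: dispatch on the immediate
 * code, handle service hypercalls and software-debug breakpoints, and
 * advance the PC whenever execution resumes in the guest.
 */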
static int kvm_handle_hypercall(struct kvm_vcpu *vcpu)
{
	int ret;
	larch_inst inst;
	unsigned int code;

	inst.word = vcpu->arch.badi;
	code = inst.reg0i15_format.immediate;
	ret = RESUME_GUEST;

	switch (code) {
	case KVM_HCALL_SERVICE:
		vcpu->stat.hypercall_exits++;
		kvm_handle_service(vcpu);
		break;
	case KVM_HCALL_SWDBG:
		/* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */
		if (vcpu->guest_debug & KVM_GUESTDBG_SW_BP_MASK) {
			vcpu->run->exit_reason = KVM_EXIT_DEBUG;
			ret = RESUME_HOST;
			break;
		}
		fallthrough;
	default:
		/* Treat it as a noop instruction, only set the return value */
		kvm_write_reg(vcpu, LOONGARCH_GPR_A0, KVM_HCALL_INVALID_CODE);
		break;
	}

	if (ret == RESUME_GUEST)
		update_pc(&vcpu->arch);

	return ret;
}

/*
 * LoongArch KVM callback handling for unimplemented guest exit reasons
 */
static int kvm_fault_ni(struct kvm_vcpu *vcpu)
{
	unsigned int ecode, inst;
	unsigned long estat, badv;

	/* Fetch the instruction */
	inst = vcpu->arch.badi;
	badv = vcpu->arch.badv;
	estat = vcpu->arch.host_estat;
	ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
	kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n",
		ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat());
	kvm_arch_vcpu_dump_regs(vcpu);
	kvm_queue_exception(vcpu, EXCCODE_INE, 0);

	return RESUME_GUEST;
}

static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = {
	[0 ... EXCCODE_INT_START - 1] = kvm_fault_ni,
	[EXCCODE_TLBI] = kvm_handle_read_fault,
	[EXCCODE_TLBL] = kvm_handle_read_fault,
	[EXCCODE_TLBS] = kvm_handle_write_fault,
	[EXCCODE_TLBM] = kvm_handle_write_fault,
	[EXCCODE_FPDIS] = kvm_handle_fpu_disabled,
	[EXCCODE_LSXDIS] = kvm_handle_lsx_disabled,
	[EXCCODE_LASXDIS] = kvm_handle_lasx_disabled,
	[EXCCODE_BTDIS] = kvm_handle_lbt_disabled,
	[EXCCODE_GSPR] = kvm_handle_gspr,
	[EXCCODE_HVC] = kvm_handle_hypercall,
};

int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
	return kvm_fault_tables[fault](vcpu);
}