1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
4  */
5 
6 #include <linux/kvm_host.h>
7 #include <linux/entry-kvm.h>
8 #include <asm/fpu.h>
9 #include <asm/lbt.h>
10 #include <asm/loongarch.h>
11 #include <asm/setup.h>
12 #include <asm/time.h>
13 
14 #define CREATE_TRACE_POINTS
15 #include "trace.h"
16 
17 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
18 	KVM_GENERIC_VCPU_STATS(),
19 	STATS_DESC_COUNTER(VCPU, int_exits),
20 	STATS_DESC_COUNTER(VCPU, idle_exits),
21 	STATS_DESC_COUNTER(VCPU, cpucfg_exits),
22 	STATS_DESC_COUNTER(VCPU, signal_exits),
23 	STATS_DESC_COUNTER(VCPU, hypercall_exits)
24 };
25 
26 const struct kvm_stats_header kvm_vcpu_stats_header = {
27 	.name_size = KVM_STATS_NAME_SIZE,
28 	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
29 	.id_offset = sizeof(struct kvm_stats_header),
30 	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
31 	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
32 		       sizeof(kvm_vcpu_stats_desc),
33 };
34 
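/*
 * Host/guest PMU context switching: while the guest owns the PMU, the host
 * perf counters are saved and the host control registers are cleared
 * (kvm_save_host_pmu()) and the guest PMU CSR state is loaded; when the
 * guest loses the PMU, its state is saved back and the host state restored.
 */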
35 static inline void kvm_save_host_pmu(struct kvm_vcpu *vcpu)
36 {
37 	struct kvm_context *context;
38 
39 	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
40 	context->perf_cntr[0] = read_csr_perfcntr0();
41 	context->perf_cntr[1] = read_csr_perfcntr1();
42 	context->perf_cntr[2] = read_csr_perfcntr2();
43 	context->perf_cntr[3] = read_csr_perfcntr3();
44 	context->perf_ctrl[0] = write_csr_perfctrl0(0);
45 	context->perf_ctrl[1] = write_csr_perfctrl1(0);
46 	context->perf_ctrl[2] = write_csr_perfctrl2(0);
47 	context->perf_ctrl[3] = write_csr_perfctrl3(0);
48 }
49 
50 static inline void kvm_restore_host_pmu(struct kvm_vcpu *vcpu)
51 {
52 	struct kvm_context *context;
53 
54 	context = this_cpu_ptr(vcpu->kvm->arch.vmcs);
55 	write_csr_perfcntr0(context->perf_cntr[0]);
56 	write_csr_perfcntr1(context->perf_cntr[1]);
57 	write_csr_perfcntr2(context->perf_cntr[2]);
58 	write_csr_perfcntr3(context->perf_cntr[3]);
59 	write_csr_perfctrl0(context->perf_ctrl[0]);
60 	write_csr_perfctrl1(context->perf_ctrl[1]);
61 	write_csr_perfctrl2(context->perf_ctrl[2]);
62 	write_csr_perfctrl3(context->perf_ctrl[3]);
63 }
64 
65 
66 static inline void kvm_save_guest_pmu(struct kvm_vcpu *vcpu)
67 {
68 	struct loongarch_csrs *csr = vcpu->arch.csr;
69 
70 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
71 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
72 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
73 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
74 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
75 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
76 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
77 	kvm_read_clear_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
78 }
79 
80 static inline void kvm_restore_guest_pmu(struct kvm_vcpu *vcpu)
81 {
82 	struct loongarch_csrs *csr = vcpu->arch.csr;
83 
84 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0);
85 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1);
86 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2);
87 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3);
88 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
89 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
90 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
91 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
92 }
93 
94 static int kvm_own_pmu(struct kvm_vcpu *vcpu)
95 {
96 	unsigned long val;
97 
98 	if (!kvm_guest_has_pmu(&vcpu->arch))
99 		return -EINVAL;
100 
101 	kvm_save_host_pmu(vcpu);
102 
103 	/* Grant PM0-PM(num) counter access to the guest */
104 	val = read_csr_gcfg() & ~CSR_GCFG_GPERF;
105 	val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT;
106 	write_csr_gcfg(val);
107 
108 	kvm_restore_guest_pmu(vcpu);
109 
110 	return 0;
111 }
112 
113 static void kvm_lose_pmu(struct kvm_vcpu *vcpu)
114 {
115 	unsigned long val;
116 	struct loongarch_csrs *csr = vcpu->arch.csr;
117 
118 	if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU))
119 		return;
120 
121 	kvm_save_guest_pmu(vcpu);
122 
123 	/* Disable pmu access from guest */
124 	write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF);
125 
126 	/*
127 	 * Clear KVM_LARCH_PMU if the guest is not using the PMU CSRs when
128 	 * exiting the guest, so that the PMU CSR context does not need to
129 	 * be switched the next time we trap into the guest.
130 	 */
131 	val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0);
132 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1);
133 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2);
134 	val |= kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
135 	if (!(val & KVM_PMU_EVENT_ENABLED))
136 		vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU;
137 
138 	kvm_restore_host_pmu(vcpu);
139 }
140 
141 static void kvm_restore_pmu(struct kvm_vcpu *vcpu)
142 {
143 	if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU))
144 		kvm_make_request(KVM_REQ_PMU, vcpu);
145 }
146 
147 static void kvm_check_pmu(struct kvm_vcpu *vcpu)
148 {
149 	if (kvm_check_request(KVM_REQ_PMU, vcpu)) {
150 		kvm_own_pmu(vcpu);
151 		vcpu->arch.aux_inuse |= KVM_LARCH_PMU;
152 	}
153 }
154 
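/*
 * Update the guest's steal-time record. The version field works like a
 * seqcount: it is bumped to an odd value before the steal counter is
 * written and back to an even value afterwards. A guest-side reader would
 * typically loop until it observes an even, unchanged version, roughly
 * (illustrative sketch only, not an API defined in this file):
 *
 *	do {
 *		ver = st->version;
 *		smp_rmb();
 *		steal = st->steal;
 *		smp_rmb();
 *	} while ((ver & 1) || ver != st->version);
 */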
155 static void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
156 {
157 	u32 version;
158 	u64 steal;
159 	gpa_t gpa;
160 	struct kvm_memslots *slots;
161 	struct kvm_steal_time __user *st;
162 	struct gfn_to_hva_cache *ghc;
163 
164 	ghc = &vcpu->arch.st.cache;
165 	gpa = vcpu->arch.st.guest_addr;
166 	if (!(gpa & KVM_STEAL_PHYS_VALID))
167 		return;
168 
169 	gpa &= KVM_STEAL_PHYS_MASK;
170 	slots = kvm_memslots(vcpu->kvm);
171 	if (slots->generation != ghc->generation || gpa != ghc->gpa) {
172 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) {
173 			ghc->gpa = INVALID_GPA;
174 			return;
175 		}
176 	}
177 
178 	st = (struct kvm_steal_time __user *)ghc->hva;
179 	unsafe_get_user(version, &st->version, out);
180 	if (version & 1)
181 		version += 1; /* first time write, random junk */
182 
183 	version += 1;
184 	unsafe_put_user(version, &st->version, out);
185 	smp_wmb();
186 
187 	unsafe_get_user(steal, &st->steal, out);
188 	steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
189 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
190 	unsafe_put_user(steal, &st->steal, out);
191 
192 	smp_wmb();
193 	version += 1;
194 	unsafe_put_user(version, &st->version, out);
195 out:
196 	mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
197 }
198 
199 /*
200  * kvm_check_requests - check and handle pending vCPU requests
201  *
202  * Return: RESUME_GUEST if we should enter the guest
203  *         RESUME_HOST  if we should exit to userspace
204  */
205 static int kvm_check_requests(struct kvm_vcpu *vcpu)
206 {
207 	if (!kvm_request_pending(vcpu))
208 		return RESUME_GUEST;
209 
210 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
211 		vcpu->arch.vpid = 0;  /* Drop vpid for this vCPU */
212 
213 	if (kvm_dirty_ring_check_request(vcpu))
214 		return RESUME_HOST;
215 
216 	if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
217 		kvm_update_stolen_time(vcpu);
218 
219 	return RESUME_GUEST;
220 }
221 
222 static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
223 {
224 	lockdep_assert_irqs_disabled();
225 	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
226 		if (vcpu->arch.flush_gpa != INVALID_GPA) {
227 			kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
228 			vcpu->arch.flush_gpa = INVALID_GPA;
229 		}
230 }
231 
232 /*
233  * Check and handle pending signals, vCPU requests, etc.
234  * Runs with interrupts and preemption enabled.
235  *
236  * Return: RESUME_GUEST if we should enter the guest
237  *         RESUME_HOST  if we should exit to userspace
238  *         < 0 if we should exit to userspace, where the return value
239  *         indicates an error
240  */
241 static int kvm_enter_guest_check(struct kvm_vcpu *vcpu)
242 {
243 	int idx, ret;
244 
245 	/*
246 	 * Check conditions before entering the guest
247 	 */
248 	ret = xfer_to_guest_mode_handle_work(vcpu);
249 	if (ret < 0)
250 		return ret;
251 
252 	idx = srcu_read_lock(&vcpu->kvm->srcu);
253 	ret = kvm_check_requests(vcpu);
254 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
255 
256 	return ret;
257 }
258 
259 /*
260  * Called with irq enabled
261  *
262  * Return: RESUME_GUEST if we should enter the guest, and irq disabled
263  *         Others if we should exit to userspace
264  */
265 static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
266 {
267 	int ret;
268 
269 	do {
270 		ret = kvm_enter_guest_check(vcpu);
271 		if (ret != RESUME_GUEST)
272 			break;
273 
274 		/*
275 		 * Handle the vCPU timer and interrupts, check requests and
276 		 * check the vmid before the vCPU enters the guest
277 		 */
278 		local_irq_disable();
279 		kvm_deliver_intr(vcpu);
280 		kvm_deliver_exception(vcpu);
281 		/* Make sure the vcpu mode has been written */
282 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
283 		kvm_check_vpid(vcpu);
284 		kvm_check_pmu(vcpu);
285 
286 		/*
287 		 * Must be called after kvm_check_vpid(), since that updates
288 		 * CSR.GSTAT which is used by kvm_flush_tlb_gpa(); it may also
289 		 * clear the pending KVM_REQ_TLB_FLUSH_GPA bit
290 		 */
291 		kvm_late_check_requests(vcpu);
292 		vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
293 		/* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
294 		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
295 
296 		if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) {
297 			kvm_lose_pmu(vcpu);
298 			/* make sure the vcpu mode has been written */
299 			smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE);
300 			local_irq_enable();
301 			ret = -EAGAIN;
302 		}
303 	} while (ret != RESUME_GUEST);
304 
305 	return ret;
306 }
307 
308 /*
309  * Return 1 to resume the guest and "<= 0" to resume the host.
310  */
311 static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
312 {
313 	int ret = RESUME_GUEST;
314 	unsigned long estat = vcpu->arch.host_estat;
315 	u32 intr = estat & CSR_ESTAT_IS;
316 	u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
317 
318 	vcpu->mode = OUTSIDE_GUEST_MODE;
319 
320 	/* Set a default exit reason */
321 	run->exit_reason = KVM_EXIT_UNKNOWN;
322 
323 	kvm_lose_pmu(vcpu);
324 
325 	guest_timing_exit_irqoff();
326 	guest_state_exit_irqoff();
327 	local_irq_enable();
328 
329 	trace_kvm_exit(vcpu, ecode);
330 	if (ecode) {
331 		ret = kvm_handle_fault(vcpu, ecode);
332 	} else {
333 		WARN(!intr, "vm exiting with suspicious irq\n");
334 		++vcpu->stat.int_exits;
335 	}
336 
337 	if (ret == RESUME_GUEST)
338 		ret = kvm_pre_enter_guest(vcpu);
339 
340 	if (ret != RESUME_GUEST) {
341 		local_irq_disable();
342 		return ret;
343 	}
344 
345 	guest_timing_enter_irqoff();
346 	guest_state_enter_irqoff();
347 	trace_kvm_reenter(vcpu);
348 
349 	return RESUME_GUEST;
350 }
351 
352 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
353 {
354 	return !!(vcpu->arch.irq_pending) &&
355 		vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE;
356 }
357 
358 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
359 {
360 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
361 }
362 
363 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
364 {
365 	return false;
366 }
367 
368 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
369 {
370 	return VM_FAULT_SIGBUS;
371 }
372 
373 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
374 				  struct kvm_translation *tr)
375 {
376 	return -EINVAL;
377 }
378 
379 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
380 {
381 	int ret;
382 
383 	/* Protect from TOD sync and vcpu_load/put() */
384 	preempt_disable();
385 	ret = kvm_pending_timer(vcpu) ||
386 		kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI);
387 	preempt_enable();
388 
389 	return ret;
390 }
391 
392 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
393 {
394 	int i;
395 
396 	kvm_debug("vCPU Register Dump:\n");
397 	kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc);
398 	kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending);
399 
400 	for (i = 0; i < 32; i += 4) {
401 		kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i,
402 		       vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1],
403 		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
404 	}
405 
406 	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
407 		  kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD),
408 		  kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT));
409 
410 	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA));
411 
412 	return 0;
413 }
414 
415 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
416 				struct kvm_mp_state *mp_state)
417 {
418 	*mp_state = vcpu->arch.mp_state;
419 
420 	return 0;
421 }
422 
423 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
424 				struct kvm_mp_state *mp_state)
425 {
426 	int ret = 0;
427 
428 	switch (mp_state->mp_state) {
429 	case KVM_MP_STATE_RUNNABLE:
430 		vcpu->arch.mp_state = *mp_state;
431 		break;
432 	default:
433 		ret = -EINVAL;
434 	}
435 
436 	return ret;
437 }
438 
439 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
440 					struct kvm_guest_debug *dbg)
441 {
442 	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
443 		return -EINVAL;
444 
445 	if (dbg->control & KVM_GUESTDBG_ENABLE)
446 		vcpu->guest_debug = dbg->control;
447 	else
448 		vcpu->guest_debug = 0;
449 
450 	return 0;
451 }
452 
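/*
 * Bind a guest physical CPUID to this vCPU. The mapping is recorded in
 * kvm->arch.phyid_map so that kvm_get_vcpu_by_cpuid() can look the vCPU up
 * by CPUID (e.g. when delivering interrupts); a CPUID can be bound to at
 * most one vCPU and cannot be changed once set.
 */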
453 static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val)
454 {
455 	int cpuid;
456 	struct kvm_phyid_map *map;
457 	struct loongarch_csrs *csr = vcpu->arch.csr;
458 
459 	if (val >= KVM_MAX_PHYID)
460 		return -EINVAL;
461 
462 	map = vcpu->kvm->arch.phyid_map;
463 	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
464 
465 	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
466 	if ((cpuid < KVM_MAX_PHYID) && map->phys_map[cpuid].enabled) {
467 		/* Discard duplicated CPUID set operation */
468 		if (cpuid == val) {
469 			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
470 			return 0;
471 		}
472 
473 		/*
474 		 * The CPUID has already been set for this vCPU;
475 		 * forbid changing it to a different CPUID at runtime
476 		 */
477 		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
478 		return -EINVAL;
479 	}
480 
481 	if (map->phys_map[val].enabled) {
482 		/* Discard duplicated CPUID set operation */
483 		if (vcpu == map->phys_map[val].vcpu) {
484 			spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
485 			return 0;
486 		}
487 
488 		/*
489 		 * The new CPUID is already in use by another vCPU;
490 		 * forbid sharing the same CPUID between different vCPUs
491 		 */
492 		spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
493 		return -EINVAL;
494 	}
495 
496 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val);
497 	map->phys_map[val].enabled	= true;
498 	map->phys_map[val].vcpu		= vcpu;
499 	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
500 
501 	return 0;
502 }
503 
504 static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu)
505 {
506 	int cpuid;
507 	struct kvm_phyid_map *map;
508 	struct loongarch_csrs *csr = vcpu->arch.csr;
509 
510 	map = vcpu->kvm->arch.phyid_map;
511 	cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_CPUID);
512 
513 	if (cpuid >= KVM_MAX_PHYID)
514 		return;
515 
516 	spin_lock(&vcpu->kvm->arch.phyid_map_lock);
517 	if (map->phys_map[cpuid].enabled) {
518 		map->phys_map[cpuid].vcpu = NULL;
519 		map->phys_map[cpuid].enabled = false;
520 		kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
521 	}
522 	spin_unlock(&vcpu->kvm->arch.phyid_map_lock);
523 }
524 
525 struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid)
526 {
527 	struct kvm_phyid_map *map;
528 
529 	if (cpuid >= KVM_MAX_PHYID)
530 		return NULL;
531 
532 	map = kvm->arch.phyid_map;
533 	if (!map->phys_map[cpuid].enabled)
534 		return NULL;
535 
536 	return map->phys_map[cpuid].vcpu;
537 }
538 
539 static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val)
540 {
541 	unsigned long gintc;
542 	struct loongarch_csrs *csr = vcpu->arch.csr;
543 
544 	if (get_gcsr_flag(id) & INVALID_GCSR)
545 		return -EINVAL;
546 
547 	if (id == LOONGARCH_CSR_ESTAT) {
548 		preempt_disable();
549 		vcpu_load(vcpu);
550 		/*
551 		 * Sync pending interrupts into ESTAT so that they are
552 		 * preserved across the VM migration stage
553 		 */
554 		kvm_deliver_intr(vcpu);
555 		vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
556 		vcpu_put(vcpu);
557 		preempt_enable();
558 
559 		/* ESTAT IP0~IP7 are taken from GINTC */
560 		gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff;
561 		*val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2);
562 		return 0;
563 	}
564 
565 	/*
566 	 * Get the software CSR state, since the software state is
567 	 * consistent with the hardware state for a synchronous ioctl
568 	 */
569 	*val = kvm_read_sw_gcsr(csr, id);
570 
571 	return 0;
572 }
573 
574 static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val)
575 {
576 	int ret = 0, gintc;
577 	struct loongarch_csrs *csr = vcpu->arch.csr;
578 
579 	if (get_gcsr_flag(id) & INVALID_GCSR)
580 		return -EINVAL;
581 
582 	if (id == LOONGARCH_CSR_CPUID)
583 		return kvm_set_cpuid(vcpu, val);
584 
585 	if (id == LOONGARCH_CSR_ESTAT) {
586 		/* ESTAT IP0~IP7 are injected through GINTC */
587 		gintc = (val >> 2) & 0xff;
588 		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc);
589 
590 		gintc = val & ~(0xffUL << 2);
591 		kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc);
592 
593 		return ret;
594 	}
595 
596 	kvm_write_sw_gcsr(csr, id, val);
597 
598 	/*
599 	 * After modifying a PMU CSR value of the vCPU, set KVM_REQ_PMU
600 	 * if any of the PMU control CSRs has an event enabled.
601 	 */
602 	if (id >= LOONGARCH_CSR_PERFCTRL0 && id <= LOONGARCH_CSR_PERFCNTR3) {
603 		unsigned long val;
604 
605 		val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) |
606 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) |
607 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) |
608 		      kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3);
609 
610 		if (val & KVM_PMU_EVENT_ENABLED)
611 			kvm_make_request(KVM_REQ_PMU, vcpu);
612 	}
613 
614 	return ret;
615 }
616 
617 static int _kvm_get_cpucfg_mask(int id, u64 *v)
618 {
619 	if (id < 0 || id >= KVM_MAX_CPUCFG_REGS)
620 		return -EINVAL;
621 
622 	switch (id) {
623 	case LOONGARCH_CPUCFG0:
624 		*v = GENMASK(31, 0);
625 		return 0;
626 	case LOONGARCH_CPUCFG1:
627 		/* CPUCFG1_MSGINT is not supported by KVM */
628 		*v = GENMASK(25, 0);
629 		return 0;
630 	case LOONGARCH_CPUCFG2:
631 		/* CPUCFG2 features unconditionally supported by KVM */
632 		*v = CPUCFG2_FP     | CPUCFG2_FPSP  | CPUCFG2_FPDP     |
633 		     CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
634 		     CPUCFG2_LSPW | CPUCFG2_LAM;
635 		/*
636 		 * For the ISA extensions listed below, if one is supported
637 		 * by the host, then it is also supported by KVM.
638 		 */
639 		if (cpu_has_lsx)
640 			*v |= CPUCFG2_LSX;
641 		if (cpu_has_lasx)
642 			*v |= CPUCFG2_LASX;
643 		if (cpu_has_lbt_x86)
644 			*v |= CPUCFG2_X86BT;
645 		if (cpu_has_lbt_arm)
646 			*v |= CPUCFG2_ARMBT;
647 		if (cpu_has_lbt_mips)
648 			*v |= CPUCFG2_MIPSBT;
649 
650 		return 0;
651 	case LOONGARCH_CPUCFG3:
652 		*v = GENMASK(16, 0);
653 		return 0;
654 	case LOONGARCH_CPUCFG4:
655 	case LOONGARCH_CPUCFG5:
656 		*v = GENMASK(31, 0);
657 		return 0;
658 	case LOONGARCH_CPUCFG6:
659 		if (cpu_has_pmp)
660 			*v = GENMASK(14, 0);
661 		else
662 			*v = 0;
663 		return 0;
664 	case LOONGARCH_CPUCFG16:
665 		*v = GENMASK(16, 0);
666 		return 0;
667 	case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
668 		*v = GENMASK(30, 0);
669 		return 0;
670 	default:
671 		/*
672 		 * CPUCFG bits should be zero if reserved by HW or not
673 		 * supported by KVM.
674 		 */
675 		*v = 0;
676 		return 0;
677 	}
678 }
679 
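/*
 * Validate a userspace-supplied CPUCFG value against the KVM-supported mask
 * and the architectural dependencies: a constant timer (LLFTP) is mandatory,
 * FP implies both FPSP and FPDP, LSX requires FP, and LASX requires LSX.
 */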
680 static int kvm_check_cpucfg(int id, u64 val)
681 {
682 	int ret;
683 	u64 mask = 0;
684 
685 	ret = _kvm_get_cpucfg_mask(id, &mask);
686 	if (ret)
687 		return ret;
688 
689 	if (val & ~mask)
690 		/* Unsupported features and/or the higher 32 bits should not be set */
691 		return -EINVAL;
692 
693 	switch (id) {
694 	case LOONGARCH_CPUCFG2:
695 		if (!(val & CPUCFG2_LLFTP))
696 			/* Guests must have a constant timer */
697 			return -EINVAL;
698 		if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP)))
699 			/* Single and double float point must both be set when FP is enabled */
700 			return -EINVAL;
701 		if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP))
702 			/* LSX architecturally implies FP but val does not satisfy that */
703 			return -EINVAL;
704 		if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX))
705 			/* LASX architecturally implies LSX and FP but val does not satisfy that */
706 			return -EINVAL;
707 		return 0;
708 	case LOONGARCH_CPUCFG6:
709 		if (val & CPUCFG6_PMP) {
710 			u32 host = read_cpucfg(LOONGARCH_CPUCFG6);
711 			if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS))
712 				return -EINVAL;
713 			if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM))
714 				return -EINVAL;
715 			if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM))
716 				return -EINVAL;
717 		}
718 		return 0;
719 	default:
720 		/*
721 		 * Values for the other CPUCFG IDs are not being further validated
722 		 * besides the mask check above.
723 		 */
724 		return 0;
725 	}
726 }
727 
728 static int kvm_get_one_reg(struct kvm_vcpu *vcpu,
729 		const struct kvm_one_reg *reg, u64 *v)
730 {
731 	int id, ret = 0;
732 	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
733 
734 	switch (type) {
735 	case KVM_REG_LOONGARCH_CSR:
736 		id = KVM_GET_IOC_CSR_IDX(reg->id);
737 		ret = _kvm_getcsr(vcpu, id, v);
738 		break;
739 	case KVM_REG_LOONGARCH_CPUCFG:
740 		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
741 		if (id >= 0 && id < KVM_MAX_CPUCFG_REGS)
742 			*v = vcpu->arch.cpucfg[id];
743 		else
744 			ret = -EINVAL;
745 		break;
746 	case KVM_REG_LOONGARCH_LBT:
747 		if (!kvm_guest_has_lbt(&vcpu->arch))
748 			return -ENXIO;
749 
750 		switch (reg->id) {
751 		case KVM_REG_LOONGARCH_LBT_SCR0:
752 			*v = vcpu->arch.lbt.scr0;
753 			break;
754 		case KVM_REG_LOONGARCH_LBT_SCR1:
755 			*v = vcpu->arch.lbt.scr1;
756 			break;
757 		case KVM_REG_LOONGARCH_LBT_SCR2:
758 			*v = vcpu->arch.lbt.scr2;
759 			break;
760 		case KVM_REG_LOONGARCH_LBT_SCR3:
761 			*v = vcpu->arch.lbt.scr3;
762 			break;
763 		case KVM_REG_LOONGARCH_LBT_EFLAGS:
764 			*v = vcpu->arch.lbt.eflags;
765 			break;
766 		case KVM_REG_LOONGARCH_LBT_FTOP:
767 			*v = vcpu->arch.fpu.ftop;
768 			break;
769 		default:
770 			ret = -EINVAL;
771 			break;
772 		}
773 		break;
774 	case KVM_REG_LOONGARCH_KVM:
775 		switch (reg->id) {
776 		case KVM_REG_LOONGARCH_COUNTER:
777 			*v = drdtime() + vcpu->kvm->arch.time_offset;
778 			break;
779 		case KVM_REG_LOONGARCH_DEBUG_INST:
780 			*v = INSN_HVCL | KVM_HCALL_SWDBG;
781 			break;
782 		default:
783 			ret = -EINVAL;
784 			break;
785 		}
786 		break;
787 	default:
788 		ret = -EINVAL;
789 		break;
790 	}
791 
792 	return ret;
793 }
794 
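/*
 * Illustrative userspace usage (a sketch, not part of this file): a guest
 * CSR is read through KVM_GET_ONE_REG by composing a 64-bit register id
 * from the LoongArch CSR type, the 64-bit size flag and the CSR number,
 * roughly (vcpu_fd being the vCPU file descriptor):
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | LOONGARCH_CSR_CRMD,
 *		.addr = (__u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */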
795 static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
796 {
797 	int ret = 0;
798 	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
799 
800 	switch (size) {
801 	case KVM_REG_SIZE_U64:
802 		ret = kvm_get_one_reg(vcpu, reg, &v);
803 		if (ret)
804 			return ret;
805 		ret = put_user(v, (u64 __user *)(long)reg->addr);
806 		break;
807 	default:
808 		ret = -EINVAL;
809 		break;
810 	}
811 
812 	return ret;
813 }
814 
815 static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
816 			const struct kvm_one_reg *reg, u64 v)
817 {
818 	int id, ret = 0;
819 	u64 type = reg->id & KVM_REG_LOONGARCH_MASK;
820 
821 	switch (type) {
822 	case KVM_REG_LOONGARCH_CSR:
823 		id = KVM_GET_IOC_CSR_IDX(reg->id);
824 		ret = _kvm_setcsr(vcpu, id, v);
825 		break;
826 	case KVM_REG_LOONGARCH_CPUCFG:
827 		id = KVM_GET_IOC_CPUCFG_IDX(reg->id);
828 		ret = kvm_check_cpucfg(id, v);
829 		if (ret)
830 			break;
831 		vcpu->arch.cpucfg[id] = (u32)v;
832 		if (id == LOONGARCH_CPUCFG6)
833 			vcpu->arch.max_pmu_csrid =
834 				LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1;
835 		break;
836 	case KVM_REG_LOONGARCH_LBT:
837 		if (!kvm_guest_has_lbt(&vcpu->arch))
838 			return -ENXIO;
839 
840 		switch (reg->id) {
841 		case KVM_REG_LOONGARCH_LBT_SCR0:
842 			vcpu->arch.lbt.scr0 = v;
843 			break;
844 		case KVM_REG_LOONGARCH_LBT_SCR1:
845 			vcpu->arch.lbt.scr1 = v;
846 			break;
847 		case KVM_REG_LOONGARCH_LBT_SCR2:
848 			vcpu->arch.lbt.scr2 = v;
849 			break;
850 		case KVM_REG_LOONGARCH_LBT_SCR3:
851 			vcpu->arch.lbt.scr3 = v;
852 			break;
853 		case KVM_REG_LOONGARCH_LBT_EFLAGS:
854 			vcpu->arch.lbt.eflags = v;
855 			break;
856 		case KVM_REG_LOONGARCH_LBT_FTOP:
857 			vcpu->arch.fpu.ftop = v;
858 			break;
859 		default:
860 			ret = -EINVAL;
861 			break;
862 		}
863 		break;
864 	case KVM_REG_LOONGARCH_KVM:
865 		switch (reg->id) {
866 		case KVM_REG_LOONGARCH_COUNTER:
867 			/*
868 			 * The guest counter offset is board-wide, not per vCPU;
869 			 * on an SMP system it is only set once, by vCPU 0
870 			 */
871 			if (vcpu->vcpu_id == 0)
872 				vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
873 			break;
874 		case KVM_REG_LOONGARCH_VCPU_RESET:
875 			vcpu->arch.st.guest_addr = 0;
876 			memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
877 			memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
878 
879 			/*
880 			 * On vCPU reset, clear the ESTAT and GINTC registers here;
881 			 * the other CSR registers are cleared via _kvm_setcsr().
882 			 */
883 			kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_GINTC, 0);
884 			kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_ESTAT, 0);
885 			break;
886 		default:
887 			ret = -EINVAL;
888 			break;
889 		}
890 		break;
891 	default:
892 		ret = -EINVAL;
893 		break;
894 	}
895 
896 	return ret;
897 }
898 
899 static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
900 {
901 	int ret = 0;
902 	u64 v, size = reg->id & KVM_REG_SIZE_MASK;
903 
904 	switch (size) {
905 	case KVM_REG_SIZE_U64:
906 		ret = get_user(v, (u64 __user *)(long)reg->addr);
907 		if (ret)
908 			return ret;
909 		break;
910 	default:
911 		return -EINVAL;
912 	}
913 
914 	return kvm_set_one_reg(vcpu, reg, v);
915 }
916 
917 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
918 {
919 	return -ENOIOCTLCMD;
920 }
921 
922 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
923 {
924 	return -ENOIOCTLCMD;
925 }
926 
927 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
928 {
929 	int i;
930 
931 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
932 		regs->gpr[i] = vcpu->arch.gprs[i];
933 
934 	regs->pc = vcpu->arch.pc;
935 
936 	return 0;
937 }
938 
939 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
940 {
941 	int i;
942 
943 	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
944 		vcpu->arch.gprs[i] = regs->gpr[i];
945 
946 	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
947 	vcpu->arch.pc = regs->pc;
948 
949 	return 0;
950 }
951 
952 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
953 				     struct kvm_enable_cap *cap)
954 {
955 	/* FPU is enabled by default; LSX/LASX will be supported later. */
956 	return -EINVAL;
957 }
958 
959 static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
960 					 struct kvm_device_attr *attr)
961 {
962 	switch (attr->attr) {
963 	case LOONGARCH_CPUCFG2:
964 	case LOONGARCH_CPUCFG6:
965 		return 0;
966 	case CPUCFG_KVM_FEATURE:
967 		return 0;
968 	default:
969 		return -ENXIO;
970 	}
971 
972 	return -ENXIO;
973 }
974 
975 static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu,
976 					 struct kvm_device_attr *attr)
977 {
978 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
979 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
980 		return -ENXIO;
981 
982 	return 0;
983 }
984 
985 static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
986 				       struct kvm_device_attr *attr)
987 {
988 	int ret = -ENXIO;
989 
990 	switch (attr->group) {
991 	case KVM_LOONGARCH_VCPU_CPUCFG:
992 		ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
993 		break;
994 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
995 		ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
996 		break;
997 	default:
998 		break;
999 	}
1000 
1001 	return ret;
1002 }
1003 
1004 static int kvm_loongarch_cpucfg_get_attr(struct kvm_vcpu *vcpu,
1005 					 struct kvm_device_attr *attr)
1006 {
1007 	int ret = 0;
1008 	uint64_t val;
1009 	uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
1010 
1011 	switch (attr->attr) {
1012 	case 0 ... (KVM_MAX_CPUCFG_REGS - 1):
1013 		ret = _kvm_get_cpucfg_mask(attr->attr, &val);
1014 		if (ret)
1015 			return ret;
1016 		break;
1017 	case CPUCFG_KVM_FEATURE:
1018 		val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK;
1019 		break;
1020 	default:
1021 		return -ENXIO;
1022 	}
1023 
1024 	put_user(val, uaddr);
1025 
1026 	return ret;
1027 }
1028 
1029 static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu,
1030 					 struct kvm_device_attr *attr)
1031 {
1032 	u64 gpa;
1033 	u64 __user *user = (u64 __user *)attr->addr;
1034 
1035 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1036 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1037 		return -ENXIO;
1038 
1039 	gpa = vcpu->arch.st.guest_addr;
1040 	if (put_user(gpa, user))
1041 		return -EFAULT;
1042 
1043 	return 0;
1044 }
1045 
1046 static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
1047 				       struct kvm_device_attr *attr)
1048 {
1049 	int ret = -ENXIO;
1050 
1051 	switch (attr->group) {
1052 	case KVM_LOONGARCH_VCPU_CPUCFG:
1053 		ret = kvm_loongarch_cpucfg_get_attr(vcpu, attr);
1054 		break;
1055 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1056 		ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
1057 		break;
1058 	default:
1059 		break;
1060 	}
1061 
1062 	return ret;
1063 }
1064 
1065 static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
1066 					 struct kvm_device_attr *attr)
1067 {
1068 	u64 val, valid;
1069 	u64 __user *user = (u64 __user *)attr->addr;
1070 	struct kvm *kvm = vcpu->kvm;
1071 
1072 	switch (attr->attr) {
1073 	case CPUCFG_KVM_FEATURE:
1074 		if (get_user(val, user))
1075 			return -EFAULT;
1076 
1077 		valid = LOONGARCH_PV_FEAT_MASK;
1078 		if (val & ~valid)
1079 			return -EINVAL;
1080 
1081 		/* All vCPUs must set the same PV features */
1082 		if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED)
1083 				&& ((kvm->arch.pv_features & valid) != val))
1084 			return -EINVAL;
1085 		kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED;
1086 		return 0;
1087 	default:
1088 		return -ENXIO;
1089 	}
1090 }
1091 
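/*
 * Illustrative userspace usage (a sketch, not part of this file): steal time
 * is enabled per vCPU via KVM_SET_DEVICE_ATTR with the PVTIME group, passing
 * the guest physical address of the kvm_steal_time area with the valid bit
 * set, roughly (st_area_gpa and vcpu_fd are placeholders):
 *
 *	__u64 gpa = st_area_gpa | KVM_STEAL_PHYS_VALID;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
 *		.attr  = KVM_LOONGARCH_VCPU_PVTIME_GPA,
 *		.addr  = (__u64)&gpa,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */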
1092 static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu,
1093 					 struct kvm_device_attr *attr)
1094 {
1095 	int idx, ret = 0;
1096 	u64 gpa, __user *user = (u64 __user *)attr->addr;
1097 	struct kvm *kvm = vcpu->kvm;
1098 
1099 	if (!kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_STEAL_TIME)
1100 			|| attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA)
1101 		return -ENXIO;
1102 
1103 	if (get_user(gpa, user))
1104 		return -EFAULT;
1105 
1106 	if (gpa & ~(KVM_STEAL_PHYS_MASK | KVM_STEAL_PHYS_VALID))
1107 		return -EINVAL;
1108 
1109 	if (!(gpa & KVM_STEAL_PHYS_VALID)) {
1110 		vcpu->arch.st.guest_addr = gpa;
1111 		return 0;
1112 	}
1113 
1114 	/* Check the address is in a valid memslot */
1115 	idx = srcu_read_lock(&kvm->srcu);
1116 	if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT)))
1117 		ret = -EINVAL;
1118 	srcu_read_unlock(&kvm->srcu, idx);
1119 
1120 	if (!ret) {
1121 		vcpu->arch.st.guest_addr = gpa;
1122 		vcpu->arch.st.last_steal = current->sched_info.run_delay;
1123 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1124 	}
1125 
1126 	return ret;
1127 }
1128 
1129 static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
1130 				       struct kvm_device_attr *attr)
1131 {
1132 	int ret = -ENXIO;
1133 
1134 	switch (attr->group) {
1135 	case KVM_LOONGARCH_VCPU_CPUCFG:
1136 		ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
1137 		break;
1138 	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
1139 		ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
1140 		break;
1141 	default:
1142 		break;
1143 	}
1144 
1145 	return ret;
1146 }
1147 
1148 long kvm_arch_vcpu_ioctl(struct file *filp,
1149 			 unsigned int ioctl, unsigned long arg)
1150 {
1151 	long r;
1152 	struct kvm_device_attr attr;
1153 	void __user *argp = (void __user *)arg;
1154 	struct kvm_vcpu *vcpu = filp->private_data;
1155 
1156 	/*
1157 	 * Only the software CSR state should be modified here.
1158 	 *
1159 	 * If any hardware CSR register is modified, a vcpu_load/vcpu_put pair
1160 	 * should be used: the hardware CSR registers are owned by this vCPU,
1161 	 * so other vCPUs must reload them when switched in.
1162 	 *
1163 	 * If a software CSR is modified, the KVM_LARCH_HWCSR_USABLE bit is
1164 	 * cleared in vcpu->arch.aux_inuse, and vcpu_load will check the
1165 	 * aux_inuse flag and reload the CSR registers from software state.
1166 	 */
1167 
1168 	switch (ioctl) {
1169 	case KVM_SET_ONE_REG:
1170 	case KVM_GET_ONE_REG: {
1171 		struct kvm_one_reg reg;
1172 
1173 		r = -EFAULT;
1174 		if (copy_from_user(&reg, argp, sizeof(reg)))
1175 			break;
1176 		if (ioctl == KVM_SET_ONE_REG) {
1177 			r = kvm_set_reg(vcpu, &reg);
1178 			vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1179 		} else
1180 			r = kvm_get_reg(vcpu, &reg);
1181 		break;
1182 	}
1183 	case KVM_ENABLE_CAP: {
1184 		struct kvm_enable_cap cap;
1185 
1186 		r = -EFAULT;
1187 		if (copy_from_user(&cap, argp, sizeof(cap)))
1188 			break;
1189 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1190 		break;
1191 	}
1192 	case KVM_HAS_DEVICE_ATTR: {
1193 		r = -EFAULT;
1194 		if (copy_from_user(&attr, argp, sizeof(attr)))
1195 			break;
1196 		r = kvm_loongarch_vcpu_has_attr(vcpu, &attr);
1197 		break;
1198 	}
1199 	case KVM_GET_DEVICE_ATTR: {
1200 		r = -EFAULT;
1201 		if (copy_from_user(&attr, argp, sizeof(attr)))
1202 			break;
1203 		r = kvm_loongarch_vcpu_get_attr(vcpu, &attr);
1204 		break;
1205 	}
1206 	case KVM_SET_DEVICE_ATTR: {
1207 		r = -EFAULT;
1208 		if (copy_from_user(&attr, argp, sizeof(attr)))
1209 			break;
1210 		r = kvm_loongarch_vcpu_set_attr(vcpu, &attr);
1211 		break;
1212 	}
1213 	default:
1214 		r = -ENOIOCTLCMD;
1215 		break;
1216 	}
1217 
1218 	return r;
1219 }
1220 
1221 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1222 {
1223 	int i = 0;
1224 
1225 	fpu->fcc = vcpu->arch.fpu.fcc;
1226 	fpu->fcsr = vcpu->arch.fpu.fcsr;
1227 	for (i = 0; i < NUM_FPU_REGS; i++)
1228 		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
1229 
1230 	return 0;
1231 }
1232 
1233 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1234 {
1235 	int i = 0;
1236 
1237 	vcpu->arch.fpu.fcc = fpu->fcc;
1238 	vcpu->arch.fpu.fcsr = fpu->fcsr;
1239 	for (i = 0; i < NUM_FPU_REGS; i++)
1240 		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
1241 
1242 	return 0;
1243 }
1244 
1245 #ifdef CONFIG_CPU_HAS_LBT
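/*
 * The LBT context consists of the scratch registers SCR0-SCR3, the emulated
 * eflags and the FTOP register (see the one-reg accessors above). LBT
 * ownership is handed to the guest lazily here and dropped in kvm_lose_lbt().
 */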
1246 int kvm_own_lbt(struct kvm_vcpu *vcpu)
1247 {
1248 	if (!kvm_guest_has_lbt(&vcpu->arch))
1249 		return -EINVAL;
1250 
1251 	preempt_disable();
1252 	if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
1253 		set_csr_euen(CSR_EUEN_LBTEN);
1254 		_restore_lbt(&vcpu->arch.lbt);
1255 		vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
1256 	}
1257 	preempt_enable();
1258 
1259 	return 0;
1260 }
1261 
1262 static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
1263 {
1264 	preempt_disable();
1265 	if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
1266 		_save_lbt(&vcpu->arch.lbt);
1267 		clear_csr_euen(CSR_EUEN_LBTEN);
1268 		vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
1269 	}
1270 	preempt_enable();
1271 }
1272 
1273 static void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr)
1274 {
1275 	/*
1276 	 * If TM is enabled, saving/restoring the FTOP register will
1277 	 * raise an LBT exception, so enable LBT in advance here
1278 	 */
1279 	if (fcsr & FPU_CSR_TM)
1280 		kvm_own_lbt(vcpu);
1281 }
1282 
1283 static void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu)
1284 {
1285 	if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1286 		if (vcpu->arch.aux_inuse & KVM_LARCH_LBT)
1287 			return;
1288 		kvm_check_fcsr(vcpu, read_fcsr(LOONGARCH_FCSR0));
1289 	}
1290 }
1291 #else
1292 static inline void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
1293 static inline void kvm_check_fcsr(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
1294 static inline void kvm_check_fcsr_alive(struct kvm_vcpu *vcpu) { }
1295 #endif
1296 
1297 /* Enable FPU and restore context */
1298 void kvm_own_fpu(struct kvm_vcpu *vcpu)
1299 {
1300 	preempt_disable();
1301 
1302 	/*
1303 	 * Enable FPU for guest
1304 	 * Set FR and FRE according to guest context
1305 	 */
1306 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1307 	set_csr_euen(CSR_EUEN_FPEN);
1308 
1309 	kvm_restore_fpu(&vcpu->arch.fpu);
1310 	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
1311 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
1312 
1313 	preempt_enable();
1314 }
1315 
1316 #ifdef CONFIG_CPU_HAS_LSX
1317 /* Enable LSX and restore context */
1318 int kvm_own_lsx(struct kvm_vcpu *vcpu)
1319 {
1320 	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch))
1321 		return -EINVAL;
1322 
1323 	preempt_disable();
1324 
1325 	/* Enable LSX for guest */
1326 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1327 	set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN);
1328 	switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1329 	case KVM_LARCH_FPU:
1330 		/*
1331 		 * Guest FPU state already loaded,
1332 		 * only restore upper LSX state
1333 		 */
1334 		_restore_lsx_upper(&vcpu->arch.fpu);
1335 		break;
1336 	default:
1337 		/* Neither FP nor LSX is already active,
1338 		 * restore the full LSX state
1339 		 */
1340 		kvm_restore_lsx(&vcpu->arch.fpu);
1341 		break;
1342 	}
1343 
1344 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX);
1345 	vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
1346 	preempt_enable();
1347 
1348 	return 0;
1349 }
1350 #endif
1351 
1352 #ifdef CONFIG_CPU_HAS_LASX
1353 /* Enable LASX and restore context */
1354 int kvm_own_lasx(struct kvm_vcpu *vcpu)
1355 {
1356 	if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch))
1357 		return -EINVAL;
1358 
1359 	preempt_disable();
1360 
1361 	kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr);
1362 	set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1363 	switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
1364 	case KVM_LARCH_LSX:
1365 	case KVM_LARCH_LSX | KVM_LARCH_FPU:
1366 		/* Guest LSX state already loaded, only restore upper LASX state */
1367 		_restore_lasx_upper(&vcpu->arch.fpu);
1368 		break;
1369 	case KVM_LARCH_FPU:
1370 		/* Guest FP state already loaded, only restore upper LSX & LASX state */
1371 		_restore_lsx_upper(&vcpu->arch.fpu);
1372 		_restore_lasx_upper(&vcpu->arch.fpu);
1373 		break;
1374 	default:
1375 		/* Neither FP nor LSX is already active, restore the full LASX state */
1376 		kvm_restore_lasx(&vcpu->arch.fpu);
1377 		break;
1378 	}
1379 
1380 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
1381 	vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
1382 	preempt_enable();
1383 
1384 	return 0;
1385 }
1386 #endif
1387 
1388 /* Save context and disable FPU */
1389 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
1390 {
1391 	preempt_disable();
1392 
1393 	kvm_check_fcsr_alive(vcpu);
1394 	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
1395 		kvm_save_lasx(&vcpu->arch.fpu);
1396 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX);
1397 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX);
1398 
1399 		/* Disable LASX & LSX & FPU */
1400 		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
1401 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
1402 		kvm_save_lsx(&vcpu->arch.fpu);
1403 		vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU);
1404 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX);
1405 
1406 		/* Disable LSX & FPU */
1407 		clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN);
1408 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
1409 		kvm_save_fpu(&vcpu->arch.fpu);
1410 		vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU;
1411 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
1412 
1413 		/* Disable FPU */
1414 		clear_csr_euen(CSR_EUEN_FPEN);
1415 	}
1416 	kvm_lose_lbt(vcpu);
1417 
1418 	preempt_enable();
1419 }
1420 
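/*
 * KVM_INTERRUPT ioctl handler: a positive irq number queues the interrupt
 * for the vCPU, a negative number dequeues it, and zero is rejected.
 */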
1421 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1422 {
1423 	int intr = (int)irq->irq;
1424 
1425 	if (intr > 0)
1426 		kvm_queue_irq(vcpu, intr);
1427 	else if (intr < 0)
1428 		kvm_dequeue_irq(vcpu, -intr);
1429 	else {
1430 		kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq);
1431 		return -EINVAL;
1432 	}
1433 
1434 	kvm_vcpu_kick(vcpu);
1435 
1436 	return 0;
1437 }
1438 
1439 long kvm_arch_vcpu_async_ioctl(struct file *filp,
1440 			       unsigned int ioctl, unsigned long arg)
1441 {
1442 	void __user *argp = (void __user *)arg;
1443 	struct kvm_vcpu *vcpu = filp->private_data;
1444 
1445 	if (ioctl == KVM_INTERRUPT) {
1446 		struct kvm_interrupt irq;
1447 
1448 		if (copy_from_user(&irq, argp, sizeof(irq)))
1449 			return -EFAULT;
1450 
1451 		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq);
1452 
1453 		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1454 	}
1455 
1456 	return -ENOIOCTLCMD;
1457 }
1458 
1459 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
1460 {
1461 	return 0;
1462 }
1463 
1464 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
1465 {
1466 	unsigned long timer_hz;
1467 	struct loongarch_csrs *csr;
1468 
1469 	vcpu->arch.vpid = 0;
1470 	vcpu->arch.flush_gpa = INVALID_GPA;
1471 
1472 	hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
1473 	vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
1474 
1475 	vcpu->arch.handle_exit = kvm_handle_exit;
1476 	vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry;
1477 	vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
1478 	if (!vcpu->arch.csr)
1479 		return -ENOMEM;
1480 
1481 	/*
1482 	 * All KVM exceptions share one exception entry, and the host <-> guest
1483 	 * switch also switches the ECFG.VS field, so keep the host value here.
1484 	 */
1485 	vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);
1486 
1487 	/* Init */
1488 	vcpu->arch.last_sched_cpu = -1;
1489 
1490 	/*
1491 	 * Initialize guest register state to valid architectural reset state.
1492 	 */
1493 	timer_hz = calc_const_freq();
1494 	kvm_init_timer(vcpu, timer_hz);
1495 
1496 	/* Start the guest in direct address (DA) mode */
1497 	csr = vcpu->arch.csr;
1498 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);
1499 
1500 	/* Set cpuid */
1501 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
1502 	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, KVM_MAX_PHYID);
1503 
1504 	/* Start with no pending virtual guest interrupts */
1505 	csr->csrs[LOONGARCH_CSR_GINTC] = 0;
1506 
1507 	return 0;
1508 }
1509 
1510 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1511 {
1512 }
1513 
1514 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1515 {
1516 	int cpu;
1517 	struct kvm_context *context;
1518 
1519 	hrtimer_cancel(&vcpu->arch.swtimer);
1520 	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1521 	kvm_drop_cpuid(vcpu);
1522 	kfree(vcpu->arch.csr);
1523 
1524 	/*
1525 	 * If the vCPU is freed and reused as another vCPU, we don't want the
1526 	 * matching pointer wrongly hanging around in last_vcpu.
1527 	 */
1528 	for_each_possible_cpu(cpu) {
1529 		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1530 		if (context->last_vcpu == vcpu)
1531 			context->last_vcpu = NULL;
1532 	}
1533 }
1534 
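/*
 * Load the vCPU's guest context onto this physical CPU. The hardware guest
 * CSRs are only rewritten when the vCPU has migrated to a new CPU or was not
 * the last one to run here; KVM_LARCH_HWCSR_USABLE tracks whether the
 * hardware copy is still valid.
 */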
1535 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1536 {
1537 	bool migrated;
1538 	struct kvm_context *context;
1539 	struct loongarch_csrs *csr = vcpu->arch.csr;
1540 
1541 	/*
1542 	 * Have we migrated to a different CPU?
1543 	 * If so, any old guest TLB state may be stale.
1544 	 */
1545 	migrated = (vcpu->arch.last_sched_cpu != cpu);
1546 
1547 	/*
1548 	 * Was this the last vCPU to run on this CPU?
1549 	 * If not, any old guest state from this vCPU will have been clobbered.
1550 	 */
1551 	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
1552 	if (migrated || (context->last_vcpu != vcpu))
1553 		vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE;
1554 	context->last_vcpu = vcpu;
1555 
1556 	/* Restore timer state regardless */
1557 	kvm_restore_timer(vcpu);
1558 
1559 	/* Control guest page CCA attribute */
1560 	change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT);
1561 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1562 
1563 	/* Restore hardware PMU CSRs */
1564 	kvm_restore_pmu(vcpu);
1565 
1566 	/* Don't bother restoring registers multiple times unless necessary */
1567 	if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE)
1568 		return 0;
1569 
1570 	write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset);
1571 
1572 	/* Restore guest CSR registers */
1573 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1574 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1575 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1576 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1577 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1578 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1579 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1580 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1581 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1582 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1583 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1584 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1585 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1586 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1587 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1588 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1589 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1590 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1591 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1592 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1593 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1594 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1595 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1596 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1597 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1598 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1599 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1600 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1601 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1602 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1603 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1604 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1605 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1606 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1607 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1608 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1609 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1610 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1611 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1612 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1613 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1614 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1615 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1616 	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1617 
1618 	/* Restore Root.GINTC from unused Guest.GINTC register */
1619 	write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]);
1620 
1621 	/*
1622 	 * We should clear linked load bit to break interrupted atomics. This
1623 	 * prevents a SC on the next vCPU from succeeding by matching a LL on
1624 	 * the previous vCPU.
1625 	 */
1626 	if (vcpu->kvm->created_vcpus > 1)
1627 		set_gcsr_llbctl(CSR_LLBCTL_WCLLB);
1628 
1629 	vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE;
1630 
1631 	return 0;
1632 }
1633 
1634 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1635 {
1636 	unsigned long flags;
1637 
1638 	local_irq_save(flags);
1639 	/* Restore guest state to registers */
1640 	_kvm_vcpu_load(vcpu, cpu);
1641 	local_irq_restore(flags);
1642 }
1643 
1644 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
1645 {
1646 	struct loongarch_csrs *csr = vcpu->arch.csr;
1647 
1648 	kvm_lose_fpu(vcpu);
1649 
1650 	/*
1651 	 * Update the CSR state from hardware if the software CSR state is
1652 	 * stale. Most CSR registers are kept unchanged across a process
1653 	 * context switch, except for registers such as the remaining timer
1654 	 * tick value and the injected interrupt state.
1655 	 */
1656 	if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST)
1657 		goto out;
1658 
1659 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD);
1660 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD);
1661 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN);
1662 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC);
1663 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG);
1664 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA);
1665 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV);
1666 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI);
1667 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY);
1668 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX);
1669 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI);
1670 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0);
1671 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1);
1672 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID);
1673 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL);
1674 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH);
1675 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0);
1676 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1);
1677 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE);
1678 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG);
1679 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID);
1680 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1);
1681 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2);
1682 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3);
1683 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0);
1684 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1);
1685 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2);
1686 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3);
1687 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4);
1688 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5);
1689 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6);
1690 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7);
1691 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID);
1692 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC);
1693 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL);
1694 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY);
1695 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV);
1696 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA);
1697 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE);
1698 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0);
1699 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1);
1700 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI);
1701 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD);
1702 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0);
1703 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1);
1704 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2);
1705 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3);
1706 
1707 	vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST;
1708 
1709 out:
1710 	kvm_save_timer(vcpu);
1711 	/* Save Root.GINTC into unused Guest.GINTC register */
1712 	csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc();
1713 
1714 	return 0;
1715 }
1716 
1717 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1718 {
1719 	int cpu;
1720 	unsigned long flags;
1721 
1722 	local_irq_save(flags);
1723 	cpu = smp_processor_id();
1724 	vcpu->arch.last_sched_cpu = cpu;
1725 
1726 	/* Save guest state in registers */
1727 	_kvm_vcpu_put(vcpu, cpu);
1728 	local_irq_restore(flags);
1729 }
1730 
1731 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1732 {
1733 	int r = -EINTR;
1734 	struct kvm_run *run = vcpu->run;
1735 
1736 	if (vcpu->mmio_needed) {
1737 		if (!vcpu->mmio_is_write)
1738 			kvm_complete_mmio_read(vcpu, run);
1739 		vcpu->mmio_needed = 0;
1740 	}
1741 
1742 	if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
1743 		if (!run->iocsr_io.is_write)
1744 			kvm_complete_iocsr_read(vcpu, run);
1745 	}
1746 
1747 	if (!vcpu->wants_to_run)
1748 		return r;
1749 
1750 	/* Clear exit_reason */
1751 	run->exit_reason = KVM_EXIT_UNKNOWN;
1752 	lose_fpu(1);
1753 	vcpu_load(vcpu);
1754 	kvm_sigset_activate(vcpu);
1755 	r = kvm_pre_enter_guest(vcpu);
1756 	if (r != RESUME_GUEST)
1757 		goto out;
1758 
1759 	guest_timing_enter_irqoff();
1760 	guest_state_enter_irqoff();
1761 	trace_kvm_enter(vcpu);
1762 	r = kvm_loongarch_ops->enter_guest(run, vcpu);
1763 
1764 	trace_kvm_out(vcpu);
1765 	/*
1766 	 * Guest exit is already recorded in kvm_handle_exit();
1767 	 * the return value must not be RESUME_GUEST
1768 	 */
1769 	local_irq_enable();
1770 out:
1771 	kvm_sigset_deactivate(vcpu);
1772 	vcpu_put(vcpu);
1773 
1774 	return r;
1775 }
1776