// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022 SiFive
 *
 * Authors:
 *     Vincent Chen <vincent.chen@sifive.com>
 *     Greentime Hu <greentime.hu@sifive.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/hwcap.h>
#include <asm/kvm_vcpu_vector.h>
#include <asm/vector.h>

#ifdef CONFIG_RISCV_ISA_V
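/*
 * Reset the guest vector state: if the guest ISA includes V, mark
 * sstatus.VS as Initial and clear the in-memory vector register file;
 * otherwise leave vector state turned off for the guest.
 */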
void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
{
	unsigned long *isa = vcpu->arch.isa;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	cntx->sstatus &= ~SR_VS;
	if (riscv_isa_extension_available(isa, v)) {
		cntx->sstatus |= SR_VS_INITIAL;
		WARN_ON(!cntx->vector.datap);
		memset(cntx->vector.datap, 0, riscv_v_vsize);
	} else {
		cntx->sstatus |= SR_VS_OFF;
	}
}

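/* Mark the guest vector state as Clean once it has been saved or restored. */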
static void kvm_riscv_vcpu_vector_clean(struct kvm_cpu_context *cntx)
{
	cntx->sstatus &= ~SR_VS;
	cntx->sstatus |= SR_VS_CLEAN;
}

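/*
 * Save the guest vector state only when the guest has actually touched it,
 * i.e. when sstatus.VS is Dirty, then mark the state Clean.
 */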
void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
				      unsigned long *isa)
{
	if ((cntx->sstatus & SR_VS) == SR_VS_DIRTY) {
		if (riscv_isa_extension_available(isa, v))
			__kvm_riscv_vector_save(cntx);
		kvm_riscv_vcpu_vector_clean(cntx);
	}
}

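/*
 * Restore the guest vector state unless vector is turned off for the guest,
 * then mark the state Clean.
 */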
void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
					 unsigned long *isa)
{
	if ((cntx->sstatus & SR_VS) != SR_VS_OFF) {
		if (riscv_isa_extension_available(isa, v))
			__kvm_riscv_vector_restore(cntx);
		kvm_riscv_vcpu_vector_clean(cntx);
	}
}

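/* Save and restore the host's own vector state via the host context buffer. */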
void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
{
	/* No need to check host sstatus as it can be modified outside */
	if (riscv_isa_extension_available(NULL, v))
		__kvm_riscv_vector_save(cntx);
}

void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
{
	if (riscv_isa_extension_available(NULL, v))
		__kvm_riscv_vector_restore(cntx);
}

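/*
 * Allocate backing storage (riscv_v_vsize bytes each) for the given guest
 * context's and the host context's vector register files.
 */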
int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
					struct kvm_cpu_context *cntx)
{
	cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
	if (!cntx->vector.datap)
		return -ENOMEM;

	vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
	if (!vcpu->arch.host_context.vector.datap)
		return -ENOMEM;

	return 0;
}

void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
{
	kfree(vcpu->arch.guest_reset_context.vector.datap);
	kfree(vcpu->arch.host_context.vector.datap);
}
#endif

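/*
 * Translate a KVM_REG_RISCV_VECTOR register number into a pointer inside the
 * guest context: numbers below KVM_REG_RISCV_VECTOR_REG(0) select the vector
 * CSRs (vstart, vl, vtype, vcsr), while v0-v31 map into the datap buffer at
 * vlenb-byte granularity (riscv_v_vsize covers all 32 registers, so
 * vlenb = riscv_v_vsize / 32).
 */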
static int kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu,
				    unsigned long reg_num,
				    size_t reg_size,
				    void **reg_addr)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	size_t vlenb = riscv_v_vsize / 32;

	if (reg_num < KVM_REG_RISCV_VECTOR_REG(0)) {
		if (reg_size != sizeof(unsigned long))
			return -EINVAL;
		switch (reg_num) {
		case KVM_REG_RISCV_VECTOR_CSR_REG(vstart):
			*reg_addr = &cntx->vector.vstart;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(vl):
			*reg_addr = &cntx->vector.vl;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(vtype):
			*reg_addr = &cntx->vector.vtype;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr):
			*reg_addr = &cntx->vector.vcsr;
			break;
		case KVM_REG_RISCV_VECTOR_CSR_REG(datap):
		default:
			return -ENOENT;
		}
	} else if (reg_num <= KVM_REG_RISCV_VECTOR_REG(31)) {
		if (reg_size != vlenb)
			return -EINVAL;
		*reg_addr = cntx->vector.datap +
			    (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb;
	} else {
		return -ENOENT;
	}

	return 0;
}

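/* Handle KVM_GET_ONE_REG accesses to the vector register class. */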
int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	unsigned long *isa = vcpu->arch.isa;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_VECTOR);
	size_t reg_size = KVM_REG_SIZE(reg->id);
	void *reg_addr;
	int rc;

	if (!riscv_isa_extension_available(isa, v))
		return -ENOENT;

	rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
	if (rc)
		return rc;

	if (copy_to_user(uaddr, reg_addr, reg_size))
		return -EFAULT;

	return 0;
}

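/* Handle KVM_SET_ONE_REG accesses to the vector register class. */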
int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	unsigned long *isa = vcpu->arch.isa;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_VECTOR);
	size_t reg_size = KVM_REG_SIZE(reg->id);
	void *reg_addr;
	int rc;

	if (!riscv_isa_extension_available(isa, v))
		return -ENOENT;

	rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
	if (rc)
		return rc;

	if (copy_from_user(reg_addr, uaddr, reg_size))
		return -EFAULT;

	return 0;
}