// SPDX-License-Identifier: GPL-2.0
/*
 * Hosting Protected Virtual Machines
 *
 * Copyright IBM Corp. 2019, 2020
 *    Author(s): Janosch Frank <frankja@linux.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <asm/gmap.h>
#include <asm/uv.h>
#include <asm/mman.h>
#include "kvm-s390.h"

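/*
 * Tear down a protected vCPU: issue the Destroy Secure CPU UV call and,
 * on success, hand the donated CPU base storage back to the kernel. The
 * SIDA page is freed and the PV handles in the SIE block are cleared in
 * any case.
 */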
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	int cc;

	if (!kvm_s390_pv_cpu_get_handle(vcpu))
		return 0;

	cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), UVC_CMD_DESTROY_SEC_CPU, rc, rrc);

	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
		     vcpu->vcpu_id, *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", *rc, *rrc);

	/* Intended memory leak for something that should never happen. */
	if (!cc)
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));

	free_page(sida_origin(vcpu->arch.sie_block));
	vcpu->arch.sie_block->pv_handle_cpu = 0;
	vcpu->arch.sie_block->pv_handle_config = 0;
	memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
	vcpu->arch.sie_block->sdf = 0;
	/*
	 * The sidad field (for sdf == 2) is now the gbea field (for sdf == 0).
	 * Use the reset value of gbea to avoid leaking the kernel pointer of
	 * the just freed sida.
	 */
	vcpu->arch.sie_block->gbea = 1;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return cc ? -EIO : 0;
}

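/*
 * Create the Ultravisor representation of a vCPU: donate the per-CPU base
 * storage (uv_info.guest_cpu_stor_len bytes), allocate a zeroed page as
 * the Secure Instruction Data Area, and issue the Create Secure CPU UV
 * call with the vCPU's SIE block as the state description.
 */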
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	struct uv_cb_csc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CPU,
		.header.len = sizeof(uvcb),
	};
	int cc;

	if (kvm_s390_pv_cpu_get_handle(vcpu))
		return -EINVAL;

	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL,
						   get_order(uv_info.guest_cpu_stor_len));
	if (!vcpu->arch.pv.stor_base)
		return -ENOMEM;

	/* Input */
	uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
	uvcb.num = vcpu->arch.sie_block->icpua;
	uvcb.state_origin = (u64)vcpu->arch.sie_block;
	uvcb.stor_origin = (u64)vcpu->arch.pv.stor_base;

	/* Alloc Secure Instruction Data Area Designation */
	vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!vcpu->arch.sie_block->sidad) {
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));
		return -ENOMEM;
	}

	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(vcpu->kvm, 3,
		     "PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
		     vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
		     uvcb.header.rrc);

	if (cc) {
		u16 dummy;

		kvm_s390_pv_destroy_cpu(vcpu, &dummy, &dummy);
		return -EIO;
	}

	/* Output */
	vcpu->arch.pv.handle = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
	vcpu->arch.sie_block->sdf = 2;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

/* only free resources when the destroy was successful */
static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
{
	vfree(kvm->arch.pv.stor_var);
	free_pages(kvm->arch.pv.stor_base,
		   get_order(uv_info.guest_base_stor_len));
	memset(&kvm->arch.pv, 0, sizeof(kvm->arch.pv));
}

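/*
 * Allocate the memory that is donated to the Ultravisor for this guest:
 * a fixed-size base storage area and a variable storage area whose size
 * scales with the guest's memory size. The memslot array is sorted by
 * base_gfn in descending order, so the first slot yields the highest
 * guest address.
 */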
static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
{
	unsigned long base = uv_info.guest_base_stor_len;
	unsigned long virt = uv_info.guest_virt_var_stor_len;
	unsigned long npages = 0, vlen = 0;
	struct kvm_memory_slot *memslot;

	kvm->arch.pv.stor_var = NULL;
	kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL, get_order(base));
	if (!kvm->arch.pv.stor_base)
		return -ENOMEM;

	/*
	 * Calculate current guest storage for allocation of the
	 * variable storage, which is based on the length in MB.
	 *
	 * Slots are sorted by GFN
	 */
	mutex_lock(&kvm->slots_lock);
	memslot = kvm_memslots(kvm)->memslots;
	npages = memslot->base_gfn + memslot->npages;
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.pv.guest_len = npages * PAGE_SIZE;

	/* Allocate variable storage */
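	/*
	 * guest_virt_var_stor_len bytes are needed per HPAGE_SIZE (1 MB on
	 * s390) of guest storage; round up to full pages and add the fixed
	 * guest_virt_base_stor_len part.
	 */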
	vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
	vlen += uv_info.guest_virt_base_stor_len;
	kvm->arch.pv.stor_var = vzalloc(vlen);
	if (!kvm->arch.pv.stor_var)
		goto out_err;
	return 0;

out_err:
	kvm_s390_pv_dealloc_vm(kvm);
	return -ENOMEM;
}

/*
 * Destroy the secure configuration. This should not fail, but if it does,
 * we must not free the memory that was donated to the Ultravisor.
 */
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	int cc;

	/* make all pages accessible before destroying the guest */
	s390_reset_acc(kvm->mm);

	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	atomic_set(&kvm->mm->context.is_protected, 0);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);
	/* Intended memory leak on "impossible" error */
	if (!cc)
		kvm_s390_pv_dealloc_vm(kvm);
	return cc ? -EIO : 0;
}

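/*
 * Create the Ultravisor representation of the VM: donate the base and
 * variable storage allocated above and issue the Create Secure
 * Configuration UV call. On success the returned guest handle is stored
 * in both kvm and the gmap.
 */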
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_cgc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CONF,
		.header.len = sizeof(uvcb)
	};
	int cc, ret;
	u16 dummy;

	ret = kvm_s390_pv_alloc_vm(kvm);
	if (ret)
		return ret;

	/* Inputs */
	uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
	uvcb.guest_stor_len = kvm->arch.pv.guest_len;
	uvcb.guest_asce = kvm->arch.gmap->asce;
	uvcb.guest_sca = (unsigned long)kvm->arch.sca;
	uvcb.conf_base_stor_origin = (u64)kvm->arch.pv.stor_base;
	uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;

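	/*
	 * Use the UV call variant that calls cond_resched() between
	 * retries; creating a secure configuration can take a while.
	 */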
	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x",
		     uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc);

	/* Outputs */
	kvm->arch.pv.handle = uvcb.guest_handle;

	if (cc) {
		if (uvcb.header.rc & UVC_RC_NEED_DESTROY)
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
		else
			kvm_s390_pv_dealloc_vm(kvm);
		return -EIO;
	}
	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
	return 0;
}

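/*
 * Hand the SE header of the boot image to the Ultravisor via the Set
 * Secure Configuration Parameters UV call. On success the mm is marked
 * as protected.
 */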
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc)
{
	struct uv_cb_ssc uvcb = {
		.header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
		.header.len = sizeof(uvcb),
		.sec_header_origin = (u64)hdr,
		.sec_header_len = length,
		.guest_handle = kvm_s390_pv_get_handle(kvm),
	};
	int cc = uv_call(0, (u64)&uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
		     *rc, *rrc);
	if (!cc)
		atomic_set(&kvm->mm->context.is_protected, 1);
	return cc ? -EINVAL : 0;
}

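/*
 * Unpack one page of the encrypted boot image into secure guest memory.
 * The UV unpack call is driven through gmap_make_secure(), which resolves
 * the guest address and makes the page secure; tweak[0] carries the image
 * tweak and tweak[1] the page offset within the image.
 */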
static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
		      u64 offset, u16 *rc, u16 *rrc)
{
	struct uv_cb_unp uvcb = {
		.header.cmd = UVC_CMD_UNPACK_IMG,
		.header.len = sizeof(uvcb),
		.guest_handle = kvm_s390_pv_get_handle(kvm),
		.gaddr = addr,
		.tweak[0] = tweak,
		.tweak[1] = offset,
	};
	int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;

	if (ret && ret != -EAGAIN)
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
			     uvcb.gaddr, *rc, *rrc);
	return ret;
}

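/*
 * Unpack the encrypted boot image, page by page, into guest memory. Both
 * the start address and the size must be page aligned. Called from the
 * KVM_PV_UNPACK subcommand of the KVM_S390_PV_COMMAND ioctl.
 */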
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc)
{
	u64 offset = 0;
	int ret = 0;

	if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
		return -EINVAL;

	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
		     addr, size);

	while (offset < size) {
		ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
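		/*
		 * -EAGAIN means the page could not be made secure right
		 * now; give other tasks a chance to run, then retry the
		 * same page unless a fatal signal is pending.
		 */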
		if (ret == -EAGAIN) {
			cond_resched();
			if (fatal_signal_pending(current))
				break;
			continue;
		}
		if (ret)
			break;
		addr += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	if (!ret)
		KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
	return ret;
}

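/*
 * Report a vCPU state change (e.g. stopped or operating) to the
 * Ultravisor via the Set CPU State UV call.
 */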
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
{
	struct uv_cb_cpu_set_state uvcb = {
		.header.cmd	= UVC_CMD_CPU_SET_STATE,
		.header.len	= sizeof(uvcb),
		.cpu_handle	= kvm_s390_pv_cpu_get_handle(vcpu),
		.state		= state,
	};
	int cc;

	cc = uv_call(0, (u64)&uvcb);
	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
		     vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
	if (cc)
		return -EINVAL;
	return 0;
}