// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

struct uv_info __bootdata_preserved(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);
EXPORT_SYMBOL(uv_info);

static int __init uv_init(unsigned long stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

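	/* The init call hands the donated base storage over to the Ultravisor */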
	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}

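/*
 * Allocate and donate base storage to the Ultravisor and initialize it.
 * On any failure, support for protected virtualization is disabled.
 */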
void __init setup_uv(void)
{
	unsigned long uv_stor_base;

	/*
	 * Keep these conditions in line with the KASAN init code in
	 * has_uv_sec_stor_limit().
	 */
	if (!is_prot_virt_host())
		return;

	if (is_prot_virt_guest()) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not available in protected guests.\n");
		return;
	}

	if (!test_facility(158)) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not supported by the hardware.\n");
		return;
	}

	uv_stor_base = (unsigned long)memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(uv_stor_base, uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization\n");
	prot_virt_host = 0;
}

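/*
 * Cap the kernel's maximum usable address at the highest address for
 * which the Ultravisor can keep track of secure storage, if a limit
 * was reported.
 */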
void adjust_to_uv_max(unsigned long *vmax)
{
	if (uv_info.max_sec_stor_addr)
		*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
static int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
int uv_destroy_page(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb)) {
		/*
		 * Older firmware uses 107/d as an indication of a non-secure
		 * page. Let us emulate the newer variant (no-op).
		 */
		if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
			return 0;
		return -EINVAL;
	}
	return 0;
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}

/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * page cannot be a huge page, for example.
 */
static int expected_page_refs(struct page *page)
{
	int res;

	/* Each page table mapping holds one reference */
	res = page_mapcount(page);
	if (PageSwapCache(page)) {
		/* The swap cache holds one additional reference */
		res++;
	} else if (page_mapping(page)) {
		/* The page cache holds one reference ... */
		res++;
		/* ... and attached private (e.g. buffer) data one more */
		if (page_has_private(page))
			res++;
	}
	return res;
}

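/*
 * Make the page mapped at @ptep secure by issuing the UV call in @uvcb.
 * The page's refcount is frozen at its expected value so that no new
 * references can be taken while the page changes state; PG_arch_1 marks
 * the page as potentially secure for arch_make_page_accessible().
 */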
static int make_secure_pte(pte_t *ptep, unsigned long addr,
			   struct page *exp_page, struct uv_cb_header *uvcb)
{
	pte_t entry = READ_ONCE(*ptep);
	struct page *page;
	int expected, rc = 0;

	if (!pte_present(entry))
		return -ENXIO;
	if (pte_val(entry) & _PAGE_INVALID)
		return -ENXIO;

	page = pte_page(entry);
	if (page != exp_page)
		return -ENXIO;
	if (PageWriteback(page))
		return -EAGAIN;
	expected = expected_page_refs(page);
	if (!page_ref_freeze(page, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &page->flags);
	rc = uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
	/* Return -ENXIO if the page was not mapped, -EINVAL otherwise */
	if (rc)
		rc = uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
	return rc;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it is brought in for the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct page *page;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = find_vma(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge, and userspace should not combine both.
	 * In case userspace does it anyway, this will result in an -EFAULT for
	 * the unpack; the guest thus never reaches secure mode. If
	 * userspace plays dirty tricks by mapping huge pages later
	 * on, this will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	page = follow_page(vma, uaddr, FOLL_WRITE);
	if (IS_ERR_OR_NULL(page))
		goto out;

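	/* The page lock serializes against arch_make_page_accessible() */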
	lock_page(page);
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	rc = make_secure_pte(ptep, uaddr, page, uvcb);
	pte_unmap_unlock(ptep, ptelock);
	unlock_page(page);
out:
	mmap_read_unlock(gmap->mm);

	if (rc == -EAGAIN) {
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the page refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the page refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);

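/*
 * Convert a guest page to secure with the CONVERT TO SECURE STORAGE
 * Ultravisor call for the given guest configuration and guest address.
 */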
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);

/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having two
 * parallel calls to make_page_accessible is fine, as the UV calls become
 * no-ops if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepage cannot be protected, so nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. as an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never huge, all 3 variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);

#endif

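/* Query information exposed to user space via /sys/firmware/uv/query */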
#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
			 uv_info.inst_calls_list[0],
			 uv_info.inst_calls_list[1],
			 uv_info.inst_calls_list[2],
			 uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
					    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
	__ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			 uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			 uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_feature_indications_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

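/*
 * Register the "uv/query" sysfs hierarchy under /sys/firmware and attach
 * the query attributes, provided the Ultravisor-call facility (158) is
 * installed.
 */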
static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset)
		goto out_kobj;

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
#endif
