// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Secure Encrypted Virtualization (SEV) interface
 *
 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
 *
 * Author: Brijesh Singh <brijesh.singh@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/ccp.h>
#include <linux/firmware.h>
#include <linux/gfp.h>

#include <asm/smp.h>

#include "psp-dev.h"
#include "sev-dev.h"

#define DEVICE_NAME		"sev"
#define SEV_FW_FILE		"amd/sev.fw"
#define SEV_FW_NAME_SIZE	64

static DEFINE_MUTEX(sev_cmd_mutex);
static struct sev_misc_dev *misc_dev;

static int psp_cmd_timeout = 100;
module_param(psp_cmd_timeout, int, 0644);
MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");

static int psp_probe_timeout = 5;
module_param(psp_probe_timeout, int, 0644);
MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");

MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */

static bool psp_dead;
static int psp_timeout;

/* Trusted Memory Region (TMR):
 *   The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
 *   to allocate the memory, which will return aligned memory for the
 *   specified allocation order.
 */
#define SEV_ES_TMR_SIZE		(1024 * 1024)
static void *sev_es_tmr;

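/*
 * Return true if the SEV firmware API version reported by the master PSP
 * device is greater than or equal to maj.min.
 */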
static inline bool sev_version_greater_or_equal(u8 maj, u8 min)
{
	struct sev_device *sev = psp_master->sev_data;

	if (sev->api_major > maj)
		return true;

	if (sev->api_major == maj && sev->api_minor >= min)
		return true;

	return false;
}

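/*
 * Interrupt callback registered with the PSP driver: on SEV command
 * completion, record the event and wake the waiter in sev_wait_cmd_ioc().
 */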
static void sev_irq_handler(int irq, void *data, unsigned int status)
{
	struct sev_device *sev = data;
	int reg;

	/* Check if it is command completion: */
	if (!(status & SEV_CMD_COMPLETE))
		return;

	/* Check if it is SEV command completion: */
	reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);
	if (reg & PSP_CMDRESP_RESP) {
		sev->int_rcvd = 1;
		wake_up(&sev->int_queue);
	}
}

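/*
 * Sleep until sev_irq_handler() signals command completion or the timeout
 * (in seconds) expires; on success return the cmdresp register via *reg.
 */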
static int sev_wait_cmd_ioc(struct sev_device *sev,
			    unsigned int *reg, unsigned int timeout)
{
	int ret;

	ret = wait_event_timeout(sev->int_queue,
			sev->int_rcvd, timeout * HZ);
	if (!ret)
		return -ETIMEDOUT;

	*reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg);

	return 0;
}

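/* Return the size of the command buffer for @cmd, or 0 if it takes none. */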
static int sev_cmd_buffer_len(int cmd)
{
	switch (cmd) {
	case SEV_CMD_INIT:			return sizeof(struct sev_data_init);
	case SEV_CMD_PLATFORM_STATUS:		return sizeof(struct sev_user_data_status);
	case SEV_CMD_PEK_CSR:			return sizeof(struct sev_data_pek_csr);
	case SEV_CMD_PEK_CERT_IMPORT:		return sizeof(struct sev_data_pek_cert_import);
	case SEV_CMD_PDH_CERT_EXPORT:		return sizeof(struct sev_data_pdh_cert_export);
	case SEV_CMD_LAUNCH_START:		return sizeof(struct sev_data_launch_start);
	case SEV_CMD_LAUNCH_UPDATE_DATA:	return sizeof(struct sev_data_launch_update_data);
	case SEV_CMD_LAUNCH_UPDATE_VMSA:	return sizeof(struct sev_data_launch_update_vmsa);
	case SEV_CMD_LAUNCH_FINISH:		return sizeof(struct sev_data_launch_finish);
	case SEV_CMD_LAUNCH_MEASURE:		return sizeof(struct sev_data_launch_measure);
	case SEV_CMD_ACTIVATE:			return sizeof(struct sev_data_activate);
	case SEV_CMD_DEACTIVATE:		return sizeof(struct sev_data_deactivate);
	case SEV_CMD_DECOMMISSION:		return sizeof(struct sev_data_decommission);
	case SEV_CMD_GUEST_STATUS:		return sizeof(struct sev_data_guest_status);
	case SEV_CMD_DBG_DECRYPT:		return sizeof(struct sev_data_dbg);
	case SEV_CMD_DBG_ENCRYPT:		return sizeof(struct sev_data_dbg);
	case SEV_CMD_SEND_START:		return sizeof(struct sev_data_send_start);
	case SEV_CMD_SEND_UPDATE_DATA:		return sizeof(struct sev_data_send_update_data);
	case SEV_CMD_SEND_UPDATE_VMSA:		return sizeof(struct sev_data_send_update_vmsa);
	case SEV_CMD_SEND_FINISH:		return sizeof(struct sev_data_send_finish);
	case SEV_CMD_RECEIVE_START:		return sizeof(struct sev_data_receive_start);
	case SEV_CMD_RECEIVE_FINISH:		return sizeof(struct sev_data_receive_finish);
	case SEV_CMD_RECEIVE_UPDATE_DATA:	return sizeof(struct sev_data_receive_update_data);
	case SEV_CMD_RECEIVE_UPDATE_VMSA:	return sizeof(struct sev_data_receive_update_vmsa);
	case SEV_CMD_LAUNCH_UPDATE_SECRET:	return sizeof(struct sev_data_launch_secret);
	case SEV_CMD_DOWNLOAD_FIRMWARE:		return sizeof(struct sev_data_download_firmware);
	case SEV_CMD_GET_ID:			return sizeof(struct sev_data_get_id);
	default:				return 0;
	}

	return 0;
}

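/*
 * Issue a single SEV command with sev_cmd_mutex held: program the command
 * buffer address registers, write the command ID with SEV_CMDRESP_IOC to
 * request a completion interrupt, wait for it, and translate the firmware
 * status into an errno (the raw status is returned through *psp_ret).
 */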
static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
{
	struct psp_device *psp = psp_master;
	struct sev_device *sev;
	unsigned int phys_lsb, phys_msb;
	unsigned int reg, ret = 0;

	if (!psp || !psp->sev_data)
		return -ENODEV;

	if (psp_dead)
		return -EBUSY;

	sev = psp->sev_data;

	if (data && WARN_ON_ONCE(!virt_addr_valid(data)))
		return -EINVAL;

	/* Get the physical address of the command buffer */
	phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
	phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;

	dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
		cmd, phys_msb, phys_lsb, psp_timeout);

	print_hex_dump_debug("(in):  ", DUMP_PREFIX_OFFSET, 16, 2, data,
			     sev_cmd_buffer_len(cmd), false);

	iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
	iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);

	sev->int_rcvd = 0;

	reg = cmd;
	reg <<= SEV_CMDRESP_CMD_SHIFT;
	reg |= SEV_CMDRESP_IOC;
	iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg);

	/* wait for command completion */
	ret = sev_wait_cmd_ioc(sev, &reg, psp_timeout);
	if (ret) {
		if (psp_ret)
			*psp_ret = 0;

		dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd);
		psp_dead = true;

		return ret;
	}

	psp_timeout = psp_cmd_timeout;

	if (psp_ret)
		*psp_ret = reg & PSP_CMDRESP_ERR_MASK;

	if (reg & PSP_CMDRESP_ERR_MASK) {
		dev_dbg(sev->dev, "sev command %#x failed (%#010x)\n",
			cmd, reg & PSP_CMDRESP_ERR_MASK);
		ret = -EIO;
	}

	print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
			     sev_cmd_buffer_len(cmd), false);

	return ret;
}

static int sev_do_cmd(int cmd, void *data, int *psp_ret)
{
	int rc;

	mutex_lock(&sev_cmd_mutex);
	rc = __sev_do_cmd_locked(cmd, data, psp_ret);
	mutex_unlock(&sev_cmd_mutex);

	return rc;
}

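/*
 * Initialize the SEV firmware with sev_cmd_mutex held: advertise the SEV-ES
 * TMR if one was allocated, issue SEV_CMD_INIT, then flush caches on all
 * CPUs and issue DF_FLUSH to prepare for the first guest launch.
 */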
static int __sev_platform_init_locked(int *error)
{
	struct psp_device *psp = psp_master;
	struct sev_device *sev;
	int rc = 0;

	if (!psp || !psp->sev_data)
		return -ENODEV;

	sev = psp->sev_data;

	if (sev->state == SEV_STATE_INIT)
		return 0;

	if (sev_es_tmr) {
		u64 tmr_pa;

		/*
		 * Do not include the encryption mask on the physical
		 * address of the TMR (firmware should clear it anyway).
		 */
		tmr_pa = __pa(sev_es_tmr);

		sev->init_cmd_buf.flags |= SEV_INIT_FLAGS_SEV_ES;
		sev->init_cmd_buf.tmr_address = tmr_pa;
		sev->init_cmd_buf.tmr_len = SEV_ES_TMR_SIZE;
	}

	rc = __sev_do_cmd_locked(SEV_CMD_INIT, &sev->init_cmd_buf, error);
	if (rc)
		return rc;

	sev->state = SEV_STATE_INIT;

	/* Prepare for first SEV guest launch after INIT */
	wbinvd_on_all_cpus();
	rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
	if (rc)
		return rc;

	dev_dbg(sev->dev, "SEV firmware initialized\n");

	return rc;
}

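/*
 * sev_platform_init - transition the SEV firmware to the INIT state.
 * @error: SEV firmware status code on failure
 *
 * Exported for in-kernel users (e.g. KVM); serialized by sev_cmd_mutex.
 */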
int sev_platform_init(int *error)
{
	int rc;

	mutex_lock(&sev_cmd_mutex);
	rc = __sev_platform_init_locked(error);
	mutex_unlock(&sev_cmd_mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(sev_platform_init);

static int __sev_platform_shutdown_locked(int *error)
{
	struct sev_device *sev = psp_master->sev_data;
	int ret;

	if (!sev || sev->state == SEV_STATE_UNINIT)
		return 0;

	ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
	if (ret)
		return ret;

	sev->state = SEV_STATE_UNINIT;
	dev_dbg(sev->dev, "SEV firmware shutdown\n");

	return ret;
}

static int sev_platform_shutdown(int *error)
{
	int rc;

	mutex_lock(&sev_cmd_mutex);
	rc = __sev_platform_shutdown_locked(NULL);
	mutex_unlock(&sev_cmd_mutex);

	return rc;
}

static int sev_get_platform_state(int *state, int *error)
{
	struct sev_device *sev = psp_master->sev_data;
	int rc;

	rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
				 &sev->status_cmd_buf, error);
	if (rc)
		return rc;

	*state = sev->status_cmd_buf.state;
	return rc;
}

static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable)
{
	int state, rc;

	if (!writable)
		return -EPERM;

	/*
	 * The SEV spec requires that FACTORY_RESET must be issued in the
	 * UNINIT state. Before we go further, check whether any guest is
	 * active.
	 *
	 * If the FW is in the WORKING state then deny the request; otherwise
	 * issue the SHUTDOWN command (INIT -> UNINIT) before issuing the
	 * FACTORY_RESET.
	 */
	rc = sev_get_platform_state(&state, &argp->error);
	if (rc)
		return rc;

	if (state == SEV_STATE_WORKING)
		return -EBUSY;

	if (state == SEV_STATE_INIT) {
		rc = __sev_platform_shutdown_locked(&argp->error);
		if (rc)
			return rc;
	}

	return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
}

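/* SEV_PLATFORM_STATUS ioctl: query firmware status and copy it to userspace. */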
static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_status *data = &sev->status_cmd_buf;
	int ret;

	ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
	if (ret)
		return ret;

	if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
		ret = -EFAULT;

	return ret;
}

static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	int rc;

	if (!writable)
		return -EPERM;

	if (sev->state == SEV_STATE_UNINIT) {
		rc = __sev_platform_init_locked(&argp->error);
		if (rc)
			return rc;
	}

	return __sev_do_cmd_locked(cmd, NULL, &argp->error);
}

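/*
 * SEV_PEK_CSR ioctl: generate a PEK certificate signing request. If the
 * caller passes a NULL address or zero length, only the required length is
 * reported back; otherwise the CSR blob is copied to the user buffer.
 */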
static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_pek_csr input;
	struct sev_data_pek_csr *data;
	void __user *input_address;
	void *blob = NULL;
	int ret;

	if (!writable)
		return -EPERM;

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* userspace wants to query CSR length */
	if (!input.address || !input.length)
		goto cmd;

	/* allocate a physically contiguous buffer to store the CSR blob */
	input_address = (void __user *)input.address;
	if (input.length > SEV_FW_BLOB_MAX_SIZE) {
		ret = -EFAULT;
		goto e_free;
	}

	blob = kmalloc(input.length, GFP_KERNEL);
	if (!blob) {
		ret = -ENOMEM;
		goto e_free;
	}

	data->address = __psp_pa(blob);
	data->len = input.length;

cmd:
	if (sev->state == SEV_STATE_UNINIT) {
		ret = __sev_platform_init_locked(&argp->error);
		if (ret)
			goto e_free_blob;
	}

	ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);

	/* If we query the CSR length, FW responded with expected data. */
	input.length = data->len;

	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
		ret = -EFAULT;
		goto e_free_blob;
	}

	if (blob) {
		if (copy_to_user(input_address, blob, input.length))
			ret = -EFAULT;
	}

e_free_blob:
	kfree(blob);
e_free:
	kfree(data);
	return ret;
}

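/*
 * Duplicate a userspace blob into kernel memory, rejecting NULL/zero-length
 * input and anything larger than SEV_FW_BLOB_MAX_SIZE. Returns an ERR_PTR
 * on failure; callers must kfree() the result.
 */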
void *psp_copy_user_blob(u64 uaddr, u32 len)
{
	if (!uaddr || !len)
		return ERR_PTR(-EINVAL);

	/* verify that blob length does not exceed our limit */
	if (len > SEV_FW_BLOB_MAX_SIZE)
		return ERR_PTR(-EINVAL);

	return memdup_user((void __user *)uaddr, len);
}
EXPORT_SYMBOL_GPL(psp_copy_user_blob);

static int sev_get_api_version(void)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_status *status;
	int error = 0, ret;

	status = &sev->status_cmd_buf;
	ret = sev_platform_status(status, &error);
	if (ret) {
		dev_err(sev->dev,
			"SEV: failed to get status. Error: %#x\n", error);
		return 1;
	}

	sev->api_major = status->api_major;
	sev->api_minor = status->api_minor;
	sev->build = status->build;
	sev->state = status->state;

	return 0;
}

static int sev_get_firmware(struct device *dev,
			    const struct firmware **firmware)
{
	char fw_name_specific[SEV_FW_NAME_SIZE];
	char fw_name_subset[SEV_FW_NAME_SIZE];

	snprintf(fw_name_specific, sizeof(fw_name_specific),
		 "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
		 boot_cpu_data.x86, boot_cpu_data.x86_model);

	snprintf(fw_name_subset, sizeof(fw_name_subset),
		 "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
		 boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);

	/* Check for SEV FW for a particular model.
	 * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
	 *
	 * or
	 *
	 * Check for SEV FW common to a subset of models.
	 * Ex. amd_sev_fam17h_model0xh.sbin for
	 *     Family 17h Model 00h -- Family 17h Model 0Fh
	 *
	 * or
	 *
	 * Fall-back to using generic name: sev.fw
	 */
	if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
	    (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
	    (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
		return 0;

	return -ENOENT;
}

/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
static int sev_update_firmware(struct device *dev)
{
	struct sev_data_download_firmware *data;
	const struct firmware *firmware;
	int ret, error, order;
	struct page *p;
	u64 data_size;

	if (sev_get_firmware(dev, &firmware) == -ENOENT) {
		dev_dbg(dev, "No SEV firmware file present\n");
		return -1;
	}

	/*
	 * SEV FW expects the physical address given to it to be 32
	 * byte aligned. Memory allocated has structure placed at the
	 * beginning followed by the firmware being passed to the SEV
	 * FW. Allocate enough memory for data structure + alignment
	 * padding + SEV FW.
	 */
	data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);

	order = get_order(firmware->size + data_size);
	p = alloc_pages(GFP_KERNEL, order);
	if (!p) {
		ret = -1;
		goto fw_err;
	}

	/*
	 * Copy firmware data to a kernel allocated contiguous
	 * memory region.
	 */
	data = page_address(p);
	memcpy(page_address(p) + data_size, firmware->data, firmware->size);

	data->address = __psp_pa(page_address(p) + data_size);
	data->len = firmware->size;

	ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
	if (ret)
		dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
	else
		dev_info(dev, "SEV firmware update successful\n");

	__free_pages(p, order);

fw_err:
	release_firmware(firmware);

	return ret;
}

static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_pek_cert_import input;
	struct sev_data_pek_cert_import *data;
	void *pek_blob, *oca_blob;
	int ret;

	if (!writable)
		return -EPERM;

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* copy PEK certificate blob from userspace */
	pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
	if (IS_ERR(pek_blob)) {
		ret = PTR_ERR(pek_blob);
		goto e_free;
	}

	data->pek_cert_address = __psp_pa(pek_blob);
	data->pek_cert_len = input.pek_cert_len;

	/* copy OCA certificate blob from userspace */
	oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
	if (IS_ERR(oca_blob)) {
		ret = PTR_ERR(oca_blob);
		goto e_free_pek;
	}

	data->oca_cert_address = __psp_pa(oca_blob);
	data->oca_cert_len = input.oca_cert_len;

	/* If platform is not in INIT state then transition it to INIT */
	if (sev->state != SEV_STATE_INIT) {
		ret = __sev_platform_init_locked(&argp->error);
		if (ret)
			goto e_free_oca;
	}

	ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);

e_free_oca:
	kfree(oca_blob);
e_free_pek:
	kfree(pek_blob);
e_free:
	kfree(data);
	return ret;
}

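/*
 * SEV_GET_ID2 ioctl: retrieve the platform ID. With a NULL/zero-length
 * buffer the firmware only reports the required length; otherwise the ID
 * blob is written back to the user-supplied buffer.
 */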
static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
{
	struct sev_user_data_get_id2 input;
	struct sev_data_get_id *data;
	void __user *input_address;
	void *id_blob = NULL;
	int ret;

	/* SEV GET_ID is available from SEV API v0.16 and up */
	if (!sev_version_greater_or_equal(0, 16))
		return -ENOTSUPP;

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	input_address = (void __user *)input.address;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (input.address && input.length) {
		id_blob = kmalloc(input.length, GFP_KERNEL);
		if (!id_blob) {
			kfree(data);
			return -ENOMEM;
		}

		data->address = __psp_pa(id_blob);
		data->len = input.length;
	}

	ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);

	/*
	 * Firmware will return the length of the ID value (either the minimum
	 * required length or the actual length written), return it to the user.
	 */
	input.length = data->len;

	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
		ret = -EFAULT;
		goto e_free;
	}

	if (id_blob) {
		if (copy_to_user(input_address, id_blob, data->len)) {
			ret = -EFAULT;
			goto e_free;
		}
	}

e_free:
	kfree(id_blob);
	kfree(data);

	return ret;
}

static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
{
	struct sev_data_get_id *data;
	u64 data_size, user_size;
	void *id_blob, *mem;
	int ret;

	/* SEV GET_ID available from SEV API v0.16 and up */
	if (!sev_version_greater_or_equal(0, 16))
		return -ENOTSUPP;

	/* SEV FW expects the buffer it fills with the ID to be
	 * 8-byte aligned. Memory allocated should be enough to
	 * hold data structure + alignment padding + memory
	 * where SEV FW writes the ID.
	 */
	data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
	user_size = sizeof(struct sev_user_data_get_id);

	mem = kzalloc(data_size + user_size, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	data = mem;
	id_blob = mem + data_size;

	data->address = __psp_pa(id_blob);
	data->len = user_size;

	ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
	if (!ret) {
		if (copy_to_user((void __user *)argp->data, id_blob, data->len))
			ret = -EFAULT;
	}

	kfree(mem);

	return ret;
}

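/*
 * SEV_PDH_CERT_EXPORT ioctl: export the PDH certificate and certificate
 * chain. As with the other export-style commands, a NULL/zero-length
 * request only reports the buffer sizes the firmware needs.
 */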
static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
{
	struct sev_device *sev = psp_master->sev_data;
	struct sev_user_data_pdh_cert_export input;
	void *pdh_blob = NULL, *cert_blob = NULL;
	struct sev_data_pdh_cert_export *data;
	void __user *input_cert_chain_address;
	void __user *input_pdh_cert_address;
	int ret;

	/* If platform is not in INIT state then transition it to INIT. */
	if (sev->state != SEV_STATE_INIT) {
		if (!writable)
			return -EPERM;

		ret = __sev_platform_init_locked(&argp->error);
		if (ret)
			return ret;
	}

	if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Userspace wants to query the certificate length. */
	if (!input.pdh_cert_address ||
	    !input.pdh_cert_len ||
	    !input.cert_chain_address)
		goto cmd;

	input_pdh_cert_address = (void __user *)input.pdh_cert_address;
	input_cert_chain_address = (void __user *)input.cert_chain_address;

	/* Allocate a physically contiguous buffer to store the PDH blob. */
	if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) {
		ret = -EFAULT;
		goto e_free;
	}

	/* Allocate a physically contiguous buffer to store the cert chain blob. */
	if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) {
		ret = -EFAULT;
		goto e_free;
	}

	pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
	if (!pdh_blob) {
		ret = -ENOMEM;
		goto e_free;
	}

	data->pdh_cert_address = __psp_pa(pdh_blob);
	data->pdh_cert_len = input.pdh_cert_len;

	cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
	if (!cert_blob) {
		ret = -ENOMEM;
		goto e_free_pdh;
	}

	data->cert_chain_address = __psp_pa(cert_blob);
	data->cert_chain_len = input.cert_chain_len;

cmd:
	ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);

	/* If we query the length, FW responded with expected data. */
	input.cert_chain_len = data->cert_chain_len;
	input.pdh_cert_len = data->pdh_cert_len;

	if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
		ret = -EFAULT;
		goto e_free_cert;
	}

	if (pdh_blob) {
		if (copy_to_user(input_pdh_cert_address,
				 pdh_blob, input.pdh_cert_len)) {
			ret = -EFAULT;
			goto e_free_cert;
		}
	}

	if (cert_blob) {
		if (copy_to_user(input_cert_chain_address,
				 cert_blob, input.cert_chain_len))
			ret = -EFAULT;
	}

e_free_cert:
	kfree(cert_blob);
e_free_pdh:
	kfree(pdh_blob);
e_free:
	kfree(data);
	return ret;
}

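/*
 * Top-level SEV_ISSUE_CMD ioctl dispatcher for /dev/sev. The writable flag
 * (file opened for writing) gates the state-changing commands; all commands
 * are serialized by sev_cmd_mutex.
 */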
static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sev_issue_cmd input;
	int ret = -EFAULT;
	bool writable = file->f_mode & FMODE_WRITE;

	if (!psp_master || !psp_master->sev_data)
		return -ENODEV;

	if (ioctl != SEV_ISSUE_CMD)
		return -EINVAL;

	if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
		return -EFAULT;

	if (input.cmd > SEV_MAX)
		return -EINVAL;

	mutex_lock(&sev_cmd_mutex);

	switch (input.cmd) {

	case SEV_FACTORY_RESET:
		ret = sev_ioctl_do_reset(&input, writable);
		break;
	case SEV_PLATFORM_STATUS:
		ret = sev_ioctl_do_platform_status(&input);
		break;
	case SEV_PEK_GEN:
		ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input, writable);
		break;
	case SEV_PDH_GEN:
		ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input, writable);
		break;
	case SEV_PEK_CSR:
		ret = sev_ioctl_do_pek_csr(&input, writable);
		break;
	case SEV_PEK_CERT_IMPORT:
		ret = sev_ioctl_do_pek_import(&input, writable);
		break;
	case SEV_PDH_CERT_EXPORT:
		ret = sev_ioctl_do_pdh_export(&input, writable);
		break;
	case SEV_GET_ID:
		pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n");
		ret = sev_ioctl_do_get_id(&input);
		break;
	case SEV_GET_ID2:
		ret = sev_ioctl_do_get_id2(&input);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
		ret = -EFAULT;
out:
	mutex_unlock(&sev_cmd_mutex);

	return ret;
}

static const struct file_operations sev_fops = {
	.owner	= THIS_MODULE,
	.unlocked_ioctl = sev_ioctl,
};

int sev_platform_status(struct sev_user_data_status *data, int *error)
{
	return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
}
EXPORT_SYMBOL_GPL(sev_platform_status);

int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
{
	return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
}
EXPORT_SYMBOL_GPL(sev_guest_deactivate);

int sev_guest_activate(struct sev_data_activate *data, int *error)
{
	return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
}
EXPORT_SYMBOL_GPL(sev_guest_activate);

int sev_guest_decommission(struct sev_data_decommission *data, int *error)
{
	return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
}
EXPORT_SYMBOL_GPL(sev_guest_decommission);

int sev_guest_df_flush(int *error)
{
	return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
}
EXPORT_SYMBOL_GPL(sev_guest_df_flush);

static void sev_exit(struct kref *ref)
{
	misc_deregister(&misc_dev->misc);
	kfree(misc_dev);
	misc_dev = NULL;
}

static int sev_misc_init(struct sev_device *sev)
{
	struct device *dev = sev->dev;
	int ret;

	/*
	 * SEV feature support can be detected on multiple devices but the SEV
	 * FW commands must be issued on the master. During probe, we do not
	 * know the master, hence we create /dev/sev on the first device probe.
	 * sev_do_cmd() finds the right master device and issues the command
	 * to the firmware.
	 */
	if (!misc_dev) {
		struct miscdevice *misc;

		misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
		if (!misc_dev)
			return -ENOMEM;

		misc = &misc_dev->misc;
		misc->minor = MISC_DYNAMIC_MINOR;
		misc->name = DEVICE_NAME;
		misc->fops = &sev_fops;

		ret = misc_register(misc);
		if (ret)
			return ret;

		kref_init(&misc_dev->refcount);
	} else {
		kref_get(&misc_dev->refcount);
	}

	init_waitqueue_head(&sev->int_queue);
	sev->misc = misc_dev;
	dev_dbg(dev, "registered SEV device\n");

	return 0;
}

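/*
 * Called from the PSP driver during device probe: allocate the sev_device,
 * wire up the register layout and interrupt handler, and register the
 * /dev/sev misc device via sev_misc_init().
 */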
int sev_dev_init(struct psp_device *psp)
{
	struct device *dev = psp->dev;
	struct sev_device *sev;
	int ret = -ENOMEM;

	sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL);
	if (!sev)
		goto e_err;

	psp->sev_data = sev;

	sev->dev = dev;
	sev->psp = psp;

	sev->io_regs = psp->io_regs;

	sev->vdata = (struct sev_vdata *)psp->vdata->sev;
	if (!sev->vdata) {
		ret = -ENODEV;
		dev_err(dev, "sev: missing driver data\n");
		goto e_sev;
	}

	psp_set_sev_irq_handler(psp, sev_irq_handler, sev);

	ret = sev_misc_init(sev);
	if (ret)
		goto e_irq;

	dev_notice(dev, "sev enabled\n");

	return 0;

e_irq:
	psp_clear_sev_irq_handler(psp);
e_sev:
	devm_kfree(dev, sev);
e_err:
	psp->sev_data = NULL;

	dev_notice(dev, "sev initialization failed\n");

	return ret;
}

static void sev_firmware_shutdown(struct sev_device *sev)
{
	sev_platform_shutdown(NULL);

	if (sev_es_tmr) {
		/* The TMR area was encrypted, flush it from the cache */
		wbinvd_on_all_cpus();

		free_pages((unsigned long)sev_es_tmr,
			   get_order(SEV_ES_TMR_SIZE));
		sev_es_tmr = NULL;
	}
}

void sev_dev_destroy(struct psp_device *psp)
{
	struct sev_device *sev = psp->sev_data;

	if (!sev)
		return;

	sev_firmware_shutdown(sev);

	if (sev->misc)
		kref_put(&misc_dev->refcount, sev_exit);

	psp_clear_sev_irq_handler(psp);
}

int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
				void *data, int *error)
{
	if (!filep || filep->f_op != &sev_fops)
		return -EBADF;

	return sev_do_cmd(cmd, data, error);
}
EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);

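/*
 * Late initialization run off the PCI probe path: query the firmware API
 * version, optionally update the firmware, allocate the SEV-ES TMR and
 * issue SEV_CMD_INIT (retrying once on SEV_RET_SECURE_DATA_INVALID).
 */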
void sev_pci_init(void)
{
	struct sev_device *sev = psp_master->sev_data;
	struct page *tmr_page;
	int error, rc;

	if (!sev)
		return;

	psp_timeout = psp_probe_timeout;

	if (sev_get_api_version())
		goto err;

	if (sev_version_greater_or_equal(0, 15) &&
	    sev_update_firmware(sev->dev) == 0)
		sev_get_api_version();

	/* Obtain the TMR memory area for SEV-ES use */
	tmr_page = alloc_pages(GFP_KERNEL, get_order(SEV_ES_TMR_SIZE));
	if (tmr_page) {
		sev_es_tmr = page_address(tmr_page);
	} else {
		sev_es_tmr = NULL;
		dev_warn(sev->dev,
			 "SEV: TMR allocation failed, SEV-ES support unavailable\n");
	}

	/* Initialize the platform */
	rc = sev_platform_init(&error);
	if (rc && (error == SEV_RET_SECURE_DATA_INVALID)) {
		/*
		 * INIT command returned an integrity check failure
		 * status code, meaning that firmware load and
		 * validation of SEV related persistent data has
		 * failed and persistent state has been erased.
		 * Retrying INIT command here should succeed.
		 */
		dev_dbg(sev->dev, "SEV: retrying INIT command");
		rc = sev_platform_init(&error);
	}

	if (rc) {
		dev_err(sev->dev, "SEV: failed to INIT error %#x\n", error);
		return;
	}

	dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major,
		 sev->api_minor, sev->build);

	return;

err:
	psp_master->sev_data = NULL;
}

void sev_pci_exit(void)
{
	struct sev_device *sev = psp_master->sev_data;

	if (!sev)
		return;

	sev_firmware_shutdown(sev);
}