/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright(c) 2016-20 Intel Corporation.
 *
 * Contains the software defined data structures for enclaves.
 */
7 #ifndef _X86_ENCL_H
8 #define _X86_ENCL_H
9
10 #include <linux/cpumask.h>
11 #include <linux/kref.h>
12 #include <linux/list.h>
13 #include <linux/mm_types.h>
14 #include <linux/mmu_notifier.h>
15 #include <linux/mutex.h>
16 #include <linux/notifier.h>
17 #include <linux/srcu.h>
18 #include <linux/workqueue.h>
19 #include <linux/xarray.h>
20 #include "sgx.h"
21
22 /* 'desc' bits holding the offset in the VA (version array) page. */
23 #define SGX_ENCL_PAGE_VA_OFFSET_MASK GENMASK_ULL(11, 3)
24
25 /* 'desc' bit marking that the page is being reclaimed. */
26 #define SGX_ENCL_PAGE_BEING_RECLAIMED BIT(3)
27
/*
 * Per-page tracking for a single enclave page.  'desc' carries the page's
 * address together with the SGX_ENCL_PAGE_* flag bits defined above in its
 * low (page-offset) bits.
 */
struct sgx_encl_page {
	unsigned long desc;
	/* Maximum VM protection bits; exact semantics live in encl.c — confirm there. */
	unsigned long vm_max_prot_bits;
	struct sgx_epc_page *epc_page;
	struct sgx_encl *encl;
	struct sgx_va_page *va_page;
};

36 enum sgx_encl_flags {
37 SGX_ENCL_IOCTL = BIT(0),
38 SGX_ENCL_DEBUG = BIT(1),
39 SGX_ENCL_CREATED = BIT(2),
40 SGX_ENCL_INITIALIZED = BIT(3),
41 };
42
43 struct sgx_encl_mm {
44 struct sgx_encl *encl;
45 struct mm_struct *mm;
46 struct list_head list;
47 struct mmu_notifier mmu_notifier;
48 };
49
50 struct sgx_encl {
51 unsigned long base;
52 unsigned long size;
53 unsigned long flags;
54 unsigned int page_cnt;
55 unsigned int secs_child_cnt;
56 struct mutex lock;
57 struct xarray page_array;
58 struct sgx_encl_page secs;
59 unsigned long attributes;
60 unsigned long attributes_mask;
61
62 cpumask_t cpumask;
63 struct file *backing;
64 struct kref refcount;
65 struct list_head va_pages;
66 unsigned long mm_list_version;
67 struct list_head mm_list;
68 spinlock_t mm_lock;
69 struct srcu_struct srcu;
70 };
71
72 #define SGX_VA_SLOT_COUNT 512
73
74 struct sgx_va_page {
75 struct sgx_epc_page *epc_page;
76 DECLARE_BITMAP(slots, SGX_VA_SLOT_COUNT);
77 struct list_head list;
78 };
79
80 struct sgx_backing {
81 pgoff_t page_index;
82 struct page *contents;
83 struct page *pcmd;
84 unsigned long pcmd_offset;
85 };
86
87 extern const struct vm_operations_struct sgx_vm_ops;
88
sgx_encl_find(struct mm_struct * mm,unsigned long addr,struct vm_area_struct ** vma)89 static inline int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
90 struct vm_area_struct **vma)
91 {
92 struct vm_area_struct *result;
93
94 result = vma_lookup(mm, addr);
95 if (!result || result->vm_ops != &sgx_vm_ops)
96 return -EINVAL;
97
98 *vma = result;
99
100 return 0;
101 }
102
103 int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
104 unsigned long end, unsigned long vm_flags);
105
106 bool current_is_ksgxd(void);
107 void sgx_encl_release(struct kref *ref);
108 int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
109 int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index,
110 struct sgx_backing *backing);
111 int sgx_encl_alloc_backing(struct sgx_encl *encl, unsigned long page_index,
112 struct sgx_backing *backing);
113 void sgx_encl_put_backing(struct sgx_backing *backing);
114 int sgx_encl_test_and_clear_young(struct mm_struct *mm,
115 struct sgx_encl_page *page);
116
117 struct sgx_epc_page *sgx_alloc_va_page(void);
118 unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page);
119 void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset);
120 bool sgx_va_page_full(struct sgx_va_page *va_page);
121 void sgx_encl_free_epc_page(struct sgx_epc_page *page);
122
123 #endif /* _X86_ENCL_H */
124