/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#ifndef __GZVM_DRV_H__
#define __GZVM_DRV_H__

#include <linux/eventfd.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/gzvm.h>
#include <linux/srcu.h>
#include <linux/rbtree.h>
#include <linux/kref.h>

/* GZVM version encoding */
#define GZVM_DRV_MAJOR_VERSION		16
#define GZVM_DRV_MINOR_VERSION		0

struct gzvm_version {
	u32 major;
	u32 minor;
	u64 sub;	/* currently used by the hypervisor */
};
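
/*
 * Illustrative sketch only (the helper name is hypothetical and the real
 * compatibility policy lives in gzvm_arch_probe()): a simple check could
 * require the driver's and hypervisor's major versions to match.
 */
static inline bool gzvm_version_compatible_example(struct gzvm_version drv,
						   struct gzvm_version hyp)
{
	return drv.major == hyp.major;
}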

struct gzvm_driver {
	struct gzvm_version hyp_version;
	struct gzvm_version drv_version;

	struct kobject *sysfs_root_dir;
	u32 demand_paging_batch_pages;
	u32 destroy_batch_pages;

	struct dentry *gzvm_debugfs_dir;
};

/*
 * For a normal physical address, the highest 12 bits are zero, so bits
 * [62:52] can be set to mark an error physical address.
 */
#define GZVM_PA_ERR_BAD (0x7ffULL << 52)
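
/*
 * Example only (the helper name is hypothetical, not part of the driver
 * API): a physical address returned by the hypervisor can be tested for
 * the error pattern by checking whether all of bits [62:52] are set.
 */
static inline bool gzvm_pa_is_err_example(u64 pa)
{
	return (pa & GZVM_PA_ERR_BAD) == GZVM_PA_ERR_BAD;
}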

#define GZVM_VCPU_MMAP_SIZE  PAGE_SIZE
#define INVALID_VM_ID   0xffff

/*
 * These definitions cover the API between the GenieZone hypervisor and the
 * driver; they do not need to be visible to uapi. GenieZone-specific error
 * codes are defined here so they can be mapped to Linux errno values.
 */
#define NO_ERROR                (0)
#define ERR_NO_MEMORY           (-5)
#define ERR_INVALID_ARGS        (-8)
#define ERR_NOT_SUPPORTED       (-24)
#define ERR_NOT_IMPLEMENTED     (-27)
#define ERR_BUSY                (-33)
#define ERR_FAULT               (-40)
#define GZVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID       1
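
/*
 * Minimal sketch of the translation gzvm_err_to_errno() performs (the real
 * mapping lives in the driver's .c file; the pairings below are plausible
 * assumptions, not verified against the implementation):
 *
 *	switch (err) {
 *	case NO_ERROR:            return 0;
 *	case ERR_NO_MEMORY:       return -ENOMEM;
 *	case ERR_INVALID_ARGS:    return -EINVAL;
 *	case ERR_NOT_SUPPORTED:
 *	case ERR_NOT_IMPLEMENTED: return -EOPNOTSUPP;
 *	case ERR_BUSY:            return -EBUSY;
 *	case ERR_FAULT:           return -EFAULT;
 *	default:                  return -EINVAL;
 *	}
 */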

/*
 * The following data structures are used for transferring data between the
 * driver and the hypervisor; they are aligned with the hypervisor's
 * definitions.
 */
#define GZVM_MAX_VCPUS		 8
#define GZVM_MAX_MEM_REGION	10

#define GZVM_VCPU_RUN_MAP_SIZE			(PAGE_SIZE * 2)

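/*
 * With 4 KiB base pages, one demand-paging batch covers a single 2 MiB
 * block: 2 MiB / 4 KiB = 512 pages.
 */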
#define GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE	(PMD_SIZE) /* 2MB */
#define GZVM_DRV_DEMAND_PAGING_BATCH_PAGES	\
	(GZVM_BLOCK_BASED_DEMAND_PAGE_SIZE / PAGE_SIZE)
#define GZVM_DRV_DESTROY_PAGING_BATCH_PAGES	(128)

#define GZVM_MAX_DEBUGFS_DIR_NAME_SIZE  20
#define GZVM_MAX_DEBUGFS_VALUE_SIZE	20

enum gzvm_demand_paging_mode {
	GZVM_FULLY_POPULATED = 0,
	GZVM_DEMAND_PAGING = 1,
};

/**
 * struct mem_region_addr_range: identical to ffa memory constituent
 * @address: the base IPA of the constituent memory region, aligned to 4 KiB
 * @pg_cnt: the number of 4 KiB pages in the constituent memory region
 * @reserved: reserved for 64-bit alignment
 */
struct mem_region_addr_range {
	__u64 address;
	__u32 pg_cnt;
	__u32 reserved;
};

struct gzvm_memory_region_ranges {
	__u32 slot;
	__u32 constituent_cnt;
	__u64 total_pages;
	__u64 gpa;
	struct mem_region_addr_range constituents[];
};
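
/*
 * Example (illustrative; assumes <linux/slab.h> and <linux/overflow.h> are
 * available in the including .c file): the flexible constituents[] array
 * makes the struct variable-length, so an allocation for n constituents
 * uses struct_size():
 *
 *	struct gzvm_memory_region_ranges *r;
 *
 *	r = kzalloc(struct_size(r, constituents, n), GFP_KERNEL);
 *	if (!r)
 *		return -ENOMEM;
 *	r->constituent_cnt = n;
 */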

/*
 * A reasonable and large enough limit for the maximum number of pages a
 * guest can use.
 */
#define GZVM_MEM_MAX_NR_PAGES		((1UL << 31) - 1)
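
/*
 * With 4 KiB pages, (1UL << 31) - 1 pages corresponds to just under 8 TiB
 * of guest memory.
 */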

/**
 * struct gzvm_memslot: VM's memory slot descriptor
 * @base_gfn: begin of guest page frame
 * @npages: number of pages this slot covers
 * @userspace_addr: corresponding userspace va
 * @vma: vma related to this userspace addr
 * @flags: defines the usage of the memory region, e.g. guest memory or
 * firmware protection
 * @slot_id: id used to identify the memory slot
 */
struct gzvm_memslot {
	u64 base_gfn;
	unsigned long npages;
	unsigned long userspace_addr;
	struct vm_area_struct *vma;
	u32 flags;
	u32 slot_id;
};
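
/*
 * Sketch of the translation gzvm_gfn_to_hva_memslot() provides
 * (illustrative; the real helper, declared below, also validates that the
 * gfn falls inside the slot):
 *
 *	hva = slot->userspace_addr + ((gfn - slot->base_gfn) << PAGE_SHIFT);
 */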

struct gzvm_vcpu {
	struct gzvm *gzvm;
	int vcpuid;
	/* lock of vcpu */
	struct mutex lock;
	struct gzvm_vcpu_run *run;
	struct gzvm_vcpu_hwstate *hwstate;
	struct hrtimer gzvm_vtimer;
	struct {
		u32 vtimer_irq;
		u32 virtio_irq;
	} idle_events;
	struct rcuwait wait;
};

struct gzvm_pinned_page {
	struct rb_node node;
	struct page *page;
	u64 ipa;
};
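
/*
 * Illustrative sketch (the helper name is hypothetical, not part of the
 * driver API): pinned pages sit in an rb-tree ordered by guest IPA, so a
 * lookup is a plain binary search on @ipa.
 */
static inline struct gzvm_pinned_page *
gzvm_find_pinned_page_example(struct rb_root *root, u64 ipa)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct gzvm_pinned_page *p =
			rb_entry(node, struct gzvm_pinned_page, node);

		if (ipa < p->ipa)
			node = node->rb_left;
		else if (ipa > p->ipa)
			node = node->rb_right;
		else
			return p;
	}
	return NULL;
}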

struct gzvm_vm_stat {
	u64 protected_hyp_mem;
	u64 protected_shared_mem;
};

/**
 * struct gzvm: the VM instance; its layout is aligned with the hypervisor's
 * definitions for data transferred between driver and hypervisor.
 * @gzvm_drv: pointer to the driver's information
 * @vcpus: VM's vCPU descriptors
 * @mm: userspace address space tied to this VM
 * @memslot: VM's memory slot descriptors
 * @lock: lock for list_add
 * @irqfds: keeps the irqfds' information
 * @ioevents: list head for ioevents
 * @ioevent_lock: lock for the ioevent list
 * @vm_list: list head for the vm list
 * @vm_id: vm id
 * @irq_ack_notifier_list: list head for irq ack notifiers
 * @irq_srcu: structure for SRCU (sleepable RCU)
 * @irq_lock: lock for irq injection
 * @pinned_pages: rb-tree recording pinned/unpinned pages
 * @mem_lock: lock for memory operations
 * @mem_alloc_mode: memory allocation mode - fully populated or demand paging
 * @demand_page_gran: demand page granularity: how much memory is allocated
 * for the VM in a single page fault
 * @demand_page_buffer: the mailbox for transferring a large batch of pages
 * @demand_paging_lock: lock preventing multiple CPUs from using the same
 * demand page mailbox at the same time
 * @stat: information for VM memory statistics
 * @debug_dir: debugfs directory node for VM memory statistics
 * @kref: reference counter between the VM and its vCPUs, used at destroy time
 */
struct gzvm {
	struct gzvm_driver *gzvm_drv;
	struct gzvm_vcpu *vcpus[GZVM_MAX_VCPUS];
	struct mm_struct *mm;
	struct gzvm_memslot memslot[GZVM_MAX_MEM_REGION];
	struct mutex lock;

	struct {
		spinlock_t        lock;
		struct list_head  items;
		struct list_head  resampler_list;
		struct mutex      resampler_lock;
	} irqfds;

	struct list_head ioevents;
	struct mutex ioevent_lock;

	struct list_head vm_list;
	u16 vm_id;

	struct hlist_head irq_ack_notifier_list;
	struct srcu_struct irq_srcu;
	struct mutex irq_lock;
	u32 mem_alloc_mode;

	struct rb_root pinned_pages;
	struct mutex mem_lock;

	u32 demand_page_gran;
	u64 *demand_page_buffer;
	struct mutex demand_paging_lock;

	struct gzvm_vm_stat stat;
	struct dentry *debug_dir;
	struct kref kref;
};
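
/*
 * Sketch of how @kref ties the VM's lifetime to its vCPUs (illustrative;
 * the release callback name is hypothetical and the real definitions of
 * gzvm_vm_get()/gzvm_vm_put(), declared below, live in the .c file):
 *
 *	void gzvm_vm_get(struct gzvm *gzvm)
 *	{
 *		kref_get(&gzvm->kref);
 *	}
 *
 *	void gzvm_vm_put(struct gzvm *gzvm)
 *	{
 *		kref_put(&gzvm->kref, gzvm_destroy_vm_release);
 *	}
 */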

long gzvm_dev_ioctl_check_extension(struct gzvm *gzvm, unsigned long args);
int gzvm_dev_ioctl_create_vm(struct gzvm_driver *drv, unsigned long vm_type);

int gzvm_err_to_errno(unsigned long err);

void gzvm_destroy_all_vms(void);

void gzvm_destroy_vcpus(struct gzvm *gzvm);

/* arch-dependent functions */
int gzvm_arch_probe(struct gzvm_version drv_version,
		    struct gzvm_version *hyp_version);
int gzvm_arch_query_hyp_batch_pages(struct gzvm_enable_cap *cap,
				    void __user *argp);
int gzvm_arch_query_destroy_batch_pages(struct gzvm_enable_cap *cap,
					void __user *argp);

int gzvm_arch_set_memregion(u16 vm_id, size_t buf_size,
			    phys_addr_t region);
int gzvm_arch_check_extension(struct gzvm *gzvm, __u64 cap, void __user *argp);
int gzvm_arch_create_vm(unsigned long vm_type);
int gzvm_arch_destroy_vm(u16 vm_id, u64 destroy_page_gran);
int gzvm_arch_map_guest(u16 vm_id, int memslot_id, u64 pfn, u64 gfn,
			u64 nr_pages);
int gzvm_arch_map_guest_block(u16 vm_id, int memslot_id, u64 gfn, u64 nr_pages);
int gzvm_arch_get_statistics(struct gzvm *gzvm);
int gzvm_vm_ioctl_arch_enable_cap(struct gzvm *gzvm,
				  struct gzvm_enable_cap *cap,
				  void __user *argp);

int gzvm_gfn_to_hva_memslot(struct gzvm_memslot *memslot, u64 gfn,
			    u64 *hva_memslot);
int gzvm_vm_populate_mem_region(struct gzvm *gzvm, int slot_id);
int gzvm_vm_allocate_guest_page(struct gzvm *gzvm, struct gzvm_memslot *slot,
				u64 gfn, u64 *pfn);

int gzvm_vm_ioctl_create_vcpu(struct gzvm *gzvm, u32 cpuid);
int gzvm_arch_vcpu_update_one_reg(struct gzvm_vcpu *vcpu, __u64 reg_id,
				  bool is_write, __u64 *data);
int gzvm_arch_drv_init(void);
int gzvm_arch_create_vcpu(u16 vm_id, int vcpuid, void *run);
int gzvm_arch_vcpu_run(struct gzvm_vcpu *vcpu, __u64 *exit_reason);
int gzvm_arch_destroy_vcpu(u16 vm_id, int vcpuid);
int gzvm_arch_inform_exit(u16 vm_id);

u64 gzvm_vcpu_arch_get_timer_delay_ns(struct gzvm_vcpu *vcpu);

void gzvm_vtimer_set(struct gzvm_vcpu *vcpu, u64 ns);
void gzvm_vtimer_release(struct gzvm_vcpu *vcpu);

int gzvm_find_memslot(struct gzvm *vm, u64 gpa);
int gzvm_handle_page_fault(struct gzvm_vcpu *vcpu);
bool gzvm_handle_guest_exception(struct gzvm_vcpu *vcpu);
int gzvm_handle_relinquish(struct gzvm_vcpu *vcpu, phys_addr_t ipa);
bool gzvm_handle_guest_hvc(struct gzvm_vcpu *vcpu);
bool gzvm_arch_handle_guest_hvc(struct gzvm_vcpu *vcpu);
int gzvm_handle_guest_idle(struct gzvm_vcpu *vcpu);
void gzvm_handle_guest_ipi(struct gzvm_vcpu *vcpu);
void gzvm_vcpu_wakeup_all(struct gzvm *gzvm);

int gzvm_arch_create_device(u16 vm_id, struct gzvm_create_device *gzvm_dev);
int gzvm_arch_inject_irq(struct gzvm *gzvm, unsigned int vcpu_idx,
			 u32 irq, bool level);

void gzvm_notify_acked_irq(struct gzvm *gzvm, unsigned int gsi);
int gzvm_irqfd(struct gzvm *gzvm, struct gzvm_irqfd *args);
int gzvm_drv_irqfd_init(void);
void gzvm_drv_irqfd_exit(void);
int gzvm_vm_irqfd_init(struct gzvm *gzvm);
void gzvm_vm_irqfd_release(struct gzvm *gzvm);

int gzvm_arch_memregion_purpose(struct gzvm *gzvm,
				struct gzvm_userspace_memory_region *mem);
int gzvm_arch_set_dtb_config(struct gzvm *gzvm, struct gzvm_dtb_config *args);

int gzvm_init_ioeventfd(struct gzvm *gzvm);
void gzvm_vm_ioeventfd_release(struct gzvm *gzvm);
int gzvm_ioeventfd(struct gzvm *gzvm, struct gzvm_ioeventfd *args);
bool gzvm_ioevent_write(struct gzvm_vcpu *vcpu, __u64 addr, int len,
			const void *val);
void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt);
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr);
void add_wait_queue_priority(struct wait_queue_head *wq_head,
			     struct wait_queue_entry *wq_entry);
void gzvm_vm_put(struct gzvm *gzvm);
void gzvm_vm_get(struct gzvm *gzvm);

#endif /* __GZVM_DRV_H__ */