1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Ultravisor Interfaces
4 *
5 * Copyright IBM Corp. 2019
6 *
7 * Author(s):
8 * Vasily Gorbik <gor@linux.ibm.com>
9 * Janosch Frank <frankja@linux.ibm.com>
10 */
11 #ifndef _ASM_S390_UV_H
12 #define _ASM_S390_UV_H
13
14 #include <linux/types.h>
15 #include <linux/errno.h>
16 #include <linux/bug.h>
17 #include <linux/sched.h>
18 #include <asm/page.h>
19 #include <asm/gmap.h>
20
/* Ultravisor call response codes, returned in uv_cb_header.rc */
#define UVC_RC_EXECUTED 0x0001		/* command executed successfully */
#define UVC_RC_INV_CMD 0x0002		/* invalid command code */
#define UVC_RC_INV_STATE 0x0003	/* invalid state for this command */
#define UVC_RC_INV_LEN 0x0005		/* invalid control block length */
#define UVC_RC_NO_RESUME 0x0007	/* command cannot be resumed */
#define UVC_RC_NEED_DESTROY 0x8000	/* config must be destroyed (see uv_cmd_nodata callers) */
27
/* Ultravisor command codes, placed in uv_cb_header.cmd */
#define UVC_CMD_QUI 0x0001			/* query UV information */
#define UVC_CMD_INIT_UV 0x000f			/* initialize the Ultravisor */
/* secure configuration (guest) lifecycle */
#define UVC_CMD_CREATE_SEC_CONF 0x0100
#define UVC_CMD_DESTROY_SEC_CONF 0x0101
/* secure CPU lifecycle */
#define UVC_CMD_CREATE_SEC_CPU 0x0120
#define UVC_CMD_DESTROY_SEC_CPU 0x0121
/* secure storage conversion */
#define UVC_CMD_CONV_TO_SEC_STOR 0x0200
#define UVC_CMD_CONV_FROM_SEC_STOR 0x0201
#define UVC_CMD_DESTR_SEC_STOR 0x0202
/* secure guest image setup and CPU control */
#define UVC_CMD_SET_SEC_CONF_PARAMS 0x0300
#define UVC_CMD_UNPACK_IMG 0x0301
#define UVC_CMD_VERIFY_IMG 0x0302
#define UVC_CMD_CPU_RESET 0x0310
#define UVC_CMD_CPU_RESET_INITIAL 0x0311
#define UVC_CMD_PREPARE_RESET 0x0320
#define UVC_CMD_CPU_RESET_CLEAR 0x0321
#define UVC_CMD_CPU_SET_STATE 0x0330
#define UVC_CMD_SET_UNSHARE_ALL 0x0340
#define UVC_CMD_PIN_PAGE_SHARED 0x0341
#define UVC_CMD_UNPIN_PAGE_SHARED 0x0342
/* guest 2 page sharing with the hypervisor (see share() below) */
#define UVC_CMD_SET_SHARED_ACCESS 0x1000
#define UVC_CMD_REMOVE_SHARED_ACCESS 0x1001
50
51 /* Bits in installed uv calls */
/*
 * Bit numbers within uv_cb_qui.inst_calls_list (and uv_info.inst_calls_list)
 * indicating which UV calls the Ultravisor firmware supports.
 * Note: bits 10 and 12 are intentionally absent here.
 */
enum uv_cmds_inst {
	BIT_UVC_CMD_QUI = 0,
	BIT_UVC_CMD_INIT_UV = 1,
	BIT_UVC_CMD_CREATE_SEC_CONF = 2,
	BIT_UVC_CMD_DESTROY_SEC_CONF = 3,
	BIT_UVC_CMD_CREATE_SEC_CPU = 4,
	BIT_UVC_CMD_DESTROY_SEC_CPU = 5,
	BIT_UVC_CMD_CONV_TO_SEC_STOR = 6,
	BIT_UVC_CMD_CONV_FROM_SEC_STOR = 7,
	BIT_UVC_CMD_SET_SHARED_ACCESS = 8,
	BIT_UVC_CMD_REMOVE_SHARED_ACCESS = 9,
	BIT_UVC_CMD_SET_SEC_PARMS = 11,
	BIT_UVC_CMD_UNPACK_IMG = 13,
	BIT_UVC_CMD_VERIFY_IMG = 14,
	BIT_UVC_CMD_CPU_RESET = 15,
	BIT_UVC_CMD_CPU_RESET_INITIAL = 16,
	BIT_UVC_CMD_CPU_SET_STATE = 17,
	BIT_UVC_CMD_PREPARE_RESET = 18,
	BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET = 19,
	BIT_UVC_CMD_UNSHARE_ALL = 20,
	BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
	BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
};
75
/* Bit numbers within uv_cb_qui.uv_feature_indications */
enum uv_feat_ind {
	BIT_UV_FEAT_MISC = 0,
};
79
/*
 * Common header of every Ultravisor control block; the UV fills in
 * rc/rrc on completion of the call.
 */
struct uv_cb_header {
	u16 len;	/* 0x0000: total length of the control block */
	u16 cmd;	/* 0x0002: Command Code (UVC_CMD_*) */
	u16 rc;		/* 0x0004: Response Code (UVC_RC_*) */
	u16 rrc;	/* 0x0006: Return Reason Code */
} __packed __aligned(8);
86
/* Query Ultravisor Information (UVC_CMD_QUI) */
struct uv_cb_qui {
	struct uv_cb_header header;	/* 0x0000 */
	u64 reserved08;			/* 0x0008 */
	u64 inst_calls_list[4];		/* 0x0010: bitmap of installed UV calls (enum uv_cmds_inst) */
	u64 reserved30[2];		/* 0x0030 */
	u64 uv_base_stor_len;		/* 0x0040 */
	u64 reserved48;			/* 0x0048 */
	u64 conf_base_phys_stor_len;	/* 0x0050 */
	u64 conf_base_virt_stor_len;	/* 0x0058 */
	u64 conf_virt_var_stor_len;	/* 0x0060 */
	u64 cpu_stor_len;		/* 0x0068 */
	u32 reserved70[3];		/* 0x0070 */
	u32 max_num_sec_conf;		/* 0x007c */
	u64 max_guest_stor_addr;	/* 0x0080 */
	u8  reserved88[158 - 136];	/* 0x0088: pad up to 0x009e */
	u16 max_guest_cpu_id;		/* 0x009e */
	u64 uv_feature_indications;	/* 0x00a0: enum uv_feat_ind bits */
	u8  reserveda0[200 - 168];	/* 0x00a8: pad to total size 200 (0xc8) */
} __packed __aligned(8);
107
/* Initialize Ultravisor (UVC_CMD_INIT_UV) */
struct uv_cb_init {
	struct uv_cb_header header;	/* 0x0000 */
	u64 reserved08[2];		/* 0x0008 */
	u64 stor_origin;		/* 0x0018: origin of UV base storage donated to the UV */
	u64 stor_len;			/* 0x0020: length of that storage */
	u64 reserved28[4];		/* 0x0028 */
} __packed __aligned(8);
116
/* Create Guest Configuration (UVC_CMD_CREATE_SEC_CONF) */
struct uv_cb_cgc {
	struct uv_cb_header header;	/* 0x0000 */
	u64 reserved08[2];		/* 0x0008 */
	u64 guest_handle;		/* 0x0018: filled in by the UV on success */
	u64 conf_base_stor_origin;	/* 0x0020 */
	u64 conf_virt_stor_origin;	/* 0x0028 */
	u64 reserved30;			/* 0x0030 */
	u64 guest_stor_origin;		/* 0x0038 */
	u64 guest_stor_len;		/* 0x0040 */
	u64 guest_sca;			/* 0x0048 */
	u64 guest_asce;			/* 0x0050 */
	u64 reserved58[5];		/* 0x0058 */
} __packed __aligned(8);
131
/* Create Secure CPU (UVC_CMD_CREATE_SEC_CPU) */
struct uv_cb_csc {
	struct uv_cb_header header;	/* 0x0000 */
	u64 reserved08[2];		/* 0x0008 */
	u64 cpu_handle;			/* 0x0018: filled in by the UV on success */
	u64 guest_handle;		/* 0x0020 */
	u64 stor_origin;		/* 0x0028 */
	u8  reserved30[6];		/* 0x0030 */
	u16 num;			/* 0x0036: CPU number */
	u64 state_origin;		/* 0x0038 */
	u64 reserved40[4];		/* 0x0040 */
} __packed __aligned(8);
144
/* Convert to Secure (UVC_CMD_CONV_TO_SEC_STOR) */
struct uv_cb_cts {
	struct uv_cb_header header;	/* 0x0000 */
	u64 reserved08[2];		/* 0x0008 */
	u64 guest_handle;		/* 0x0018 */
	u64 gaddr;			/* 0x0020: guest address of the page to convert */
} __packed __aligned(8);
152
/*
 * Convert from Secure / Pin Page Shared
 * (UVC_CMD_CONV_FROM_SEC_STOR / UVC_CMD_PIN_PAGE_SHARED)
 */
struct uv_cb_cfs {
	struct uv_cb_header header;	/* 0x0000 */
	u64 reserved08[2];		/* 0x0008 */
	u64 paddr;			/* 0x0018: absolute address of the page */
} __packed __aligned(8);
159
/* Set Secure Config Parameter (UVC_CMD_SET_SEC_CONF_PARAMS) */
struct uv_cb_ssc {
	struct uv_cb_header header;	/* 0x0000 */
	u64 reserved08[2];		/* 0x0008 */
	u64 guest_handle;		/* 0x0018 */
	u64 sec_header_origin;		/* 0x0020: address of the SE header */
	u32 sec_header_len;		/* 0x0028 */
	u32 reserved2c;			/* 0x002c */
	u64 reserved30[4];		/* 0x0030 */
} __packed __aligned(8);
170
/* Unpack one protected guest page (UVC_CMD_UNPACK_IMG) */
struct uv_cb_unp {
	struct uv_cb_header header;	/* 0x0000 */
	u64 reserved08[2];		/* 0x0008 */
	u64 guest_handle;		/* 0x0018 */
	u64 gaddr;			/* 0x0020: guest address of the page */
	u64 tweak[2];			/* 0x0028: per-page tweak for image decryption */
	u64 reserved38[3];		/* 0x0038 */
} __packed __aligned(8);
180
/* CPU states settable via UVC_CMD_CPU_SET_STATE (uv_cb_cpu_set_state.state) */
#define PV_CPU_STATE_OPR 1	/* operating */
#define PV_CPU_STATE_STP 2	/* stopped */
#define PV_CPU_STATE_CHKSTP 3	/* check-stopped */
#define PV_CPU_STATE_OPR_LOAD 5	/* operating, entered via load; value 4 unused */
185
186 struct uv_cb_cpu_set_state {
187 struct uv_cb_header header;
188 u64 reserved08[2];
189 u64 cpu_handle;
190 u8 reserved20[7];
191 u8 state;
192 u64 reserved28[5];
193 };
194
/*
 * A common UV call struct for calls that take no payload
 * Examples:
 * Destroy cpu/config
 * Verify
 */
struct uv_cb_nodata {
	struct uv_cb_header header;	/* 0x0000 */
	u64 reserved08[2];		/* 0x0008 */
	u64 handle;			/* 0x0018: cpu or guest configuration handle */
	u64 reserved20[4];		/* 0x0020 */
} __packed __aligned(8);
207
/* Set/Remove Shared Access (UVC_CMD_SET/REMOVE_SHARED_ACCESS) */
struct uv_cb_share {
	struct uv_cb_header header;	/* 0x0000 */
	u64 reserved08[3];		/* 0x0008 */
	u64 paddr;			/* 0x0020: absolute address of the page to (un)share */
	u64 reserved28;			/* 0x0028 */
} __packed __aligned(8);
215
/*
 * Issue a single Ultravisor call (UVC instruction, emitted via .insn
 * with opcode 0xb9a4 so old assemblers need not know the mnemonic).
 *
 * @r1: first instruction operand (0 or command-specific)
 * @r2: second instruction operand, typically the address of a control block
 *
 * Returns the raw condition code (0..3), extracted from the PSW via
 * ipm/srl.  cc > 1 indicates a busy condition (see uv_call()).  The
 * "memory" clobber is required because the Ultravisor reads and updates
 * the control block addressed by r2 behind the compiler's back.
 */
static inline int __uv_call(unsigned long r1, unsigned long r2)
{
	int cc;

	asm volatile(
		" .insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
		" ipm %[cc]\n"
		" srl %[cc],28\n"
		: [cc] "=d" (cc)
		: [r1] "a" (r1), [r2] "a" (r2)
		: "memory", "cc");
	return cc;
}
229
/*
 * Issue a UV call, busy-looping as long as the Ultravisor reports a
 * busy condition (cc 2 or 3).  Returns the final condition code (0 or 1).
 * Use uv_call_sched() instead when long busy periods are expected.
 */
static inline int uv_call(unsigned long r1, unsigned long r2)
{
	int cc;

	while ((cc = __uv_call(r1, r2)) > 1)
		;
	return cc;
}
239
/*
 * Low level uv_call that avoids stalls for long running busy conditions
 * by offering to reschedule after every attempt.  Returns the final
 * condition code (0 or 1).
 */
static inline int uv_call_sched(unsigned long r1, unsigned long r2)
{
	int cc;

	for (;;) {
		cc = __uv_call(r1, r2);
		cond_resched();
		if (cc <= 1)
			break;
	}
	return cc;
}
251
252 /*
253 * special variant of uv_call that only transports the cpu or guest
254 * handle and the command, like destroy or verify.
255 */
uv_cmd_nodata(u64 handle,u16 cmd,u16 * rc,u16 * rrc)256 static inline int uv_cmd_nodata(u64 handle, u16 cmd, u16 *rc, u16 *rrc)
257 {
258 struct uv_cb_nodata uvcb = {
259 .header.cmd = cmd,
260 .header.len = sizeof(uvcb),
261 .handle = handle,
262 };
263 int cc;
264
265 WARN(!handle, "No handle provided to Ultravisor call cmd %x\n", cmd);
266 cc = uv_call_sched(0, (u64)&uvcb);
267 *rc = uvcb.header.rc;
268 *rrc = uvcb.header.rrc;
269 return cc ? -EINVAL : 0;
270 }
271
/*
 * Kernel-side cache of the Ultravisor capabilities, filled in from a
 * UVC_CMD_QUI query (see struct uv_cb_qui and uv_query_info()).
 * Field-to-field mapping to uv_cb_qui is presumed from matching names;
 * the copy itself happens outside this header.
 */
struct uv_info {
	unsigned long inst_calls_list[4];	/* installed UV calls bitmap */
	unsigned long uv_base_stor_len;
	unsigned long guest_base_stor_len;
	unsigned long guest_virt_base_stor_len;
	unsigned long guest_virt_var_stor_len;
	unsigned long guest_cpu_stor_len;
	unsigned long max_sec_stor_addr;
	unsigned int max_num_sec_conf;		/* max number of secure configurations */
	unsigned short max_guest_cpu_id;
	unsigned long uv_feature_indications;	/* enum uv_feat_ind bits */
};

extern struct uv_info uv_info;
286
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
extern int prot_virt_guest;

/* Nonzero when running as a protected virtualization (secure) guest. */
static inline int is_prot_virt_guest(void)
{
	return prot_virt_guest;
}
294
/*
 * Issue a page (un)share request to the Ultravisor.
 *
 * @addr: absolute address of the page
 * @cmd:  UVC_CMD_SET_SHARED_ACCESS or UVC_CMD_REMOVE_SHARED_ACCESS
 *
 * Returns 0 on success, -EOPNOTSUPP when not running as a protected
 * guest, -EINVAL when the Ultravisor rejected the request.
 */
static inline int share(unsigned long addr, u16 cmd)
{
	struct uv_cb_share uvcb = {
		.header.cmd = cmd,
		.header.len = sizeof(uvcb),
		.paddr = addr
	};

	if (!is_prot_virt_guest())
		return -EOPNOTSUPP;
	/*
	 * Sharing is page wise, if we encounter addresses that are
	 * not page aligned, we assume something went wrong. If
	 * malloced structs are passed to this function, we could leak
	 * data to the hypervisor.
	 */
	BUG_ON(addr & ~PAGE_MASK);

	return uv_call(0, (u64)&uvcb) ? -EINVAL : 0;
}
317
/*
 * Guest 2 request to the Ultravisor to make a page shared with the
 * hypervisor for IO.
 *
 * @addr: Real or absolute address of the page to be shared
 *
 * Returns 0 on success, -EOPNOTSUPP or -EINVAL from share().
 */
static inline int uv_set_shared(unsigned long addr)
{
	return share(addr, UVC_CMD_SET_SHARED_ACCESS);
}
328
/*
 * Guest 2 request to the Ultravisor to make a page unshared.
 *
 * @addr: Real or absolute address of the page to be unshared
 *
 * Returns 0 on success, -EOPNOTSUPP or -EINVAL from share().
 */
static inline int uv_remove_shared(unsigned long addr)
{
	return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
}
338
#else
/* Stubs for kernels built without protected virtualization guest support */
#define is_prot_virt_guest() 0
static inline int uv_set_shared(unsigned long addr) { return 0; }
static inline int uv_remove_shared(unsigned long addr) { return 0; }
#endif
344
#if IS_ENABLED(CONFIG_KVM)
extern int prot_virt_host;

/* Nonzero when acting as a protected virtualization host (UV host). */
static inline int is_prot_virt_host(void)
{
	return prot_virt_host;
}
352
/* UV host services, implemented in arch/s390 UV/gmap code */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
int uv_destroy_page(unsigned long paddr);
int uv_convert_from_secure(unsigned long paddr);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);

void setup_uv(void);
void adjust_to_uv_max(unsigned long *vmax);
#else
/* Stubs for kernels built without KVM: UV host support is compiled out */
#define is_prot_virt_host() 0
static inline void setup_uv(void) {}
static inline void adjust_to_uv_max(unsigned long *vmax) {}

static inline int uv_destroy_page(unsigned long paddr)
{
	return 0;
}

static inline int uv_convert_from_secure(unsigned long paddr)
{
	return 0;
}
#endif
375
/* Query the UV and fill uv_info; no-op when neither guest nor host support is built */
#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
void uv_query_info(void);
#else
static inline void uv_query_info(void) {}
#endif
381
382 #endif /* _ASM_S390_UV_H */
383