/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
#define _ASM_POWERPC_PLPAR_WRAPPERS_H

#ifdef CONFIG_PPC_PSERIES

#include <linux/string.h>
#include <linux/irqflags.h>

#include <asm/hvcall.h>
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/page.h>

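/* H_POLL_PENDING: ask the hypervisor whether it has pending work for this processor. */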
static inline long poll_pending(void)
{
	return plpar_hcall_norets(H_POLL_PENDING);
}

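/*
 * The cede latency hint lives in the lppaca and tells the hypervisor how
 * much wake-up latency the ceding virtual processor can tolerate.
 */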
static inline u8 get_cede_latency_hint(void)
{
	return get_lppaca()->cede_latency_hint;
}

static inline void set_cede_latency_hint(u8 latency_hint)
{
	get_lppaca()->cede_latency_hint = latency_hint;
}

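/* H_CEDE: give the virtual processor back to the hypervisor until it is needed again. */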
static inline long cede_processor(void)
{
	return plpar_hcall_norets(H_CEDE);
}

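/* Cede with a caller-supplied latency hint, restoring the previous hint afterwards. */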
static inline long extended_cede_processor(unsigned long latency_hint)
{
	long rc;
	u8 old_latency_hint = get_cede_latency_hint();

	set_cede_latency_hint(latency_hint);

	rc = cede_processor();
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	/* Ensure that H_CEDE returns with IRQs on */
	if (WARN_ON(!(mfmsr() & MSR_EE)))
		__hard_irq_enable();
#endif

	set_cede_latency_hint(old_latency_hint);

	return rc;
}

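/*
 * H_REGISTER_VPA wrappers. The function code shifted into the high flag
 * bits selects (de)registration of the Virtual Processor Area, the SLB
 * shadow buffer or the Dispatch Trace Log for the given virtual processor.
 */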
static inline long vpa_call(unsigned long flags, unsigned long cpu,
		unsigned long vpa)
{
	flags = flags << H_VPA_FUNC_SHIFT;

	return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
}

static inline long unregister_vpa(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
}

static inline long register_vpa(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_VPA, cpu, vpa);
}

static inline long unregister_slb_shadow(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
}

static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_SLB, cpu, vpa);
}

static inline long unregister_dtl(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
}

static inline long register_dtl(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_DTL, cpu, vpa);
}

extern void vpa_init(int cpu);

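/* H_ENTER: insert an HPTE into the hashed page table; the chosen slot is returned via *slot. */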
static inline long plpar_pte_enter(unsigned long flags,
		unsigned long hpte_group, unsigned long hpte_v,
		unsigned long hpte_r, unsigned long *slot)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);

	*slot = retbuf[0];

	return rc;
}

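/* H_REMOVE: invalidate an HPTE and return its previous contents. */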
static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
		unsigned long avpn, unsigned long *old_pteh_ret,
		unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);

	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}

/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
		unsigned long avpn, unsigned long *old_pteh_ret,
		unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);

	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}

static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
		unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_READ, retbuf, flags, ptex);

	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}

/* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
		unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);

	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}

/*
 * The ptes buffer must hold 8*sizeof(unsigned long) bytes (4 HPTE pairs).
 */
static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
				    unsigned long *ptes)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex);

	memcpy(ptes, retbuf, 8*sizeof(unsigned long));

	return rc;
}

/*
 * plpar_pte_read_4_raw can be called in real mode.
 * The ptes buffer must hold 8*sizeof(unsigned long) bytes (4 HPTE pairs).
 */
static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
					unsigned long *ptes)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);

	memcpy(ptes, retbuf, 8*sizeof(unsigned long));

	return rc;
}

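/* H_PROTECT: update the protection bits of an existing HPTE. */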
static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
		unsigned long avpn)
{
	return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
}

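/*
 * Two-phase hash page table resize: prepare builds the new table at the
 * requested shift, commit switches the partition over to it.
 */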
static inline long plpar_resize_hpt_prepare(unsigned long flags,
					    unsigned long shift)
{
	return plpar_hcall_norets(H_RESIZE_HPT_PREPARE, flags, shift);
}

static inline long plpar_resize_hpt_commit(unsigned long flags,
					   unsigned long shift)
{
	return plpar_hcall_norets(H_RESIZE_HPT_COMMIT, flags, shift);
}

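/*
 * TCE (I/O translation) wrappers: read or write a single entry, write a
 * list of entries indirectly, or fill a range with one value.
 */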
static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
		unsigned long *tce_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);

	*tce_ret = retbuf[0];

	return rc;
}

static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
		unsigned long tceval)
{
	return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
}

static inline long plpar_tce_put_indirect(unsigned long liobn,
		unsigned long ioba, unsigned long page, unsigned long count)
{
	return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
}

static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
		unsigned long tceval, unsigned long count)
{
	return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
}

/* Set various resource mode parameters */
static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
		unsigned long value1, unsigned long value2)
{
	return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
}

/*
 * Enable relocation on exceptions on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_reloc_on_exceptions(void)
{
	/* mflags = 3: Exceptions at 0xC000000000004000 */
	return plpar_set_mode(3, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
}

/*
 * Disable relocation on exceptions on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long disable_reloc_on_exceptions(void)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
}

/*
 * Take exceptions in big endian mode on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_big_endian_exceptions(void)
{
	/* mflags = 0: big endian exceptions */
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_LE, 0, 0);
}

/*
 * Take exceptions in little endian mode on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_little_endian_exceptions(void)
{
	/* mflags = 1: little endian exceptions */
	return plpar_set_mode(1, H_SET_MODE_RESOURCE_LE, 0, 0);
}

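/*
 * H_SET_MODE wrappers for the CIABR instruction breakpoint and the
 * DAWR/DAWRX data watchpoint register pairs.
 */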
static inline long plpar_set_ciabr(unsigned long ciabr)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_CIABR, ciabr, 0);
}

static inline long plpar_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR0, dawr0, dawrx0);
}

static inline long plpar_set_watchpoint1(unsigned long dawr1, unsigned long dawrx1)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR1, dawr1, dawrx1);
}

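/* H_SIGNAL_SYS_RESET: raise a system reset (NMI-style) interrupt on the given virtual processor. */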
static inline long plpar_signal_sys_reset(long cpu)
{
	return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
}

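/*
 * Retrieve the CPU security characteristics and recommended behaviours
 * reported by the hypervisor (used when configuring speculation mitigations).
 */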
static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
	if (rc == H_SUCCESS) {
		p->character = retbuf[0];
		p->behaviour = retbuf[1];
	}

	return rc;
}

/*
 * Wrapper to the H_RPT_INVALIDATE hcall that handles return values appropriately
 *
 * - Returns H_SUCCESS on success
 * - For an H_BUSY return value, we retry the hcall.
 * - For any other hcall failure, attempt a full flush once before
 *   resorting to BUG().
 *
 * Note: This hcall is expected to fail only very rarely. Proper error
 * recovery (killing the process/guest) will eventually be needed.
 */
static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
					  u64 page_sizes, u64 start, u64 end)
{
	long rc;
	unsigned long all;

	while (true) {
		rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target, type,
					page_sizes, start, end);
		if (rc == H_BUSY) {
			cpu_relax();
			continue;
		} else if (rc == H_SUCCESS)
			return rc;

		/* Flush request failed, try with a full flush once */
		if (type & H_RPTI_TYPE_NESTED)
			all = H_RPTI_TYPE_NESTED | H_RPTI_TYPE_NESTED_ALL;
		else
			all = H_RPTI_TYPE_ALL;
retry:
		rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target,
					all, page_sizes, 0, -1UL);
		if (rc == H_BUSY) {
			cpu_relax();
			goto retry;
		} else if (rc == H_SUCCESS)
			return rc;

		BUG();
	}
}

#else /* !CONFIG_PPC_PSERIES */

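/* Stub versions so common callers build when pSeries support is not configured. */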
static inline long plpar_set_ciabr(unsigned long ciabr)
{
	return 0;
}

static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
				    unsigned long *ptes)
{
	return 0;
}

static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
					  u64 page_sizes, u64 start, u64 end)
{
	return 0;
}

#endif /* CONFIG_PPC_PSERIES */

#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */