• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2017 Arm Ltd.
3 #define pr_fmt(fmt) "sdei: " fmt
4 
5 #include <acpi/ghes.h>
6 #include <linux/acpi.h>
7 #include <linux/arm_sdei.h>
8 #include <linux/arm-smccc.h>
9 #include <linux/atomic.h>
10 #include <linux/bitops.h>
11 #include <linux/compiler.h>
12 #include <linux/cpuhotplug.h>
13 #include <linux/cpu.h>
14 #include <linux/cpu_pm.h>
15 #include <linux/errno.h>
16 #include <linux/hardirq.h>
17 #include <linux/kernel.h>
18 #include <linux/kprobes.h>
19 #include <linux/kvm_host.h>
20 #include <linux/list.h>
21 #include <linux/mutex.h>
22 #include <linux/notifier.h>
23 #include <linux/of.h>
24 #include <linux/of_platform.h>
25 #include <linux/percpu.h>
26 #include <linux/platform_device.h>
27 #include <linux/pm.h>
28 #include <linux/ptrace.h>
29 #include <linux/preempt.h>
30 #include <linux/reboot.h>
31 #include <linux/slab.h>
32 #include <linux/smp.h>
33 #include <linux/spinlock.h>
34 #include <linux/uaccess.h>
35 
/*
 * The call to use to reach the firmware: an SMC or HVC wrapper chosen by
 * sdei_get_conduit(). NULL means probing failed or the interface was marked
 * broken, and makes invoke_sdei_fn() return -EIO.
 */
static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
		      unsigned long arg0, unsigned long arg1,
		      unsigned long arg2, unsigned long arg3,
		      unsigned long arg4, struct arm_smccc_res *res);

/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;

/* Dynamic CPU hotplug state returned by cpuhp_setup_state(); used to remove it. */
static int sdei_hp_state;
48 
/* Driver-side record of one SDEI event number. */
struct sdei_event {
	/* These three are protected by the sdei_list_lock */
	struct list_head	list;
	bool			reregister;	/* re-register on thaw/cpu-up */
	bool			reenable;	/* re-enable on thaw/cpu-up */

	u32			event_num;
	u8			type;		/* SDEI_EVENT_TYPE_SHARED or private */
	u8			priority;

	/* This pointer is handed to firmware as the event argument. */
	union {
		/* Shared events */
		struct sdei_registered_event *registered;

		/* CPU private events */
		struct sdei_registered_event __percpu *private_registered;
	};
};

/* Take the mutex for any API call or modification. Take the mutex first. */
static DEFINE_MUTEX(sdei_events_lock);

/* and then hold this when modifying the list */
static DEFINE_SPINLOCK(sdei_list_lock);
static LIST_HEAD(sdei_list);

/* Private events are registered/enabled via IPI passing one of these */
struct sdei_crosscall_args {
	struct sdei_event *event;
	atomic_t errors;	/* number of CPUs whose call failed */
	int first_error;	/* error code of the first CPU to fail */
};

/* A macro (not a function) so 'arg' can be a local struct, not a pointer. */
#define CROSSCALL_INIT(arg, event)		\
	do {					\
		arg.event = event;		\
		arg.first_error = 0;		\
		atomic_set(&arg.errors, 0);	\
	} while (0)
89 
/*
 * Run @fn for @event on the calling CPU only (no IPI).
 * Returns the error the callback reported, or 0 on success.
 */
static inline int sdei_do_local_call(smp_call_func_t fn,
				     struct sdei_event *event)
{
	struct sdei_crosscall_args xcall;

	CROSSCALL_INIT(xcall, event);
	fn(&xcall);
	return xcall.first_error;
}
100 
/*
 * Run @fn for @event on every online CPU, waiting for all of them to finish.
 * Returns the error reported by the first CPU to fail, or 0 if all succeeded.
 */
static inline int sdei_do_cross_call(smp_call_func_t fn,
				     struct sdei_event *event)
{
	struct sdei_crosscall_args arg;

	CROSSCALL_INIT(arg, event);
	on_each_cpu(fn, &arg, true);

	return arg.first_error;
}
111 
112 static inline void
sdei_cross_call_return(struct sdei_crosscall_args * arg,int err)113 sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
114 {
115 	if (err && (atomic_inc_return(&arg->errors) == 1))
116 		arg->first_error = err;
117 }
118 
sdei_to_linux_errno(unsigned long sdei_err)119 static int sdei_to_linux_errno(unsigned long sdei_err)
120 {
121 	switch (sdei_err) {
122 	case SDEI_NOT_SUPPORTED:
123 		return -EOPNOTSUPP;
124 	case SDEI_INVALID_PARAMETERS:
125 		return -EINVAL;
126 	case SDEI_DENIED:
127 		return -EPERM;
128 	case SDEI_PENDING:
129 		return -EINPROGRESS;
130 	case SDEI_OUT_OF_RESOURCE:
131 		return -ENOMEM;
132 	}
133 
134 	return 0;
135 }
136 
/*
 * Call an SDEI firmware function through the configured conduit and convert
 * the firmware status to a linux errno. If @result is non-NULL it receives
 * the raw res.a0 value. Safe to call before probe or after the interface is
 * marked broken; those cases return -EIO.
 */
static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
			  unsigned long arg1, unsigned long arg2,
			  unsigned long arg3, unsigned long arg4,
			  u64 *result)
{
	int err;
	struct arm_smccc_res res;

	if (sdei_firmware_call) {
		sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
				   &res);
		err = sdei_to_linux_errno(res.a0);
	} else {
		/*
		 * !sdei_firmware_call means we failed to probe or called
		 * sdei_mark_interface_broken(). -EIO is not an error returned
		 * by sdei_to_linux_errno() and is used to suppress messages
		 * from this driver.
		 */
		err = -EIO;
		res.a0 = SDEI_NOT_SUPPORTED;
	}

	if (result)
		*result = res.a0;

	return err;
}
NOKPROBE_SYMBOL(invoke_sdei_fn);
166 
/*
 * Look up a registered event by number, or return NULL.
 * Takes sdei_list_lock internally; the caller must hold sdei_events_lock so
 * the returned event cannot be freed underneath it.
 */
static struct sdei_event *sdei_event_find(u32 event_num)
{
	struct sdei_event *found = NULL, *e;

	lockdep_assert_held(&sdei_events_lock);

	spin_lock(&sdei_list_lock);
	list_for_each_entry(e, &sdei_list, list) {
		if (e->event_num != event_num)
			continue;

		found = e;
		break;
	}
	spin_unlock(&sdei_list_lock);

	return found;
}
184 
/*
 * Read one of the registers saved by firmware when the current event was
 * taken. Only meaningful from within an SDEI event handler; exported for the
 * arch entry code.
 */
int sdei_api_event_context(u32 query, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
			      result);
}
NOKPROBE_SYMBOL(sdei_api_event_context);
191 
/* Query firmware for one SDEI_EVENT_INFO_* property of @event into @result. */
static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
			      0, 0, result);
}
197 
/*
 * Allocate and initialise a struct sdei_event for @event_num, querying
 * firmware for its priority and type. Shared events get one
 * sdei_registered_event; private events get a per-cpu copy for each possible
 * CPU. The new event is added to sdei_list. Returns the event or an
 * ERR_PTR on failure. Caller holds sdei_events_lock.
 */
static struct sdei_event *sdei_event_create(u32 event_num,
					    sdei_event_callback *cb,
					    void *cb_arg)
{
	int err;
	u64 result;
	struct sdei_event *event;
	struct sdei_registered_event *reg;

	lockdep_assert_held(&sdei_events_lock);

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event) {
		err = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&event->list);
	event->event_num = event_num;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err)
		goto fail;
	event->priority = result;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
				      &result);
	if (err)
		goto fail;
	event->type = result;

	if (event->type == SDEI_EVENT_TYPE_SHARED) {
		reg = kzalloc(sizeof(*reg), GFP_KERNEL);
		if (!reg) {
			err = -ENOMEM;
			goto fail;
		}

		/* This struct is handed to firmware as the event argument. */
		reg->event_num = event->event_num;
		reg->priority = event->priority;

		reg->callback = cb;
		reg->callback_arg = cb_arg;
		event->registered = reg;
	} else {
		int cpu;
		struct sdei_registered_event __percpu *regs;

		regs = alloc_percpu(struct sdei_registered_event);
		if (!regs) {
			err = -ENOMEM;
			goto fail;
		}

		/* Every CPU's copy carries the same callback and argument. */
		for_each_possible_cpu(cpu) {
			reg = per_cpu_ptr(regs, cpu);

			reg->event_num = event->event_num;
			reg->priority = event->priority;
			reg->callback = cb;
			reg->callback_arg = cb_arg;
		}

		event->private_registered = regs;
	}

	spin_lock(&sdei_list_lock);
	list_add(&event->list, &sdei_list);
	spin_unlock(&sdei_list_lock);

	return event;

fail:
	kfree(event);
	return ERR_PTR(err);
}
275 
sdei_event_destroy_llocked(struct sdei_event * event)276 static void sdei_event_destroy_llocked(struct sdei_event *event)
277 {
278 	lockdep_assert_held(&sdei_events_lock);
279 	lockdep_assert_held(&sdei_list_lock);
280 
281 	list_del(&event->list);
282 
283 	if (event->type == SDEI_EVENT_TYPE_SHARED)
284 		kfree(event->registered);
285 	else
286 		free_percpu(event->private_registered);
287 
288 	kfree(event);
289 }
290 
/* As sdei_event_destroy_llocked(), but takes sdei_list_lock itself. */
static void sdei_event_destroy(struct sdei_event *event)
{
	spin_lock(&sdei_list_lock);
	sdei_event_destroy_llocked(event);
	spin_unlock(&sdei_list_lock);
}
297 
/* Read the SDEI version implemented by firmware into @version. */
static int sdei_api_get_version(u64 *version)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
}
302 
sdei_mask_local_cpu(void)303 int sdei_mask_local_cpu(void)
304 {
305 	int err;
306 
307 	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
308 	if (err && err != -EIO) {
309 		pr_warn_once("failed to mask CPU[%u]: %d\n",
310 			      smp_processor_id(), err);
311 		return err;
312 	}
313 
314 	return 0;
315 }
316 
/* IPI target: mask SDEI events on the CPU this runs on. */
static void _ipi_mask_cpu(void *ignored)
{
	WARN_ON_ONCE(preemptible());
	sdei_mask_local_cpu();
}
322 
sdei_unmask_local_cpu(void)323 int sdei_unmask_local_cpu(void)
324 {
325 	int err;
326 
327 	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
328 	if (err && err != -EIO) {
329 		pr_warn_once("failed to unmask CPU[%u]: %d\n",
330 			     smp_processor_id(), err);
331 		return err;
332 	}
333 
334 	return 0;
335 }
336 
/* IPI target: unmask SDEI events on the CPU this runs on. */
static void _ipi_unmask_cpu(void *ignored)
{
	WARN_ON_ONCE(preemptible());
	sdei_unmask_local_cpu();
}
342 
/*
 * IPI target: ask firmware to unregister all private events on this CPU.
 * -EIO (interface broken) is silently ignored.
 */
static void _ipi_private_reset(void *ignored)
{
	int err;

	WARN_ON_ONCE(preemptible());

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
			     NULL);
	if (err && err != -EIO)
		pr_warn_once("failed to reset CPU[%u]: %d\n",
			     smp_processor_id(), err);
}
355 
/* Ask firmware to unregister all shared events, platform wide. */
static int sdei_api_shared_reset(void)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
			      NULL);
}
361 
/*
 * Give up on the firmware interface: mask all CPUs and clear
 * sdei_firmware_call so future invocations return -EIO.
 */
static void sdei_mark_interface_broken(void)
{
	pr_err("disabling SDEI firmware interface\n");
	on_each_cpu(&_ipi_mask_cpu, NULL, true);
	sdei_firmware_call = NULL;
}
368 
sdei_platform_reset(void)369 static int sdei_platform_reset(void)
370 {
371 	int err;
372 
373 	on_each_cpu(&_ipi_private_reset, NULL, true);
374 	err = sdei_api_shared_reset();
375 	if (err) {
376 		pr_err("Failed to reset platform: %d\n", err);
377 		sdei_mark_interface_broken();
378 	}
379 
380 	return err;
381 }
382 
/* Ask firmware to enable delivery of @event_num. */
static int sdei_api_event_enable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
			      0, NULL);
}
388 
389 /* Called directly by the hotplug callbacks */
_local_event_enable(void * data)390 static void _local_event_enable(void *data)
391 {
392 	int err;
393 	struct sdei_crosscall_args *arg = data;
394 
395 	err = sdei_api_event_enable(arg->event->event_num);
396 
397 	sdei_cross_call_return(arg, err);
398 }
399 
/*
 * Enable a registered event. Shared events are enabled with one firmware
 * call; private events are enabled on each online CPU via cross-call, with
 * cpus_read_lock() held so CPUs can't come/go halfway through. On success
 * the event is flagged 'reenable' so hotplug/hibernate paths restore it.
 */
int sdei_event_enable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}


	cpus_read_lock();
	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_enable(event->event_num);
	else
		err = sdei_do_cross_call(_local_event_enable, event);

	if (!err) {
		spin_lock(&sdei_list_lock);
		event->reenable = true;
		spin_unlock(&sdei_list_lock);
	}
	cpus_read_unlock();
	mutex_unlock(&sdei_events_lock);

	return err;
}
429 
/* Ask firmware to disable delivery of @event_num. */
static int sdei_api_event_disable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
			      0, 0, NULL);
}
435 
/* Cross-call body: disable a private event on this CPU. */
static void _ipi_event_disable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_disable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}
445 
/*
 * Disable a registered event. 'reenable' is cleared before the firmware
 * call so a concurrent hotplug/thaw path won't re-enable the event behind
 * our back.
 */
int sdei_event_disable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	spin_lock(&sdei_list_lock);
	event->reenable = false;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_disable(event->event_num);
	else
		err = sdei_do_cross_call(_ipi_event_disable, event);
	mutex_unlock(&sdei_events_lock);

	return err;
}
470 
/* Ask firmware to unregister @event_num. May return SDEI_PENDING (-EINPROGRESS). */
static int sdei_api_event_unregister(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
			      0, 0, 0, NULL);
}
476 
477 /* Called directly by the hotplug callbacks */
_local_event_unregister(void * data)478 static void _local_event_unregister(void *data)
479 {
480 	int err;
481 	struct sdei_crosscall_args *arg = data;
482 
483 	err = sdei_api_event_unregister(arg->event->event_num);
484 
485 	sdei_cross_call_return(arg, err);
486 }
487 
/*
 * Unregister an event with firmware and destroy the driver's record of it.
 * On firmware failure (e.g. -EINPROGRESS while the handler is running) the
 * event is left in place so the caller can retry.
 */
int sdei_event_unregister(u32 event_num)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		pr_warn("Event %u not registered\n", event_num);
		err = -ENOENT;
		goto unlock;
	}

	/* Stop the hotplug/hibernate paths re-creating it meanwhile. */
	spin_lock(&sdei_list_lock);
	event->reregister = false;
	event->reenable = false;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_unregister(event->event_num);
	else
		err = sdei_do_cross_call(_local_event_unregister, event);

	if (err)
		goto unlock;

	sdei_event_destroy(event);
unlock:
	mutex_unlock(&sdei_events_lock);

	return err;
}
522 
523 /*
524  * unregister events, but don't destroy them as they are re-registered by
525  * sdei_reregister_shared().
526  */
sdei_unregister_shared(void)527 static int sdei_unregister_shared(void)
528 {
529 	int err = 0;
530 	struct sdei_event *event;
531 
532 	mutex_lock(&sdei_events_lock);
533 	spin_lock(&sdei_list_lock);
534 	list_for_each_entry(event, &sdei_list, list) {
535 		if (event->type != SDEI_EVENT_TYPE_SHARED)
536 			continue;
537 
538 		err = sdei_api_event_unregister(event->event_num);
539 		if (err)
540 			break;
541 	}
542 	spin_unlock(&sdei_list_lock);
543 	mutex_unlock(&sdei_events_lock);
544 
545 	return err;
546 }
547 
/*
 * Register @event_num with firmware. @entry_point is the address firmware
 * branches to; @arg is handed back to the handler; @flags and @affinity are
 * the SDEI_EVENT_REGISTER_* routing mode and affinity.
 */
static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
				   void *arg, u64 flags, u64 affinity)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
			      (unsigned long)entry_point, (unsigned long)arg,
			      flags, affinity, NULL);
}
555 
556 /* Called directly by the hotplug callbacks */
_local_event_register(void * data)557 static void _local_event_register(void *data)
558 {
559 	int err;
560 	struct sdei_registered_event *reg;
561 	struct sdei_crosscall_args *arg = data;
562 
563 	reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
564 	err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
565 				      reg, 0, 0);
566 
567 	sdei_cross_call_return(arg, err);
568 }
569 
/*
 * Create and register an event. Shared events are registered with routing
 * mode RM_ANY; private events are registered on every online CPU under
 * cpus_read_lock(). If any CPU fails, the partial registrations are undone
 * and the event destroyed. On success the event is flagged 'reregister' so
 * hotplug/hibernate paths restore it.
 */
int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	if (sdei_event_find(event_num)) {
		pr_warn("Event %u already registered\n", event_num);
		err = -EBUSY;
		goto unlock;
	}

	event = sdei_event_create(event_num, cb, arg);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		pr_warn("Failed to create event %u: %d\n", event_num, err);
		goto unlock;
	}

	cpus_read_lock();
	if (event->type == SDEI_EVENT_TYPE_SHARED) {
		err = sdei_api_event_register(event->event_num,
					      sdei_entry_point,
					      event->registered,
					      SDEI_EVENT_REGISTER_RM_ANY, 0);
	} else {
		err = sdei_do_cross_call(_local_event_register, event);
		if (err)
			sdei_do_cross_call(_local_event_unregister, event);
	}

	if (err) {
		sdei_event_destroy(event);
		pr_warn("Failed to register event %u: %d\n", event_num, err);
		goto cpu_unlock;
	}

	spin_lock(&sdei_list_lock);
	event->reregister = true;
	spin_unlock(&sdei_list_lock);
cpu_unlock:
	cpus_read_unlock();
unlock:
	mutex_unlock(&sdei_events_lock);
	return err;
}
618 
/*
 * Re-register (and re-enable if flagged) all shared events after hibernate.
 * Events that fail to re-register are destroyed; the loop stops at the
 * first failure.
 */
static int sdei_reregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			err = sdei_api_event_register(event->event_num,
					sdei_entry_point, event->registered,
					SDEI_EVENT_REGISTER_RM_ANY, 0);
			if (err) {
				pr_err("Failed to re-register event %u\n",
				       event->event_num);
				sdei_event_destroy_llocked(event);
				break;
			}
		}

		if (event->reenable) {
			err = sdei_api_event_enable(event->event_num);
			if (err) {
				pr_err("Failed to re-enable event %u\n",
				       event->event_num);
				break;
			}
		}
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}
656 
/*
 * CPU hotplug 'teardown' callback: runs on the CPU going offline.
 * Unregister this CPU's private events, then mask the CPU.
 */
static int sdei_cpuhp_down(unsigned int cpu)
{
	struct sdei_event *event;
	int err;

	/* un-register private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		err = sdei_do_local_call(_local_event_unregister, event);
		if (err) {
			pr_err("Failed to unregister event %u: %d\n",
			       event->event_num, err);
		}
	}
	spin_unlock(&sdei_list_lock);

	return sdei_mask_local_cpu();
}
678 
/*
 * CPU hotplug 'startup' callback: runs on the CPU coming online.
 * Re-register/re-enable flagged private events for this CPU, then unmask it.
 */
static int sdei_cpuhp_up(unsigned int cpu)
{
	struct sdei_event *event;
	int err;

	/* re-register/enable private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			err = sdei_do_local_call(_local_event_register, event);
			if (err) {
				pr_err("Failed to re-register event %u: %d\n",
				       event->event_num, err);
			}
		}

		if (event->reenable) {
			err = sdei_do_local_call(_local_event_enable, event);
			if (err) {
				pr_err("Failed to re-enable event %u: %d\n",
				       event->event_num, err);
			}
		}
	}
	spin_unlock(&sdei_list_lock);

	return sdei_unmask_local_cpu();
}
710 
711 /* When entering idle, mask/unmask events for this cpu */
sdei_pm_notifier(struct notifier_block * nb,unsigned long action,void * data)712 static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
713 			    void *data)
714 {
715 	int rv;
716 
717 	WARN_ON_ONCE(preemptible());
718 
719 	switch (action) {
720 	case CPU_PM_ENTER:
721 		rv = sdei_mask_local_cpu();
722 		break;
723 	case CPU_PM_EXIT:
724 	case CPU_PM_ENTER_FAILED:
725 		rv = sdei_unmask_local_cpu();
726 		break;
727 	default:
728 		return NOTIFY_DONE;
729 	}
730 
731 	if (rv)
732 		return notifier_from_errno(rv);
733 
734 	return NOTIFY_OK;
735 }
736 
737 static struct notifier_block sdei_pm_nb = {
738 	.notifier_call = sdei_pm_notifier,
739 };
740 
/* PM suspend hook: mask SDEI events on every CPU. */
static int sdei_device_suspend(struct device *dev)
{
	on_each_cpu(_ipi_mask_cpu, NULL, true);

	return 0;
}
747 
/* PM resume hook: unmask SDEI events on every CPU. */
static int sdei_device_resume(struct device *dev)
{
	on_each_cpu(_ipi_unmask_cpu, NULL, true);

	return 0;
}
754 
755 /*
756  * We need all events to be reregistered when we resume from hibernate.
757  *
758  * The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
759  * events during freeze, then re-register and re-enable them during thaw
760  * and restore.
761  */
sdei_device_freeze(struct device * dev)762 static int sdei_device_freeze(struct device *dev)
763 {
764 	int err;
765 
766 	/* unregister private events */
767 	cpuhp_remove_state(sdei_entry_point);
768 
769 	err = sdei_unregister_shared();
770 	if (err)
771 		return err;
772 
773 	return 0;
774 }
775 
/*
 * PM thaw hook: re-register shared events with firmware, then re-install the
 * CPU hotplug state so private events come back as each CPU is brought up.
 */
static int sdei_device_thaw(struct device *dev)
{
	int err;

	/* re-register shared events */
	err = sdei_reregister_shared();
	if (err) {
		pr_warn("Failed to re-register shared events...\n");
		sdei_mark_interface_broken();
		return err;
	}

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err < 0) {
		pr_warn("Failed to re-register CPU hotplug notifier...\n");
		return err;
	}

	/* err >= 0 is the dynamic state number; keep it for removal later. */
	sdei_hp_state = err;
	return 0;
}
798 
/*
 * PM restore hook (after resuming from a hibernation image): the firmware
 * state is unknown, so reset the platform before re-registering as in thaw.
 */
static int sdei_device_restore(struct device *dev)
{
	int err;

	err = sdei_platform_reset();
	if (err)
		return err;

	return sdei_device_thaw(dev);
}
809 
/* Suspend/hibernate callbacks for the SDEI platform device. */
static const struct dev_pm_ops sdei_pm_ops = {
	.suspend = sdei_device_suspend,
	.resume = sdei_device_resume,
	.freeze = sdei_device_freeze,
	.thaw = sdei_device_thaw,
	.restore = sdei_device_restore,
};
817 
/*
 * Mask all CPUs and unregister all events on panic, reboot or kexec.
 */
static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	/*
	 * We are going to reset the interface, after this there is no point
	 * doing work when we take CPUs offline.
	 */
	cpuhp_remove_state(sdei_hp_state);

	sdei_platform_reset();

	return NOTIFY_OK;
}

static struct notifier_block sdei_reboot_nb = {
	.notifier_call = sdei_reboot_notifier,
};
838 
/* SMC conduit for sdei_firmware_call; trailing SMCCC args are unused (0). */
static void sdei_smccc_smc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_smc);
847 
/* HVC conduit for sdei_firmware_call; trailing SMCCC args are unused (0). */
static void sdei_smccc_hvc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_hvc);
856 
/*
 * Register and enable the GHES notification event for @ghes, choosing
 * @critical_cb or @normal_cb based on the event's firmware-reported
 * priority. The event number comes from the HEST entry's notify vector.
 */
int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
		       sdei_event_callback *critical_cb)
{
	int err;
	u64 result;
	u32 event_num;
	sdei_event_callback *cb;

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	event_num = ghes->generic->notify.vector;
	if (event_num == 0) {
		/*
		 * Event 0 is reserved by the specification for
		 * SDEI_EVENT_SIGNAL.
		 */
		return -EINVAL;
	}

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err)
		return err;

	if (result == SDEI_EVENT_PRIORITY_CRITICAL)
		cb = critical_cb;
	else
		cb = normal_cb;

	err = sdei_event_register(event_num, cb, ghes);
	if (!err)
		err = sdei_event_enable(event_num);

	return err;
}
893 
/*
 * Disable and unregister the GHES notification event for @ghes.
 * The handler may be running on another CPU, in which case firmware returns
 * SDEI_PENDING (-EINPROGRESS); retry unregistering a few times, yielding in
 * between. May sleep.
 */
int sdei_unregister_ghes(struct ghes *ghes)
{
	int i;
	int err;
	u32 event_num = ghes->generic->notify.vector;

	might_sleep();

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	/*
	 * The event may be running on another CPU. Disable it
	 * to stop new events, then try to unregister a few times.
	 */
	err = sdei_event_disable(event_num);
	if (err)
		return err;

	for (i = 0; i < 3; i++) {
		err = sdei_event_unregister(event_num);
		if (err != -EINPROGRESS)
			break;

		schedule();
	}

	return err;
}
923 
/*
 * Decide whether firmware is reached via SMC or HVC, from the DT "method"
 * property, or (on ACPI systems) from the PSCI conduit. Sets
 * sdei_firmware_call accordingly; leaves it NULL and returns
 * SMCCC_CONDUIT_NONE when no valid conduit is described.
 */
static int sdei_get_conduit(struct platform_device *pdev)
{
	const char *method;
	struct device_node *np = pdev->dev.of_node;

	sdei_firmware_call = NULL;
	if (np) {
		if (of_property_read_string(np, "method", &method)) {
			pr_warn("missing \"method\" property\n");
			return SMCCC_CONDUIT_NONE;
		}

		if (!strcmp("hvc", method)) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return SMCCC_CONDUIT_HVC;
		} else if (!strcmp("smc", method)) {
			sdei_firmware_call = &sdei_smccc_smc;
			return SMCCC_CONDUIT_SMC;
		}

		pr_warn("invalid \"method\" property: %s\n", method);
	} else if (!acpi_disabled) {
		/* With ACPI, follow whatever conduit PSCI uses. */
		if (acpi_psci_use_hvc()) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return SMCCC_CONDUIT_HVC;
		} else {
			sdei_firmware_call = &sdei_smccc_smc;
			return SMCCC_CONDUIT_SMC;
		}
	}

	return SMCCC_CONDUIT_NONE;
}
957 
/*
 * Probe: pick a conduit, check the firmware implements SDEI v1.x, reset the
 * platform, then install the CPU PM, reboot and CPU hotplug notifiers.
 * Any failure after the version check marks the interface broken.
 */
static int sdei_probe(struct platform_device *pdev)
{
	int err;
	u64 ver = 0;
	int conduit;

	conduit = sdei_get_conduit(pdev);
	if (!sdei_firmware_call)
		return 0;

	err = sdei_api_get_version(&ver);
	if (err) {
		pr_err("Failed to get SDEI version: %d\n", err);
		sdei_mark_interface_broken();
		return err;
	}

	pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
		(int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
		(int)SDEI_VERSION_VENDOR(ver));

	if (SDEI_VERSION_MAJOR(ver) != 1) {
		pr_warn("Conflicting SDEI version detected.\n");
		sdei_mark_interface_broken();
		return -EINVAL;
	}

	err = sdei_platform_reset();
	if (err)
		return err;

	sdei_entry_point = sdei_arch_get_entry_point(conduit);
	if (!sdei_entry_point) {
		/* Not supported due to hardware or boot configuration */
		sdei_mark_interface_broken();
		return 0;
	}

	err = cpu_pm_register_notifier(&sdei_pm_nb);
	if (err) {
		pr_warn("Failed to register CPU PM notifier...\n");
		goto error;
	}

	err = register_reboot_notifier(&sdei_reboot_nb);
	if (err) {
		pr_warn("Failed to register reboot notifier...\n");
		goto remove_cpupm;
	}

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err < 0) {
		pr_warn("Failed to register CPU hotplug notifier...\n");
		goto remove_reboot;
	}

	/* err >= 0 is the dynamic hotplug state; keep it for removal later. */
	sdei_hp_state = err;

	return 0;

remove_reboot:
	unregister_reboot_notifier(&sdei_reboot_nb);

remove_cpupm:
	cpu_pm_unregister_notifier(&sdei_pm_nb);

error:
	sdei_mark_interface_broken();
	return err;
}
1029 
/* DT match table and the platform driver itself. */
static const struct of_device_id sdei_of_match[] = {
	{ .compatible = "arm,sdei-1.0" },
	{}
};

static struct platform_driver sdei_driver = {
	.driver		= {
		.name			= "sdei",
		.pm			= &sdei_pm_ops,
		.of_match_table		= sdei_of_match,
	},
	.probe		= sdei_probe,
};
1043 
sdei_present_acpi(void)1044 static bool __init sdei_present_acpi(void)
1045 {
1046 	acpi_status status;
1047 	struct acpi_table_header *sdei_table_header;
1048 
1049 	if (acpi_disabled)
1050 		return false;
1051 
1052 	status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
1053 	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
1054 		const char *msg = acpi_format_exception(status);
1055 
1056 		pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
1057 	}
1058 	if (ACPI_FAILURE(status))
1059 		return false;
1060 
1061 	acpi_put_table(sdei_table_header);
1062 
1063 	return true;
1064 }
1065 
/*
 * Register the platform driver; on ACPI systems that advertise an SDEI
 * table, also create the platform device (DT systems get theirs from the
 * device tree).
 */
static int __init sdei_init(void)
{
	struct platform_device *pdev;
	int ret;

	ret = platform_driver_register(&sdei_driver);
	if (ret || !sdei_present_acpi())
		return ret;

	pdev = platform_device_register_simple(sdei_driver.driver.name,
					       0, NULL, 0);
	if (IS_ERR(pdev)) {
		ret = PTR_ERR(pdev);
		platform_driver_unregister(&sdei_driver);
		pr_info("Failed to register ACPI:SDEI platform device %d\n",
			ret);
	}

	return ret;
}

/*
 * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register
 * its events. ACPI is initialised from a subsys_initcall(), GHES is initialised
 * by device_initcall(). We want to be called in the middle.
 */
subsys_initcall_sync(sdei_init);
1093 
/*
 * Called from the arch SDEI entry code to run the registered callback for
 * an event. Returns the callback's result; failures are rate-limit logged.
 */
int sdei_event_handler(struct pt_regs *regs,
		       struct sdei_registered_event *arg)
{
	int err;
	mm_segment_t orig_addr_limit;
	u32 event_num = arg->event_num;

	/*
	 * Save restore 'fs'.
	 * The architecture's entry code save/restores 'fs' when taking an
	 * exception from the kernel. This ensures addr_limit isn't inherited
	 * if you interrupted something that allowed the uaccess routines to
	 * access kernel memory.
	 * Do the same here because this doesn't come via the same entry code.
	*/
	orig_addr_limit = force_uaccess_begin();

	err = arg->callback(event_num, regs, arg->callback_arg);
	if (err)
		pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
				   event_num, smp_processor_id(), err);

	force_uaccess_end(orig_addr_limit);

	return err;
}
NOKPROBE_SYMBOL(sdei_event_handler);
1121