// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <acpi/ghes.h>
#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/arm-smccc.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/cpuhotplug.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

/*
 * The call to use to reach the firmware.
 */
static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
		unsigned long arg0, unsigned long arg1,
		unsigned long arg2, unsigned long arg3,
		unsigned long arg4, struct arm_smccc_res *res);

/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;

static int sdei_hp_state;

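/* State tracked by this driver for each registered event. */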
struct sdei_event {
	/* These three are protected by the sdei_list_lock */
	struct list_head	list;
	bool			reregister;
	bool			reenable;

	u32			event_num;
	u8			type;
	u8			priority;

	/* This pointer is handed to firmware as the event argument. */
	union {
		/* Shared events */
		struct sdei_registered_event *registered;

		/* CPU private events */
		struct sdei_registered_event __percpu *private_registered;
	};
};

/* Take the mutex for any API call or modification. Take the mutex first. */
static DEFINE_MUTEX(sdei_events_lock);

/* and then hold this when modifying the list */
static DEFINE_SPINLOCK(sdei_list_lock);
static LIST_HEAD(sdei_list);

/* Private events are registered/enabled via IPI passing one of these */
struct sdei_crosscall_args {
	struct sdei_event *event;
	atomic_t errors;
	int first_error;
};

#define CROSSCALL_INIT(arg, event)		\
	do {					\
		arg.event = event;		\
		arg.first_error = 0;		\
		atomic_set(&arg.errors, 0);	\
	} while (0)

static inline int sdei_do_local_call(smp_call_func_t fn,
				     struct sdei_event *event)
{
	struct sdei_crosscall_args arg;

	CROSSCALL_INIT(arg, event);
	fn(&arg);

	return arg.first_error;
}

static inline int sdei_do_cross_call(smp_call_func_t fn,
				     struct sdei_event *event)
{
	struct sdei_crosscall_args arg;

	CROSSCALL_INIT(arg, event);
	on_each_cpu(fn, &arg, true);

	return arg.first_error;
}

static inline void
sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
{
	if (err && (atomic_inc_return(&arg->errors) == 1))
		arg->first_error = err;
}

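/*
 * Map an SDEI firmware status code onto a Linux errno. Success, and any
 * value we don't recognise, maps to 0.
 */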
static int sdei_to_linux_errno(unsigned long sdei_err)
{
	switch (sdei_err) {
	case SDEI_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case SDEI_INVALID_PARAMETERS:
		return -EINVAL;
	case SDEI_DENIED:
		return -EPERM;
	case SDEI_PENDING:
		return -EINPROGRESS;
	case SDEI_OUT_OF_RESOURCE:
		return -ENOMEM;
	}

	return 0;
}

static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
			  unsigned long arg1, unsigned long arg2,
			  unsigned long arg3, unsigned long arg4,
			  u64 *result)
{
	int err;
	struct arm_smccc_res res;

	if (sdei_firmware_call) {
		sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
				   &res);
		err = sdei_to_linux_errno(res.a0);
	} else {
		/*
		 * !sdei_firmware_call means we failed to probe or called
		 * sdei_mark_interface_broken(). -EIO is not an error returned
		 * by sdei_to_linux_errno() and is used to suppress messages
		 * from this driver.
		 */
		err = -EIO;
		res.a0 = SDEI_NOT_SUPPORTED;
	}

	if (result)
		*result = res.a0;

	return err;
}
NOKPROBE_SYMBOL(invoke_sdei_fn);

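/*
 * Look up an event by number. Returns NULL if the event is not on
 * sdei_list. The caller must hold sdei_events_lock.
 */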
static struct sdei_event *sdei_event_find(u32 event_num)
{
	struct sdei_event *e, *found = NULL;

	lockdep_assert_held(&sdei_events_lock);

	spin_lock(&sdei_list_lock);
	list_for_each_entry(e, &sdei_list, list) {
		if (e->event_num == event_num) {
			found = e;
			break;
		}
	}
	spin_unlock(&sdei_list_lock);

	return found;
}

int sdei_api_event_context(u32 query, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
			      result);
}
NOKPROBE_SYMBOL(sdei_api_event_context);

static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
			      0, 0, result);
}

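/*
 * Allocate an event and ask firmware for its type and priority. Shared
 * events get a single registered-event structure, private events get one
 * per possible CPU. The new event is added to sdei_list.
 */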
static struct sdei_event *sdei_event_create(u32 event_num,
					    sdei_event_callback *cb,
					    void *cb_arg)
{
	int err;
	u64 result;
	struct sdei_event *event;
	struct sdei_registered_event *reg;

	lockdep_assert_held(&sdei_events_lock);

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event) {
		err = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&event->list);
	event->event_num = event_num;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err)
		goto fail;
	event->priority = result;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
				      &result);
	if (err)
		goto fail;
	event->type = result;

	if (event->type == SDEI_EVENT_TYPE_SHARED) {
		reg = kzalloc(sizeof(*reg), GFP_KERNEL);
		if (!reg) {
			err = -ENOMEM;
			goto fail;
		}

		reg->event_num = event->event_num;
		reg->priority = event->priority;

		reg->callback = cb;
		reg->callback_arg = cb_arg;
		event->registered = reg;
	} else {
		int cpu;
		struct sdei_registered_event __percpu *regs;

		regs = alloc_percpu(struct sdei_registered_event);
		if (!regs) {
			err = -ENOMEM;
			goto fail;
		}

		for_each_possible_cpu(cpu) {
			reg = per_cpu_ptr(regs, cpu);

			reg->event_num = event->event_num;
			reg->priority = event->priority;
			reg->callback = cb;
			reg->callback_arg = cb_arg;
		}

		event->private_registered = regs;
	}

	spin_lock(&sdei_list_lock);
	list_add(&event->list, &sdei_list);
	spin_unlock(&sdei_list_lock);

	return event;

fail:
	kfree(event);
	return ERR_PTR(err);
}

static void sdei_event_destroy_llocked(struct sdei_event *event)
{
	lockdep_assert_held(&sdei_events_lock);
	lockdep_assert_held(&sdei_list_lock);

	list_del(&event->list);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		kfree(event->registered);
	else
		free_percpu(event->private_registered);

	kfree(event);
}

static void sdei_event_destroy(struct sdei_event *event)
{
	spin_lock(&sdei_list_lock);
	sdei_event_destroy_llocked(event);
	spin_unlock(&sdei_list_lock);
}

static int sdei_api_get_version(u64 *version)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
}

int sdei_mask_local_cpu(void)
{
	int err;

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to mask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_mask_cpu(void *ignored)
{
	WARN_ON_ONCE(preemptible());
	sdei_mask_local_cpu();
}

int sdei_unmask_local_cpu(void)
{
	int err;

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to unmask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_unmask_cpu(void *ignored)
{
	WARN_ON_ONCE(preemptible());
	sdei_unmask_local_cpu();
}

static void _ipi_private_reset(void *ignored)
{
	int err;

	WARN_ON_ONCE(preemptible());

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
			     NULL);
	if (err && err != -EIO)
		pr_warn_once("failed to reset CPU[%u]: %d\n",
			     smp_processor_id(), err);
}

static int sdei_api_shared_reset(void)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
			      NULL);
}

static void sdei_mark_interface_broken(void)
{
	pr_err("disabling SDEI firmware interface\n");
	on_each_cpu(&_ipi_mask_cpu, NULL, true);
	sdei_firmware_call = NULL;
}

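/*
 * Reset the private (per-CPU) and shared firmware state. Called at probe,
 * from the reboot notifier, and when restoring from hibernate.
 */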
static int sdei_platform_reset(void)
{
	int err;

	on_each_cpu(&_ipi_private_reset, NULL, true);
	err = sdei_api_shared_reset();
	if (err) {
		pr_err("Failed to reset platform: %d\n", err);
		sdei_mark_interface_broken();
	}

	return err;
}

static int sdei_api_event_enable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
			      0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_enable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_enable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_enable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	cpus_read_lock();
	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_enable(event->event_num);
	else
		err = sdei_do_cross_call(_local_event_enable, event);

	if (!err) {
		spin_lock(&sdei_list_lock);
		event->reenable = true;
		spin_unlock(&sdei_list_lock);
	}
	cpus_read_unlock();
	mutex_unlock(&sdei_events_lock);

	return err;
}
EXPORT_SYMBOL_GPL(sdei_event_enable);

static int sdei_api_event_disable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
			      0, 0, NULL);
}

static void _ipi_event_disable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_disable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_disable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	spin_lock(&sdei_list_lock);
	event->reenable = false;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_disable(event->event_num);
	else
		err = sdei_do_cross_call(_ipi_event_disable, event);
	mutex_unlock(&sdei_events_lock);

	return err;
}
EXPORT_SYMBOL_GPL(sdei_event_disable);

static int sdei_api_event_unregister(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
			      0, 0, 0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_unregister(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_unregister(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_unregister(u32 event_num)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		pr_warn("Event %u not registered\n", event_num);
		err = -ENOENT;
		goto unlock;
	}

	spin_lock(&sdei_list_lock);
	event->reregister = false;
	event->reenable = false;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_unregister(event->event_num);
	else
		err = sdei_do_cross_call(_local_event_unregister, event);

	if (err)
		goto unlock;

	sdei_event_destroy(event);
unlock:
	mutex_unlock(&sdei_events_lock);

	return err;
}
EXPORT_SYMBOL_GPL(sdei_event_unregister);

/*
 * unregister events, but don't destroy them as they are re-registered by
 * sdei_reregister_shared().
 */
static int sdei_unregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		err = sdei_api_event_unregister(event->event_num);
		if (err)
			break;
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
				   void *arg, u64 flags, u64 affinity)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
			      (unsigned long)entry_point, (unsigned long)arg,
			      flags, affinity, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_register(void *data)
{
	int err;
	struct sdei_registered_event *reg;
	struct sdei_crosscall_args *arg = data;

	reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
	err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
				      reg, 0, 0);

	sdei_cross_call_return(arg, err);
}

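/*
 * Register @cb for @event_num. Shared events are registered with routing
 * mode "any"; private events are registered on each CPU via cross-call.
 */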
int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	if (sdei_event_find(event_num)) {
		pr_warn("Event %u already registered\n", event_num);
		err = -EBUSY;
		goto unlock;
	}

	event = sdei_event_create(event_num, cb, arg);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		pr_warn("Failed to create event %u: %d\n", event_num, err);
		goto unlock;
	}

	cpus_read_lock();
	if (event->type == SDEI_EVENT_TYPE_SHARED) {
		err = sdei_api_event_register(event->event_num,
					      sdei_entry_point,
					      event->registered,
					      SDEI_EVENT_REGISTER_RM_ANY, 0);
	} else {
		err = sdei_do_cross_call(_local_event_register, event);
		if (err)
			sdei_do_cross_call(_local_event_unregister, event);
	}

	if (err) {
		sdei_event_destroy(event);
		pr_warn("Failed to register event %u: %d\n", event_num, err);
		goto cpu_unlock;
	}

	spin_lock(&sdei_list_lock);
	event->reregister = true;
	spin_unlock(&sdei_list_lock);
cpu_unlock:
	cpus_read_unlock();
unlock:
	mutex_unlock(&sdei_events_lock);
	return err;
}
EXPORT_SYMBOL_GPL(sdei_event_register);

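/*
 * Re-register and re-enable shared events, e.g. when thawing after
 * hibernate. Private events are handled by the CPU hotplug callbacks.
 */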
static int sdei_reregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			err = sdei_api_event_register(event->event_num,
					sdei_entry_point, event->registered,
					SDEI_EVENT_REGISTER_RM_ANY, 0);
			if (err) {
				pr_err("Failed to re-register event %u\n",
				       event->event_num);
				sdei_event_destroy_llocked(event);
				break;
			}
		}

		if (event->reenable) {
			err = sdei_api_event_enable(event->event_num);
			if (err) {
				pr_err("Failed to re-enable event %u\n",
				       event->event_num);
				break;
			}
		}
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}

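/* CPU going offline: unregister this CPU's private events, then mask it. */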
static int sdei_cpuhp_down(unsigned int cpu)
{
	struct sdei_event *event;
	int err;

	/* un-register private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		err = sdei_do_local_call(_local_event_unregister, event);
		if (err) {
			pr_err("Failed to unregister event %u: %d\n",
			       event->event_num, err);
		}
	}
	spin_unlock(&sdei_list_lock);

	return sdei_mask_local_cpu();
}

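/* CPU coming online: re-register/re-enable its private events, then unmask it. */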
static int sdei_cpuhp_up(unsigned int cpu)
{
	struct sdei_event *event;
	int err;

	/* re-register/enable private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			err = sdei_do_local_call(_local_event_register, event);
			if (err) {
				pr_err("Failed to re-register event %u: %d\n",
				       event->event_num, err);
			}
		}

		if (event->reenable) {
			err = sdei_do_local_call(_local_event_enable, event);
			if (err) {
				pr_err("Failed to re-enable event %u: %d\n",
				       event->event_num, err);
			}
		}
	}
	spin_unlock(&sdei_list_lock);

	return sdei_unmask_local_cpu();
}

/* When entering idle, mask/unmask events for this cpu */
static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	int rv;

	WARN_ON_ONCE(preemptible());

	switch (action) {
	case CPU_PM_ENTER:
		rv = sdei_mask_local_cpu();
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		rv = sdei_unmask_local_cpu();
		break;
	default:
		return NOTIFY_DONE;
	}

	if (rv)
		return notifier_from_errno(rv);

	return NOTIFY_OK;
}

static struct notifier_block sdei_pm_nb = {
	.notifier_call = sdei_pm_notifier,
};

static int sdei_device_suspend(struct device *dev)
{
	on_each_cpu(_ipi_mask_cpu, NULL, true);

	return 0;
}

static int sdei_device_resume(struct device *dev)
{
	on_each_cpu(_ipi_unmask_cpu, NULL, true);

	return 0;
}

/*
 * We need all events to be reregistered when we resume from hibernate.
 *
 * The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
 * events during freeze, then re-register and re-enable them during thaw
 * and restore.
 */
static int sdei_device_freeze(struct device *dev)
{
	int err;

	/* unregister private events */
	cpuhp_remove_state(sdei_hp_state);

	err = sdei_unregister_shared();
	if (err)
		return err;

	return 0;
}

static int sdei_device_thaw(struct device *dev)
{
	int err;

	/* re-register shared events */
	err = sdei_reregister_shared();
	if (err) {
		pr_warn("Failed to re-register shared events...\n");
		sdei_mark_interface_broken();
		return err;
	}

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err < 0) {
		pr_warn("Failed to re-register CPU hotplug notifier...\n");
		return err;
	}

	sdei_hp_state = err;
	return 0;
}

static int sdei_device_restore(struct device *dev)
{
	int err;

	err = sdei_platform_reset();
	if (err)
		return err;

	return sdei_device_thaw(dev);
}

static const struct dev_pm_ops sdei_pm_ops = {
	.suspend = sdei_device_suspend,
	.resume = sdei_device_resume,
	.freeze = sdei_device_freeze,
	.thaw = sdei_device_thaw,
	.restore = sdei_device_restore,
};

/*
 * Mask all CPUs and unregister all events on panic, reboot or kexec.
 */
static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	/*
	 * We are going to reset the interface, after this there is no point
	 * doing work when we take CPUs offline.
	 */
	cpuhp_remove_state(sdei_hp_state);

	sdei_platform_reset();

	return NOTIFY_OK;
}

static struct notifier_block sdei_reboot_nb = {
	.notifier_call = sdei_reboot_notifier,
};

static void sdei_smccc_smc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_smc);

static void sdei_smccc_hvc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_hvc);

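/*
 * Register a GHES notification: pick the normal or critical callback based
 * on the event's priority, then register and enable the event.
 */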
int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
		       sdei_event_callback *critical_cb)
{
	int err;
	u64 result;
	u32 event_num;
	sdei_event_callback *cb;

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	event_num = ghes->generic->notify.vector;
	if (event_num == 0) {
		/*
		 * Event 0 is reserved by the specification for
		 * SDEI_EVENT_SIGNAL.
		 */
		return -EINVAL;
	}

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err)
		return err;

	if (result == SDEI_EVENT_PRIORITY_CRITICAL)
		cb = critical_cb;
	else
		cb = normal_cb;

	err = sdei_event_register(event_num, cb, ghes);
	if (!err)
		err = sdei_event_enable(event_num);

	return err;
}

int sdei_unregister_ghes(struct ghes *ghes)
{
	int i;
	int err;
	u32 event_num = ghes->generic->notify.vector;

	might_sleep();

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	/*
	 * The event may be running on another CPU. Disable it
	 * to stop new events, then try to unregister a few times.
	 */
	err = sdei_event_disable(event_num);
	if (err)
		return err;

	for (i = 0; i < 3; i++) {
		err = sdei_event_unregister(event_num);
		if (err != -EINPROGRESS)
			break;

		schedule();
	}

	return err;
}

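/*
 * Pick the conduit used to reach firmware: the DT "method" property
 * ("hvc" or "smc"), or the PSCI conduit when booted via ACPI.
 */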
static int sdei_get_conduit(struct platform_device *pdev)
{
	const char *method;
	struct device_node *np = pdev->dev.of_node;

	sdei_firmware_call = NULL;
	if (np) {
		if (of_property_read_string(np, "method", &method)) {
			pr_warn("missing \"method\" property\n");
			return SMCCC_CONDUIT_NONE;
		}

		if (!strcmp("hvc", method)) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return SMCCC_CONDUIT_HVC;
		} else if (!strcmp("smc", method)) {
			sdei_firmware_call = &sdei_smccc_smc;
			return SMCCC_CONDUIT_SMC;
		}

		pr_warn("invalid \"method\" property: %s\n", method);
	} else if (!acpi_disabled) {
		if (acpi_psci_use_hvc()) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return SMCCC_CONDUIT_HVC;
		} else {
			sdei_firmware_call = &sdei_smccc_smc;
			return SMCCC_CONDUIT_SMC;
		}
	}

	return SMCCC_CONDUIT_NONE;
}

static int sdei_probe(struct platform_device *pdev)
{
	int err;
	u64 ver = 0;
	int conduit;

	conduit = sdei_get_conduit(pdev);
	if (!sdei_firmware_call)
		return 0;

	err = sdei_api_get_version(&ver);
	if (err) {
		pr_err("Failed to get SDEI version: %d\n", err);
		sdei_mark_interface_broken();
		return err;
	}

	pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
		(int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
		(int)SDEI_VERSION_VENDOR(ver));

	if (SDEI_VERSION_MAJOR(ver) != 1) {
		pr_warn("Conflicting SDEI version detected.\n");
		sdei_mark_interface_broken();
		return -EINVAL;
	}

	err = sdei_platform_reset();
	if (err)
		return err;

	sdei_entry_point = sdei_arch_get_entry_point(conduit);
	if (!sdei_entry_point) {
		/* Not supported due to hardware or boot configuration */
		sdei_mark_interface_broken();
		return 0;
	}

	err = cpu_pm_register_notifier(&sdei_pm_nb);
	if (err) {
		pr_warn("Failed to register CPU PM notifier...\n");
		goto error;
	}

	err = register_reboot_notifier(&sdei_reboot_nb);
	if (err) {
		pr_warn("Failed to register reboot notifier...\n");
		goto remove_cpupm;
	}

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err < 0) {
		pr_warn("Failed to register CPU hotplug notifier...\n");
		goto remove_reboot;
	}

	sdei_hp_state = err;

	return 0;

remove_reboot:
	unregister_reboot_notifier(&sdei_reboot_nb);

remove_cpupm:
	cpu_pm_unregister_notifier(&sdei_pm_nb);

error:
	sdei_mark_interface_broken();
	return err;
}

static const struct of_device_id sdei_of_match[] = {
	{ .compatible = "arm,sdei-1.0" },
	{}
};

static struct platform_driver sdei_driver = {
	.driver = {
		.name = "sdei",
		.pm = &sdei_pm_ops,
		.of_match_table = sdei_of_match,
	},
	.probe = sdei_probe,
};

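/*
 * When booted via ACPI, the presence of the SDEI table is what indicates
 * firmware support; its contents are not needed here.
 */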
static bool __init sdei_present_acpi(void)
{
	acpi_status status;
	struct acpi_table_header *sdei_table_header;

	if (acpi_disabled)
		return false;

	status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		const char *msg = acpi_format_exception(status);

		pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
	}
	if (ACPI_FAILURE(status))
		return false;

	acpi_put_table(sdei_table_header);

	return true;
}

void __init acpi_sdei_init(void)
{
	struct platform_device *pdev;
	int ret;

	if (!sdei_present_acpi())
		return;

	pdev = platform_device_register_simple(sdei_driver.driver.name,
					       0, NULL, 0);
	if (IS_ERR(pdev)) {
		ret = PTR_ERR(pdev);
		platform_driver_unregister(&sdei_driver);
		pr_info("Failed to register ACPI:SDEI platform device %d\n",
			ret);
	}
}

static int __init sdei_init(void)
{
	return platform_driver_register(&sdei_driver);
}
arch_initcall(sdei_init);

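/*
 * Run the callback registered for this event. @arg is the registered-event
 * structure that was handed to firmware at registration time.
 */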
int sdei_event_handler(struct pt_regs *regs,
		       struct sdei_registered_event *arg)
{
	int err;
	u32 event_num = arg->event_num;

	err = arg->callback(event_num, regs, arg->callback_arg);
	if (err)
		pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
				   event_num, smp_processor_id(), err);

	return err;
}
NOKPROBE_SYMBOL(sdei_event_handler);

void sdei_handler_abort(void)
{
	/*
	 * If the crash happened in an SDEI event handler then we need to
	 * finish the handler with the firmware so that we can have working
	 * interrupts in the crash kernel.
	 */
	if (__this_cpu_read(sdei_active_critical_event)) {
		pr_warn("still in SDEI critical event context, attempting to finish handler.\n");
		__sdei_handler_abort();
		__this_cpu_write(sdei_active_critical_event, NULL);
	}
	if (__this_cpu_read(sdei_active_normal_event)) {
		pr_warn("still in SDEI normal event context, attempting to finish handler.\n");
		__sdei_handler_abort();
		__this_cpu_write(sdei_active_normal_event, NULL);
	}
}